diff --git a/cmd/containerd/builtins/builtins_linux.go b/cmd/containerd/builtins/builtins_linux.go
index de70cffb5ab5..9e569fdae732 100644
--- a/cmd/containerd/builtins/builtins_linux.go
+++ b/cmd/containerd/builtins/builtins_linux.go
@@ -22,6 +22,9 @@ import (
_ "github.com/containerd/containerd/runtime/v1/linux"
_ "github.com/containerd/containerd/runtime/v2/runc/options"
_ "github.com/containerd/containerd/snapshots/blockfile/plugin"
+ _ "github.com/containerd/containerd/snapshots/btrfs/plugin"
_ "github.com/containerd/containerd/snapshots/native/plugin"
_ "github.com/containerd/containerd/snapshots/overlay/plugin"
+ _ "github.com/containerd/fuse-overlayfs-snapshotter/plugin"
+ _ "github.com/containerd/stargz-snapshotter/service/plugin"
)
diff --git a/go.mod b/go.mod
index 1aaa41e79732..4018f7f73ba3 100644
--- a/go.mod
+++ b/go.mod
@@ -1,150 +1,185 @@
module github.com/containerd/containerd

-go 1.21
+go 1.22.0
+
+toolchain go1.22.2
+
+replace (
+ github.com/Microsoft/hcsshim => github.com/Microsoft/hcsshim v0.11.7
+ github.com/containerd/go-runc => github.com/containerd/go-runc v1.0.0
+ github.com/containerd/imgcrypt => github.com/containerd/imgcrypt v1.1.11
+ github.com/distribution/reference => github.com/distribution/reference v0.5.0
+)

require (
- dario.cat/mergo v1.0.0
+ dario.cat/mergo v1.0.1
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0
github.com/Microsoft/go-winio v0.6.2
- github.com/Microsoft/hcsshim v0.11.7
+ github.com/Microsoft/hcsshim v0.12.6
github.com/containerd/aufs v1.0.0
github.com/containerd/btrfs/v2 v2.0.0
- github.com/containerd/cgroups/v3 v3.0.2
- github.com/containerd/console v1.0.3
- github.com/containerd/containerd/api v1.7.19
- github.com/containerd/continuity v0.4.2
+ github.com/containerd/cgroups/v3 v3.0.3
+ github.com/containerd/console v1.0.4
+ github.com/containerd/containerd/api v1.8.0-rc.3
+ github.com/containerd/continuity v0.4.3
github.com/containerd/errdefs v0.1.0
github.com/containerd/fifo v1.1.0
- github.com/containerd/go-cni v1.1.9
- github.com/containerd/go-runc v1.0.0
- github.com/containerd/imgcrypt v1.1.8
+ github.com/containerd/fuse-overlayfs-snapshotter v1.0.8
+ github.com/containerd/go-cni v1.1.10
+ github.com/containerd/go-runc v1.1.0
+ github.com/containerd/imgcrypt v1.2.0-rc1
github.com/containerd/log v0.1.0
github.com/containerd/nri v0.6.1
github.com/containerd/platforms v0.2.1
+ github.com/containerd/stargz-snapshotter v0.15.2-0.20240828164322-2030a405041d
github.com/containerd/ttrpc v1.2.5
- github.com/containerd/typeurl/v2 v2.1.1
+ github.com/containerd/typeurl/v2 v2.2.0
github.com/containerd/zfs v1.1.0
- github.com/containernetworking/cni v1.1.2
- github.com/containernetworking/plugins v1.2.0
+ github.com/containernetworking/cni v1.2.3
+ github.com/containernetworking/plugins v1.5.1
github.com/coreos/go-systemd/v22 v22.5.0
- github.com/davecgh/go-spew v1.1.1
- github.com/distribution/reference v0.5.0
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
+ github.com/distribution/reference v0.6.0
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c
github.com/docker/go-metrics v0.0.1
github.com/docker/go-units v0.5.0
- github.com/emicklei/go-restful/v3 v3.10.1
- github.com/fsnotify/fsnotify v1.6.0
+ github.com/emicklei/go-restful/v3 v3.11.0
+ github.com/fsnotify/fsnotify v1.7.0
github.com/google/go-cmp v0.6.0
- github.com/google/uuid v1.4.0
+ github.com/google/uuid v1.6.0
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/hashicorp/go-multierror v1.1.1
- github.com/intel/goresctrl v0.3.0
- github.com/klauspost/compress v1.16.7
+ github.com/intel/goresctrl v0.7.0
+ github.com/klauspost/compress v1.17.9
github.com/minio/sha256-simd v1.0.0
github.com/moby/locker v1.0.1
- github.com/moby/sys/mountinfo v0.6.2
- github.com/moby/sys/sequential v0.5.0
- github.com/moby/sys/signal v0.7.0
- github.com/moby/sys/symlink v0.2.0
+ github.com/moby/sys/mountinfo v0.7.2
+ github.com/moby/sys/sequential v0.6.0
+ github.com/moby/sys/signal v0.7.1
+ github.com/moby/sys/symlink v0.3.0
github.com/moby/sys/user v0.3.0
github.com/moby/sys/userns v0.1.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0
- github.com/opencontainers/runtime-spec v1.1.0
+ github.com/opencontainers/runtime-spec v1.2.0
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626
github.com/opencontainers/selinux v1.11.0
github.com/pelletier/go-toml v1.9.5
- github.com/prometheus/client_golang v1.14.0
+ github.com/prometheus/client_golang v1.20.1
github.com/sirupsen/logrus v1.9.3
- github.com/stretchr/testify v1.8.4
+ github.com/stretchr/testify v1.9.0
github.com/tchap/go-patricia/v2 v2.3.1
- github.com/urfave/cli v1.22.12
+ github.com/urfave/cli v1.22.15
github.com/vishvananda/netlink v1.2.1-beta.2
- go.etcd.io/bbolt v1.3.10
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0
- go.opentelemetry.io/otel v1.21.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0
- go.opentelemetry.io/otel/sdk v1.21.0
- go.opentelemetry.io/otel/trace v1.21.0
- golang.org/x/net v0.23.0
- golang.org/x/sync v0.5.0
- golang.org/x/sys v0.18.0
- google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3
- google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0
- google.golang.org/grpc v1.59.0
- google.golang.org/protobuf v1.33.0
- k8s.io/api v0.26.2
- k8s.io/apimachinery v0.26.2
- k8s.io/apiserver v0.26.2
- k8s.io/client-go v0.26.2
- k8s.io/component-base v0.26.2
- k8s.io/cri-api v0.27.1
- k8s.io/klog/v2 v2.90.1
- k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5
- tags.cncf.io/container-device-interface v0.7.2
+ go.etcd.io/bbolt v1.3.11
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0
+ go.opentelemetry.io/otel v1.28.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0
+ go.opentelemetry.io/otel/sdk v1.28.0
+ go.opentelemetry.io/otel/trace v1.28.0
+ golang.org/x/net v0.27.0
+ golang.org/x/sync v0.8.0
+ golang.org/x/sys v0.24.0
+ google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094
+ google.golang.org/grpc v1.66.0
+ google.golang.org/protobuf v1.34.2
+ k8s.io/api v0.31.0
+ k8s.io/apimachinery v0.31.0
+ k8s.io/apiserver v0.31.0
+ k8s.io/client-go v0.31.0
+ k8s.io/component-base v0.31.0
+ k8s.io/cri-api v0.32.0-alpha.0
+ k8s.io/klog/v2 v2.130.1
+ k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
+ tags.cncf.io/container-device-interface v0.8.0
)

require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
- github.com/cenkalti/backoff/v4 v4.2.1 // indirect
- github.com/cespare/xxhash/v2 v2.2.0 // indirect
- github.com/cilium/ebpf v0.9.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/cilium/ebpf v0.11.0 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
+ github.com/containerd/containerd/v2 v2.0.0-rc.4 // indirect
+ github.com/containerd/plugin v0.1.0 // indirect
+ github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/containerd/typeurl v1.0.2 // indirect
- github.com/containers/ocicrypt v1.1.10 // indirect
- github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
- github.com/go-jose/go-jose/v3 v3.0.3 // indirect
- github.com/go-logr/logr v1.3.0 // indirect
+ github.com/containers/ocicrypt v1.2.0 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
+ github.com/docker/cli v27.1.2+incompatible // indirect
+ github.com/docker/docker-credential-helpers v0.7.0 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+ github.com/go-jose/go-jose/v4 v4.0.2 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonreference v0.20.2 // indirect
+ github.com/go-openapi/swag v0.22.4 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
+ github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
+ github.com/gorilla/websocket v1.5.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+ github.com/hanwen/go-fuse/v2 v2.5.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+ github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
+ github.com/imdario/mergo v0.3.13 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/cpuid/v2 v2.0.4 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+ github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
- github.com/moby/spdystream v0.2.0 // indirect
+ github.com/moby/spdystream v0.4.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+ github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_model v0.3.0 // indirect
- github.com/prometheus/common v0.37.0 // indirect
- github.com/prometheus/procfs v0.8.0 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/common v0.55.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
- github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
+ github.com/vbatts/tar-split v0.11.5 // indirect
+ github.com/vishvananda/netns v0.0.4 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/otel/metric v1.21.0 // indirect
- go.opentelemetry.io/proto/otlp v1.0.0 // indirect
- golang.org/x/crypto v0.21.0 // indirect
- golang.org/x/mod v0.12.0 // indirect
- golang.org/x/oauth2 v0.11.0 // indirect
- golang.org/x/term v0.18.0 // indirect
- golang.org/x/text v0.14.0 // indirect
- golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
- google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
+ go.opentelemetry.io/otel/metric v1.28.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+ golang.org/x/crypto v0.25.0 // indirect
+ golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 // indirect
+ golang.org/x/mod v0.20.0 // indirect
+ golang.org/x/oauth2 v0.21.0 // indirect
+ golang.org/x/term v0.22.0 // indirect
+ golang.org/x/text v0.16.0 // indirect
+ golang.org/x/time v0.3.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
- sigs.k8s.io/yaml v1.3.0 // indirect
- tags.cncf.io/container-device-interface/specs-go v0.7.0 // indirect
+ k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
+ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+ sigs.k8s.io/yaml v1.4.0 // indirect
+ tags.cncf.io/container-device-interface/specs-go v0.8.0 // indirect
)

// Workaround for indirect dependency no longer being available.
diff --git a/go.sum b/go.sum
index cd98318f35a0..1531e87cece9 100644
--- a/go.sum
+++ b/go.sum
@@ -1,9 +1,11 @@
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
@@ -14,82 +16,795 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
+cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
+cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
+cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw=
+cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
+cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4=
+cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
+cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E=
+cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68=
+cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o=
+cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE=
+cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM=
+cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ=
+cloud.google.com/go/accesscontextmanager v1.8.0/go.mod h1:uI+AI/r1oyWK99NN8cQ3UK76AMelMzgZCvJfsi2c+ps=
+cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo=
+cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=
+cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=
+cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg=
+cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ=
+cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k=
+cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw=
+cloud.google.com/go/aiplatform v1.45.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA=
+cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
+cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4=
+cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M=
+cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE=
+cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE=
+cloud.google.com/go/analytics v0.21.2/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo=
+cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk=
+cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc=
+cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8=
+cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA=
+cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc=
+cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04=
+cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8=
+cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs=
+cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY=
+cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM=
+cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc=
+cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw=
+cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU=
+cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI=
+cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8=
+cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno=
+cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak=
+cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84=
+cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A=
+cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E=
+cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY=
+cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
+cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=
+cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY=
+cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k=
+cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg=
+cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
+cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk=
+cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0=
+cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc=
+cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI=
+cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ=
+cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI=
+cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08=
+cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E=
+cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
+cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s=
+cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0=
+cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ=
+cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY=
+cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo=
+cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg=
+cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw=
+cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ=
+cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
+cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw=
+cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI=
+cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
+cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
+cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
+cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0=
+cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
+cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8=
+cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8=
+cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM=
+cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU=
+cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE=
+cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc=
+cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI=
+cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss=
+cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE=
+cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE=
+cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g=
+cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4=
+cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8=
+cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM=
+cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU=
+cloud.google.com/go/beyondcorp v0.6.1/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
-cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=
+cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw=
+cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc=
+cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E=
+cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac=
+cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q=
+cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU=
+cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4=
+cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=
+cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s=
+cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI=
+cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y=
+cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss=
+cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc=
+cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA=
+cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=
+cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI=
+cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0=
+cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk=
+cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q=
+cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U=
+cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg=
+cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590=
+cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8=
+cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI=
+cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk=
+cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk=
+cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE=
+cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU=
+cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc=
+cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U=
+cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA=
+cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M=
+cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg=
+cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s=
+cloud.google.com/go/cloudbuild v1.10.1/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU=
+cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM=
+cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk=
+cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA=
+cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI=
+cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY=
+cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI=
+cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4=
+cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI=
+cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y=
+cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs=
+cloud.google.com/go/cloudtasks v1.11.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
+cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
+cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
+cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
+cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
+cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
+cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
+cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI=
+cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY=
+cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
+cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w=
+cloud.google.com/go/contactcenterinsights v1.9.1/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM=
+cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg=
+cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo=
+cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4=
+cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM=
+cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA=
+cloud.google.com/go/container v1.22.1/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4=
+cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
+cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
+cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI=
+cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s=
+cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0=
+cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
+cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
+cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc=
+cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE=
+cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM=
+cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M=
+cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0=
+cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8=
+cloud.google.com/go/datacatalog v1.14.0/go.mod h1:h0PrGtlihoutNMp/uvwhawLQ9+c63Kz65UFqh49Yo+E=
+cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4=
+cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
+cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=
+cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE=
+cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw=
+cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo=
+cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE=
+cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0=
+cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA=
+cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE=
+cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M=
+cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38=
+cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w=
+cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8=
+cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI=
+cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I=
+cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ=
+cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM=
+cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY=
+cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA=
+cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
+cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ=
+cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs=
+cloud.google.com/go/dataplex v1.8.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE=
+cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s=
+cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
+cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
+cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo=
+cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA=
+cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c=
+cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM=
+cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c=
+cloud.google.com/go/datastore v1.12.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70=
+cloud.google.com/go/datastore v1.12.1/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70=
+cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo=
+cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ=
+cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g=
+cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4=
+cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs=
+cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww=
+cloud.google.com/go/datastream v1.9.1/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q=
+cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c=
+cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s=
+cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI=
+cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ=
+cloud.google.com/go/deploy v1.11.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g=
+cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4=
+cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0=
+cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8=
+cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek=
+cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0=
+cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM=
+cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4=
+cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE=
+cloud.google.com/go/dialogflow v1.38.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4=
+cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM=
+cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q=
+cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4=
+cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI=
+cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU=
+cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU=
+cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k=
+cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4=
+cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM=
+cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs=
+cloud.google.com/go/documentai v1.20.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E=
+cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y=
+cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=
+cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE=
+cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE=
+cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk=
+cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=
+cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc=
+cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY=
+cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk=
+cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
+cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI=
+cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
+cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M=
+cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4=
+cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc=
+cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
+cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw=
+cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY=
+cloud.google.com/go/eventarc v1.12.1/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI=
+cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w=
+cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
+cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs=
+cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg=
+cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
+cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4=
+cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=
+cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg=
+cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY=
+cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
+cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw=
+cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA=
+cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c=
+cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE=
+cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=
+cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA=
+cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w=
+cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM=
+cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0=
+cloud.google.com/go/gaming v1.10.1/go.mod h1:XQQvtfP8Rb9Rxnxm5wFVpAp9zCQkJi2bLIb7iHGwB3s=
+cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60=
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo=
+cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg=
+cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=
+cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=
+cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw=
+cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw=
+cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
+cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
+cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E=
+cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw=
+cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY=
+cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA=
+cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
+cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y=
+cloud.google.com/go/gkemulticloud v0.6.1/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw=
+cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
+cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8=
+cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM=
+cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o=
+cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo=
+cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY=
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
+cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc=
+cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=
+cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
+cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY=
+cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
+cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8=
+cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk=
+cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
+cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
+cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
+cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk=
+cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo=
+cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74=
+cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ=
+cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM=
+cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
+cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4=
+cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw=
+cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs=
+cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
+cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o=
+cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE=
+cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk=
+cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
+cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg=
+cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
+cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg=
+cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w=
+cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24=
+cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI=
+cloud.google.com/go/kms v1.11.0/go.mod h1:hwdiYC0xjnWsKQQCQQmIQnS9asjYVSK6jtXm+zFqXLM=
+cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM=
+cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
+cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
+cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE=
+cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8=
+cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY=
+cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0=
+cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
+cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
+cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo=
+cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc=
+cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw=
+cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M=
+cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE=
+cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
+cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
+cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ=
+cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc=
+cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc=
+cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE=
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
+cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA=
+cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak=
+cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
+cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw=
+cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY=
+cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
+cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
+cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I=
+cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig=
+cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
+cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM=
+cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA=
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
+cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM=
+cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA=
+cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
+cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s=
+cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8=
+cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
+cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo=
+cloud.google.com/go/metastore v1.11.1/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA=
+cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
+cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
+cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w=
+cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw=
+cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM=
+cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
+cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
+cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
+cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
+cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E=
+cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM=
+cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E=
+cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8=
+cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
+cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY=
+cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0=
+cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
+cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k=
+cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU=
+cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ=
+cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
+cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
+cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
+cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE=
+cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ=
+cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8=
+cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
+cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI=
+cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk=
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
+cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ=
+cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8=
+cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
+cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc=
+cloud.google.com/go/orgpolicy v1.11.0/go.mod h1:2RK748+FtVvnfuynxBzdnyu7sygtoZa1za/0ZfpOs1M=
+cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE=
+cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
+cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=
+cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
+cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw=
+cloud.google.com/go/osconfig v1.12.0/go.mod h1:8f/PaYzoS3JMVfdfTubkowZYGmAhUCjjwnjqWI7NVBc=
+cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE=
+cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
+cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
+cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs=
+cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs=
+cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk=
+cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I=
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw=
+cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc=
+cloud.google.com/go/policytroubleshooter v1.7.1/go.mod h1:0NaT5v3Ag1M7U5r0GfDCpUFkWd9YqpubBWsQlhanRv0=
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
+cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg=
+cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs=
+cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
+cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
+cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4=
+cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
+cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM=
+cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0=
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
+cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
+cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
+cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=
+cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU=
+cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
+cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE=
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
+cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ=
+cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA=
+cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
+cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ=
+cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg=
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots=
+cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo=
+cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI=
+cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8=
+cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
+cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw=
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
+cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
+cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
+cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE=
+cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM=
+cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
+cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
+cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
+cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc=
+cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
+cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo=
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
+cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
+cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
+cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
+cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
+cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA=
+cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
+cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
+cloud.google.com/go/servicedirectory v1.10.1/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
+cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
+cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
+cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
+cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
+cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
+cloud.google.com/go/speech v1.17.1/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
-dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
+cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
+cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
+cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
+cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
+cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
+cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
+cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk=
+cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0=
+cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.8.1/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs=
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg=
+cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
+cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.17.1/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
+cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
+cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
+cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
+cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
+cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
+cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
+cloud.google.com/go/vmwareengine v0.4.1/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
+cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
+cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
+cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA=
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
-github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
-github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.11.7 h1:vl/nj3Bar/CvJSYo7gIQPyRWc9f3c6IeSNavBTSZNZQ=
github.com/Microsoft/hcsshim v0.11.7/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
+github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
+github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
+github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg=
+github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.43.16/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -97,63 +812,92 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/bytecodealliance/wasmtime-go v0.36.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI=
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
-github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
+github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y=
+github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
github.com/containerd/aufs v1.0.0 h1:2oeJiwX5HstO7shSrPZjrohJZLzK36wvpdmzDRkL/LY=
github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
github.com/containerd/btrfs/v2 v2.0.0 h1:FN4wsx7KQrYoLXN7uLP0vBV4oVWHOIKDRQ1G2Z0oL5M=
github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k=
-github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
+github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
-github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
-github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
+github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
-github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
+github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -161,48 +905,61 @@ github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
-github.com/containerd/containerd/api v1.7.19 h1:VWbJL+8Ap4Ju2mx9c9qS1uFSB1OVYr5JJrW2yT5vFoA=
-github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0=
+github.com/containerd/containerd v1.6.18/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw=
+github.com/containerd/containerd v1.6.23/go.mod h1:UrQOiyzrLi3n4aezYJbQH6Il+YzTvnHFbEuO3yfDrM4=
+github.com/containerd/containerd v1.6.26/go.mod h1:I4TRdsdoo5MlKob5khDJS2EPT1l1oMNaE2MBm6FrwxM=
+github.com/containerd/containerd/api v1.8.0-rc.3 h1:q9MyeXmuAGEyKmUGYvvFftNX1RQhfTLsAvYK+SQHQso=
+github.com/containerd/containerd/api v1.8.0-rc.3/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc=
+github.com/containerd/containerd/v2 v2.0.0-rc.4 h1:Bvto4h5i2VZkQ+L5SrGupg5ilQ+zkVPILdjf9RWMego=
+github.com/containerd/containerd/v2 v2.0.0-rc.4/go.mod h1:p35nJi4Pl9ibzuoVOPc3MputVh6Gbp9xoDg9VHz6/YI=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
-github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM=
-github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
+github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
+github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
+github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
+github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
-github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
+github.com/containerd/fuse-overlayfs-snapshotter v1.0.8 h1:O471INHO59/fnSVE+B+THGjvRA2d1K6/FdpUuhNnXwk=
+github.com/containerd/fuse-overlayfs-snapshotter v1.0.8/go.mod h1:mY+oK2oQhlUk6hP5HNG28/OK9oqQpB2wK1w6sudC5gQ=
github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
-github.com/containerd/go-cni v1.1.9 h1:ORi7P1dYzCwVM6XPN4n3CbkuOx/NZ2DOqy+SHRdo9rU=
-github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34=
+github.com/containerd/go-cni v1.1.10 h1:c2U73nld7spSWfiJwSh/8W9DK+/qQwYM2rngIhCyhyg=
+github.com/containerd/go-cni v1.1.10/go.mod h1:/Y/sL8yqYQn1ZG1om1OncJB1W4zN3YmjfP/ShCzG/OY=
github.com/containerd/go-runc v1.0.0 h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0=
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
-github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
-github.com/containerd/imgcrypt v1.1.8 h1:ZS7TuywcRNLoHpU0g+v4/PsKynl6TYlw5xDVWWoIyFA=
-github.com/containerd/imgcrypt v1.1.8/go.mod h1:x6QvFIkMyO2qGIY2zXc88ivEzcbgvLdWjoZyGqDap5U=
+github.com/containerd/imgcrypt v1.1.11 h1:3RULIeLouE7B5l9NzMq0HdPWG0DP5deEVxB5UKxyUoU=
+github.com/containerd/imgcrypt v1.1.11/go.mod h1:nXL4jp1GrtO758b16DVsxaHHzu9PravAsKARYmyHR58=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.1/go.mod h1:ZoFbxeZjXxrY3TpGxRjz/naxN16QK9n1Mx2YV2vJoLU=
github.com/containerd/nri v0.6.1 h1:xSQ6elnQ4Ynidm9u49ARK9wRKHs80HCUI+bkXOxV4mA=
github.com/containerd/nri v0.6.1/go.mod h1:7+sX3wNx+LR7RzhjnJiUkFDhn18P5Bg/0VnJ/uXpRJM=
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
+github.com/containerd/plugin v0.1.0 h1:CYMyZk9beRAIe1FEKItbMLLAz/z16aXrGc+B+nv0fU4=
+github.com/containerd/plugin v0.1.0/go.mod h1:j6HlpMtkiZMgT4UsfVNxPBUkwdw9KQGU6nCLfRxnq+w=
+github.com/containerd/stargz-snapshotter v0.15.2-0.20240828164322-2030a405041d h1:J39DRec9N/d8ZyM09vdxzPoVfV/xk+3Gr2/z6+0fA0w=
+github.com/containerd/stargz-snapshotter v0.15.2-0.20240828164322-2030a405041d/go.mod h1:FrgpK2plChKARdlCGl0k2BlSVT9i2Hzg/GaK+vXWpeE=
+github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/ttrpc v1.1.2/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU=
github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
@@ -210,59 +967,97 @@ github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYz
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
-github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
-github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
+github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
+github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
github.com/containerd/zfs v1.1.0 h1:n7OZ7jZumLIqNJqXrEc/paBM840mORnmGdJDmAmJZHM=
github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE=
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
-github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
+github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
+github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
+github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM=
+github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU=
-github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
-github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
-github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
-github.com/containers/ocicrypt v1.1.10 h1:r7UR6o8+lyhkEywetubUUgcKFjOWOaWz8cEBrCPX0ic=
+github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
+github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ=
+github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM=
+github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8=
+github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM=
+github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
+github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M=
+github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v23.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v27.1.2+incompatible h1:nYviRv5Y+YAKx3dFrTvS1ErkyVVunKOhoweCTE1BsnI=
+github.com/docker/cli v27.1.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
-github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v23.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
+github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
+github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
@@ -276,75 +1071,132 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
+github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
+github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
+github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
+github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q=
+github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
+github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs=
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/foxcpp/go-mockdns v0.0.0-20210729171921-fb145fc6f897/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
+github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
+github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
-github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
+github.com/go-ini/ini v1.66.6/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
+github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
+github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
-github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
+github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -354,10 +1206,14 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
-github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -370,6 +1226,8 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -385,13 +1243,21 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
-github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -403,15 +1269,22 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -419,56 +1292,135 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM=
+github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
+github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
-github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw=
+github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/hanwen/go-fuse/v2 v2.5.1 h1:OQBE8zVemSocRxA4OaFJbjJ5hlpCmIWbGr7r0M4uoQQ=
+github.com/hanwen/go-fuse/v2 v2.5.1/go.mod h1:xKwi1cF7nXAOBCXujD5ie0ZKsxc8GGSA1rlMJc+8IJs=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/intel/goresctrl v0.3.0 h1:K2D3GOzihV7xSBedGxONSlaw/un1LZgWsc9IfqipN4c=
-github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk=
+github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
+github.com/intel/goresctrl v0.7.0 h1:x6RclP6LiJc24t9mf47BRbjf06B8oVisZMBv31x3rKc=
+github.com/intel/goresctrl v0.7.0/go.mod h1:T3ZZnuHSNouwELB5wvOoUJaB7l/4Rm23rJy/wuWJlr0=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -483,21 +1435,34 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
-github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
-github.com/klauspost/cpuid/v2 v2.0.4 h1:g0I61F2K2DjRHz1cnxlkNSBIaePVoJIjjnHui8QHbiw=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -505,49 +1470,94 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
+github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ=
+github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
+github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc=
+github.com/lestrrat-go/jwx v1.2.25/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY=
+github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
+github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
-github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8=
+github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
-github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
-github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
-github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=
-github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
+github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
+github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
+github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
+github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
+github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
+github.com/moby/sys/signal v0.7.1 h1:PrQxdvxcGijdo6UXXo/lU/TvHUWyPhj7UOpSo8tuvk0=
+github.com/moby/sys/signal v0.7.1/go.mod h1:Se1VGehYokAkrSQwL4tDzHvETwUZlnY7S5XtQ50mQp8=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
-github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc=
github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
+github.com/moby/sys/symlink v0.3.0 h1:GZX89mEZ9u53f97npBy4Rc3vJKj7JBDj/PN2I22GrNU=
+github.com/moby/sys/symlink v0.3.0/go.mod h1:3eNdhduHmYPcgsJtZXW1W4XUJdZGBIkttZ8xKqPUJq0=
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -555,18 +1565,24 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/mrunalp/fileutils v0.5.1/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -574,110 +1590,158 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.5.0 h1:TRtrvv2vdQqzkwrQ1ke6vtXf7IK34RBUJafIy1wMwls=
-github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE=
-github.com/onsi/gomega v1.24.2/go.mod h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWgHWnk=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
+github.com/open-policy-agent/opa v0.42.2/go.mod h1:MrmoTi/BsKWT58kXlVayBb+rYVeaMwuBm3nYAN3923s=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
+github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
+github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
-github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 h1:DmNGcqH3WDbV5k8OJ+esPWbqUOX5rMLR2PMvziDMJi0=
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
+github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
+github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.20.1 h1:IMJXHOD6eARkQpxo8KkhgEVFlBNm+nkrFUyGlIu7Na8=
+github.com/prometheus/client_golang v1.20.1/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
-github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
-github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -687,30 +1751,52 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
+github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw=
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -719,10 +1805,15 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -732,24 +1823,37 @@ github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BG
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8=
-github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
+github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.15 h1:nuqt+pdC/KqswQKhETJjo7pvn/k4xMUxgW6liI7XpnM=
+github.com/urfave/cli v1.22.15/go.mod h1:wSan1hmo5zeyLGBjRJbzRTNk8gwoYa2B9n4q9dmRIc0=
+github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
+github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
+github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
+github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0=
+github.com/veraison/go-cose v1.0.0-rc.1/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
+github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -759,19 +1863,36 @@ github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
-go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
+go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
+go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
+go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
+go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -779,60 +1900,139 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 h1:RsQi0qJ2imFfCvZabqzM9cNXBG8k6gXMv1A0cXRmH6A=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0/go.mod h1:vsh3ySueQCiKPxFLvjWC4Z135gIa34TQ/NSqkDTZYUM=
-go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
-go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
-go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
-go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
-go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
-go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
-go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
-go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
-go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
-go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
+go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0/go.mod h1:5eCOqeGphOyz6TsY3ZDNjE33SM/TFAK3RGuCL2naTgY=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
+go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
+go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
+go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
+go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0/go.mod h1:M1hVZHNxcbkAlcvrOMlpQ4YOO3Awf+4N2dxkZL3xm04=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0/go.mod h1:ceUgdyfNv4h4gLxHR0WNfDiiVmZFodZhZSbOLhpxqXE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0/go.mod h1:E+/KKhwOSw8yoPxSSuUHG6vKppkvhN+S1Jc7Nib3k3o=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
+go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
+go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU=
+go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
+go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
+go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
+go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
+go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
+go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU=
+go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
+go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
+go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
+go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
+go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
+go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
+go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
-go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
+golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
+golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
+golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
+golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 h1:qCEDpW1G+vcj3Y7Fy52pEM1AWm3abj8WimGYejI3SC4=
+golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -843,6 +2043,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@@ -851,15 +2053,28 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
+golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -876,6 +2091,7 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -893,27 +2109,85 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
+golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
+golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
-golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
+golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
+golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
+golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -924,22 +2198,30 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
-golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -948,13 +2230,13 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -963,6 +2245,7 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -976,68 +2259,155 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
+golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
+golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
+golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
-golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -1052,8 +2422,12 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1080,14 +2454,49 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
+golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM=
+golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
+gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
+gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -1105,13 +2514,58 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
+google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
+google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
+google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
+google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08=
+google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
+google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
+google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
+google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
+google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0=
+google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
+google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E=
+google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms=
+google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4=
+google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
+google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
@@ -1140,19 +2594,146 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg=
-google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic=
-google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f h1:2yNACc1O40tTnrsbk9Cv6oxiW8pxI/pXj0wRtdlYmgY=
-google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f/go.mod h1:Uy9bTZJqmfrw2rIBxgGLnamc78euZULUBrLZ9XTITKI=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
+google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
+google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=
+google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
+google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
+google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE=
+google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA=
+google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
+google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
+google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA=
+google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
+google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY=
+google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk=
+google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64=
+google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64=
+google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y=
+google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0=
+google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
+google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
+google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8=
+google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230629202037-9506855d4529/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
+google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1160,7 +2741,6 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -1169,9 +2749,43 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
-google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
+google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
+google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
+google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
+google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
+google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c=
+google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1185,14 +2799,22 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
@@ -1201,16 +2823,16 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1218,11 +2840,18 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
+gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
+gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1230,51 +2859,120 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
-k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ=
-k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU=
+k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs=
+k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
+k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ=
-k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
+k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
+k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
+k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
-k8s.io/apiserver v0.26.2 h1:Pk8lmX4G14hYqJd1poHGC08G03nIHVqdJMR0SD3IH3o=
-k8s.io/apiserver v0.26.2/go.mod h1:GHcozwXgXsPuOJ28EnQ/jXEM9QeG6HT22YxSNmpYNh8=
+k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ=
+k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY=
+k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
-k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI=
-k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU=
+k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y=
+k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
+k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
-k8s.io/component-base v0.26.2 h1:IfWgCGUDzrD6wLLgXEstJKYZKAFS2kO+rBRi0p3LqcI=
-k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs=
+k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI=
+k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs=
+k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo=
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.27.1 h1:KWO+U8MfI9drXB/P4oU9VchaWYOlwDglJZVHWMpTT3Q=
-k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0=
+k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
+k8s.io/cri-api v0.25.0/go.mod h1:J1rAyQkSJ2Q6I+aBMOVgg2/cbbebso6FNa0UagiR0kc=
+k8s.io/cri-api v0.32.0-alpha.0 h1:Rs9prajcHWZAdy9ueQdD2R+OOnDD3rKYbM9hQ90iEQU=
+k8s.io/cri-api v0.32.0-alpha.0/go.mod h1:Po3TMAYH/+KrZabi7QiwQI4a692oZcUOUThd/rqwxrI=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
-k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
-k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk=
-k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20=
+modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
+modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc=
+modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw=
+modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI=
+modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
+modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
+modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws=
+modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo=
+modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g=
+modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
+modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
+modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
+modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
+modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A=
+modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU=
+modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU=
+modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
+modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0=
+modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s=
+modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA=
+modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0=
+modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0=
+modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI=
+modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug=
+modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
+modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
+modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
+modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0=
+modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
+modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
+modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw=
+modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0=
+modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8=
+oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
-sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
-sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
-sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
-tags.cncf.io/container-device-interface v0.7.2 h1:MLqGnWfOr1wB7m08ieI4YJ3IoLKKozEnnNYBtacDPQU=
-tags.cncf.io/container-device-interface v0.7.2/go.mod h1:Xb1PvXv2BhfNb3tla4r9JL129ck1Lxv9KuU6eVOfKto=
-tags.cncf.io/container-device-interface/specs-go v0.7.0 h1:w/maMGVeLP6TIQJVYT5pbqTi8SCw/iHZ+n4ignuGHqg=
-tags.cncf.io/container-device-interface/specs-go v0.7.0/go.mod h1:hMAwAbMZyBLdmYqWgYcKH0F/yctNpV3P35f+/088A80=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+tags.cncf.io/container-device-interface v0.8.0 h1:8bCFo/g9WODjWx3m6EYl3GfUG31eKJbaggyBDxEldRc=
+tags.cncf.io/container-device-interface v0.8.0/go.mod h1:Apb7N4VdILW0EVdEMRYXIDVRZfNJZ+kmEUss2kRRQ6Y=
+tags.cncf.io/container-device-interface/specs-go v0.8.0 h1:QYGFzGxvYK/ZLMrjhvY0RjpUavIn4KcmRmVP/JjdBTA=
+tags.cncf.io/container-device-interface/specs-go v0.8.0/go.mod h1:BhJIkjjPh4qpys+qm4DAYtUyryaTDg9zris+AczXyws=
diff --git a/integration/client/go.mod b/integration/client/go.mod
index 201c1de702c5..7ec660c17692 100644
--- a/integration/client/go.mod
+++ b/integration/client/go.mod
@@ -9,7 +9,7 @@ require (
github.com/containerd/cgroups/v3 v3.0.2
github.com/containerd/containerd v1.7.17 // see replace; the actual version of containerd is replaced with the code at the root of this repository
github.com/containerd/containerd/api v1.7.19
- github.com/containerd/continuity v0.4.2
+ github.com/containerd/continuity v0.4.3
github.com/containerd/errdefs v0.1.0
github.com/containerd/go-runc v1.0.0
github.com/containerd/log v0.1.0
@@ -46,7 +46,7 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/go-cmp v0.6.0 // indirect
- github.com/klauspost/compress v1.16.7 // indirect
+ github.com/klauspost/compress v1.17.2 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
diff --git a/integration/client/go.sum b/integration/client/go.sum
index 93de5b103399..baa10f9c7dc3 100644
--- a/integration/client/go.sum
+++ b/integration/client/go.sum
@@ -1171,6 +1171,7 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
@@ -1178,12 +1179,14 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim v0.9.10/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.11.1/go.mod h1:nFJmaO4Zr5Y7eADdFOpYswDDlNVbvcIJJNJLECr5JQg=
github.com/Microsoft/hcsshim v0.11.7 h1:vl/nj3Bar/CvJSYo7gIQPyRWc9f3c6IeSNavBTSZNZQ=
github.com/Microsoft/hcsshim v0.11.7/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU=
github.com/Microsoft/hcsshim/test v0.0.0-20210408205431-da33ecd607e1 h1:pVKfKyPkXna29XlGjxSr9J0A7vNucOUHZ/2ClcTWalw=
@@ -1203,11 +1206,14 @@ github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
+github.com/alecthomas/kingpin/v2 v2.3.1/go.mod h1:oYL5vtsvEHZGHxU7DMp32Dvx+qL+ptGn6lWaot2vCNE=
+github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
github.com/alexflint/go-filemutex v1.2.0/go.mod h1:mYyQSWvw9Tx2/H2n9qXPb52tTYfE0pZAWcBq5mK025c=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
@@ -1302,13 +1308,14 @@ github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARu
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
-github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM=
-github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
+github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
+github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
+github.com/containerd/fuse-overlayfs-snapshotter v1.0.8/go.mod h1:mY+oK2oQhlUk6hP5HNG28/OK9oqQpB2wK1w6sudC5gQ=
github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34=
github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
@@ -1321,11 +1328,14 @@ github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3
github.com/containerd/nri v0.6.1/go.mod h1:7+sX3wNx+LR7RzhjnJiUkFDhn18P5Bg/0VnJ/uXpRJM=
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
+github.com/containerd/stargz-snapshotter v0.15.1/go.mod h1:74D+J1m1RMXytLmWxegXWhtOSRHPWZKpKc2NdK3S+us=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
github.com/containerd/ttrpc v1.1.2/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak=
github.com/containerd/ttrpc v1.2.3/go.mod h1:ieWsXucbb8Mj9PH0rXCw1i8IunRbbAiDkpXkbfflWBM=
github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU=
github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
@@ -1390,12 +1400,14 @@ github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop
github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v23.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v23.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
@@ -1438,6 +1450,7 @@ github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87K
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
@@ -1474,6 +1487,7 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
@@ -1495,20 +1509,25 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
+github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
@@ -1576,6 +1595,7 @@ github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25ol
github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -1673,15 +1693,19 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/hanwen/go-fuse/v2 v2.4.0/go.mod h1:xKwi1cF7nXAOBCXujD5ie0ZKsxc8GGSA1rlMJc+8IJs=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
@@ -1700,6 +1724,7 @@ github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -1735,8 +1760,8 @@ github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
-github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
-github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
+github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -1755,6 +1780,7 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ=
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
@@ -1766,6 +1792,7 @@ github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuz
github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
+github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -1773,7 +1800,9 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -1859,6 +1888,12 @@ github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8Ay
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
+github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
+github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
+github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
+github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
+github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
+github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
@@ -1874,6 +1909,11 @@ github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
github.com/onsi/gomega v1.24.2/go.mod h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWgHWnk=
+github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
+github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
+github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
+github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
+github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/open-policy-agent/opa v0.42.2/go.mod h1:MrmoTi/BsKWT58kXlVayBb+rYVeaMwuBm3nYAN3923s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
@@ -1882,6 +1922,7 @@ github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
+github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
@@ -1939,12 +1980,15 @@ github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrb
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -1953,6 +1997,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -1961,8 +2007,10 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
@@ -1974,6 +2022,7 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -2061,7 +2110,9 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
+github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
+github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0=
github.com/veraison/go-cose v1.0.0-rc.1/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
@@ -2076,6 +2127,8 @@ github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+github.com/xhit/go-str2duration v1.2.0/go.mod h1:3cPSlfZlUHVlneIVfePFWcJZsuwf+P1v2SRTV4cUmp4=
+github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
@@ -2122,6 +2175,7 @@ go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M
go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM=
go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ=
+go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
@@ -2151,6 +2205,7 @@ go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x
go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4=
go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM=
+go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
@@ -3083,15 +3138,18 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU=
+k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc=
k8s.io/apimachinery v0.25.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0=
k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
+k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8=
k8s.io/apiserver v0.26.2/go.mod h1:GHcozwXgXsPuOJ28EnQ/jXEM9QeG6HT22YxSNmpYNh8=
k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU=
+k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo=
k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs=
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.25.3/go.mod h1:riC/P0yOGUf2K1735wW+CXs1aY2ctBgePtnnoFLd0dU=
-k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0=
+k8s.io/cri-api v0.29.0-alpha.2/go.mod h1:tvnHHNKvRdPtosUKc0oHBx5RbBOxYq+/VGlb8/G28Ro=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
@@ -3101,15 +3159,16 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kms v0.26.2/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
+k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
@@ -3168,6 +3227,7 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore
index 529c3412ba95..45ad0f1ae309 100644
--- a/vendor/dario.cat/mergo/.gitignore
+++ b/vendor/dario.cat/mergo/.gitignore
@@ -13,6 +13,9 @@
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
+# Golang/Intellij
+.idea
+
# Project-local glide cache, RE: /~https://github.com/Masterminds/glide/issues/736
.glide/
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md
index 7d0cf9f32afe..0b3c488893b0 100644
--- a/vendor/dario.cat/mergo/README.md
+++ b/vendor/dario.cat/mergo/README.md
@@ -44,13 +44,21 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
## Status
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](/~https://github.com/imdario/mergo#mergo-in-the-wild).
+Mergo is stable and frozen, ready for production. Check a short list of the projects using it at large scale [here](/~https://github.com/imdario/mergo#mergo-in-the-wild).
+
+No new features are accepted. They will be considered for a future v2 that improves the implementation and fixes bugs for corner cases.
### Important notes
#### 1.0.0
-In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`.
+In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released.
+
+If the vanity URL is causing issues in your project because a transitive dependency pulls in Mergo - that is, Mergo isn't a direct dependency of your project - it is recommended to use [replace](/~https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one with the old import URL:
+
+```
+replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16
+```
#### 0.3.9
@@ -64,55 +72,24 @@ If you were using Mergo before April 6th, 2015, please check your project works
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
-
### Mergo in the wild
-- [moby/moby](/~https://github.com/moby/moby)
-- [kubernetes/kubernetes](/~https://github.com/kubernetes/kubernetes)
-- [vmware/dispatch](/~https://github.com/vmware/dispatch)
-- [Shopify/themekit](/~https://github.com/Shopify/themekit)
-- [imdario/zas](/~https://github.com/imdario/zas)
-- [matcornic/hermes](/~https://github.com/matcornic/hermes)
-- [OpenBazaar/openbazaar-go](/~https://github.com/OpenBazaar/openbazaar-go)
-- [kataras/iris](/~https://github.com/kataras/iris)
-- [michaelsauter/crane](/~https://github.com/michaelsauter/crane)
-- [go-task/task](/~https://github.com/go-task/task)
-- [sensu/uchiwa](/~https://github.com/sensu/uchiwa)
-- [ory/hydra](/~https://github.com/ory/hydra)
-- [sisatech/vcli](/~https://github.com/sisatech/vcli)
-- [dairycart/dairycart](/~https://github.com/dairycart/dairycart)
-- [projectcalico/felix](/~https://github.com/projectcalico/felix)
-- [resin-os/balena](/~https://github.com/resin-os/balena)
-- [go-kivik/kivik](/~https://github.com/go-kivik/kivik)
-- [Telefonica/govice](/~https://github.com/Telefonica/govice)
-- [supergiant/supergiant](supergiant/supergiant)
-- [SergeyTsalkov/brooce](/~https://github.com/SergeyTsalkov/brooce)
-- [soniah/dnsmadeeasy](/~https://github.com/soniah/dnsmadeeasy)
-- [ohsu-comp-bio/funnel](/~https://github.com/ohsu-comp-bio/funnel)
-- [EagerIO/Stout](/~https://github.com/EagerIO/Stout)
-- [lynndylanhurley/defsynth-api](/~https://github.com/lynndylanhurley/defsynth-api)
-- [russross/canvasassignments](/~https://github.com/russross/canvasassignments)
-- [rdegges/cryptly-api](/~https://github.com/rdegges/cryptly-api)
-- [casualjim/exeggutor](/~https://github.com/casualjim/exeggutor)
-- [divshot/gitling](/~https://github.com/divshot/gitling)
-- [RWJMurphy/gorl](/~https://github.com/RWJMurphy/gorl)
-- [andrerocker/deploy42](/~https://github.com/andrerocker/deploy42)
-- [elwinar/rambler](/~https://github.com/elwinar/rambler)
-- [tmaiaroto/gopartman](/~https://github.com/tmaiaroto/gopartman)
-- [jfbus/impressionist](/~https://github.com/jfbus/impressionist)
-- [Jmeyering/zealot](/~https://github.com/Jmeyering/zealot)
-- [godep-migrator/rigger-host](/~https://github.com/godep-migrator/rigger-host)
-- [Dronevery/MultiwaySwitch-Go](/~https://github.com/Dronevery/MultiwaySwitch-Go)
-- [thoas/picfit](/~https://github.com/thoas/picfit)
-- [mantasmatelis/whooplist-server](/~https://github.com/mantasmatelis/whooplist-server)
-- [jnuthong/item_search](/~https://github.com/jnuthong/item_search)
-- [bukalapak/snowboard](/~https://github.com/bukalapak/snowboard)
-- [containerssh/containerssh](/~https://github.com/containerssh/containerssh)
-- [goreleaser/goreleaser](/~https://github.com/goreleaser/goreleaser)
-- [tjpnz/structbot](/~https://github.com/tjpnz/structbot)
+Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including:
+
+* [containerd/containerd](/~https://github.com/containerd/containerd)
+* [datadog/datadog-agent](/~https://github.com/datadog/datadog-agent)
+* [docker/cli/](/~https://github.com/docker/cli/)
+* [goreleaser/goreleaser](/~https://github.com/goreleaser/goreleaser)
+* [go-micro/go-micro](/~https://github.com/go-micro/go-micro)
+* [grafana/loki](/~https://github.com/grafana/loki)
+* [kubernetes/kubernetes](/~https://github.com/kubernetes/kubernetes)
+* [masterminds/sprig](/~https://github.com/Masterminds/sprig)
+* [moby/moby](/~https://github.com/moby/moby)
+* [slackhq/nebula](/~https://github.com/slackhq/nebula)
+* [volcano-sh/volcano](/~https://github.com/volcano-sh/volcano)
## Install
@@ -141,6 +118,39 @@ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
}
```
+If you need to override pointers, so the source pointer's value is assigned to the destination's pointer, you must use `WithoutDereference`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "dario.cat/mergo"
+)
+
+type Foo struct {
+ A *string
+ B int64
+}
+
+func main() {
+ first := "first"
+ second := "second"
+ src := Foo{
+ A: &first,
+ B: 2,
+ }
+
+ dest := Foo{
+ A: &second,
+ B: 1,
+ }
+
+ mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference)
+}
+```
+
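The example above stops short of showing the effect. Below is a minimal runnable sketch of the same idea with the result printed; the expected output assumes the documented semantics of `WithOverride` plus `WithoutDereference` (the destination pointer is replaced by the source pointer instead of its pointee being copied):

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

type Foo struct {
	A *string
	B int64
}

func main() {
	first, second := "first", "second"
	src := Foo{A: &first, B: 2}
	dest := Foo{A: &second, B: 1}

	// WithoutDereference assigns the pointer itself, so dest.A ends up
	// aliasing src.A rather than having "first" copied into its pointee.
	if err := mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference); err != nil {
		panic(err)
	}

	fmt.Println(*dest.A, dest.B) // expected: first 2
	fmt.Println(dest.A == src.A) // expected: true, same pointer
}
```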
Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
```go
diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go
index b50d5c2a4e7c..759b4f74fd5a 100644
--- a/vendor/dario.cat/mergo/map.go
+++ b/vendor/dario.cat/mergo/map.go
@@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
}
fieldName := field.Name
fieldName = changeInitialCase(fieldName, unicode.ToLower)
- if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
+ if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue {
dstMap[fieldName] = src.Field(i).Interface()
}
}
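For readers following the `deepMap` change above: a key is now written when it is missing from the destination map, when overwriting is requested and the source field is non-empty, or when `overwriteWithEmptyValue` is set. A sketch of the user-visible behaviour through `mergo.Map` (the struct and its values are hypothetical):

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

type Server struct {
	Host string
	Port int
}

func main() {
	dst := map[string]interface{}{"host": "localhost"}
	src := Server{Host: "example.com", Port: 0}

	// With WithOverride, the non-empty Host replaces the existing "host"
	// key; the zero Port only lands because "port" is absent. An empty
	// source field never clobbers an existing key unless
	// WithOverwriteWithEmptyValue is also supplied.
	if err := mergo.Map(&dst, src, mergo.WithOverride); err != nil {
		panic(err)
	}

	fmt.Println(dst["host"], dst["port"]) // expected: example.com 0
}
```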
diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go
index 0ef9b2138c15..fd47c95b2b84 100644
--- a/vendor/dario.cat/mergo/merge.go
+++ b/vendor/dario.cat/mergo/merge.go
@@ -269,7 +269,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return
}
- } else {
+ } else if src.Elem().Kind() != reflect.Struct {
if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
dst.Set(src)
}
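The added guard keeps pointers-to-structs out of the wholesale `dst.Set(src)` path; struct pointees are already handled by the recursive `deepMerge` call above, so replacing the pointer as well would discard fields the recursion preserved. A sketch of the intended behaviour (types and values are hypothetical):

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

type Inner struct {
	A, B string
}

type Outer struct {
	In *Inner
}

func main() {
	dst := Outer{In: &Inner{A: "keep"}}
	src := Outer{In: &Inner{B: "fill"}}

	// Both pointers are non-nil structs, so the pointees are merged field
	// by field instead of dst.In being swapped for src.In.
	if err := mergo.Merge(&dst, src); err != nil {
		panic(err)
	}

	fmt.Println(dst.In.A, dst.In.B) // expected: keep fill
}
```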
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md
index 16abdfc084f7..9433004a2809 100644
--- a/vendor/github.com/cenkalti/backoff/v4/README.md
+++ b/vendor/github.com/cenkalti/backoff/v4/README.md
@@ -1,4 +1,4 @@
-# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
@@ -21,8 +21,6 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
-[travis]: https://travis-ci.org/cenkalti/backoff
-[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go
index 2c56c1e7189b..aac99f196ad5 100644
--- a/vendor/github.com/cenkalti/backoff/v4/exponential.go
+++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go
@@ -71,6 +71,9 @@ type Clock interface {
Now() time.Time
}
+// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
+type ExponentialBackOffOpts func(*ExponentialBackOff)
+
// Default values for ExponentialBackOff.
const (
DefaultInitialInterval = 500 * time.Millisecond
@@ -81,7 +84,7 @@ const (
)
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
-func NewExponentialBackOff() *ExponentialBackOff {
+func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
b := &ExponentialBackOff{
InitialInterval: DefaultInitialInterval,
RandomizationFactor: DefaultRandomizationFactor,
@@ -91,10 +94,62 @@ func NewExponentialBackOff() *ExponentialBackOff {
Stop: Stop,
Clock: SystemClock,
}
+ for _, fn := range opts {
+ fn(b)
+ }
b.Reset()
return b
}
+// WithInitialInterval sets the initial interval between retries.
+func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.InitialInterval = duration
+ }
+}
+
+// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
+func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.RandomizationFactor = randomizationFactor
+ }
+}
+
+// WithMultiplier sets the multiplier for increasing the interval after each retry.
+func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Multiplier = multiplier
+ }
+}
+
+// WithMaxInterval sets the maximum interval between retries.
+func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxInterval = duration
+ }
+}
+
+// WithMaxElapsedTime sets the maximum total time for retries.
+func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxElapsedTime = duration
+ }
+}
+
+// WithRetryStopDuration sets the duration after which retries should stop.
+func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Stop = duration
+ }
+}
+
+// WithClockProvider sets the clock used to measure time.
+func WithClockProvider(clock Clock) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Clock = clock
+ }
+}
+
type systemClock struct{}
func (t systemClock) Now() time.Time {
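The new functional options compose at construction time, so callers no longer need to mutate struct fields after `NewExponentialBackOff`. A small usage sketch (the retried operation and the chosen durations are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"time"

	backoff "github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(100*time.Millisecond),
		backoff.WithMultiplier(2.0),
		backoff.WithMaxInterval(2*time.Second),
		backoff.WithMaxElapsedTime(10*time.Second),
	)

	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}

	// Retry runs op with the configured policy until it succeeds or
	// MaxElapsedTime is exhausted.
	if err := backoff.Retry(op, b); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("succeeded after", attempts, "attempts")
}
```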
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
index 8bf0e5b78153..33c88305c46e 100644
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
- [VictoriaMetrics](/~https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](/~https://github.com/coocood/freecache)
- [FastCache](/~https://github.com/VictoriaMetrics/fastcache)
+- [Ristretto](/~https://github.com/dgraph-io/ristretto)
+- [Badger](/~https://github.com/dgraph-io/badger)
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
index a9e0d45c9dcc..78bddf1ceed7 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -19,10 +19,13 @@ const (
// Store the primes in an array as well.
//
// The consts are used when possible in Go code to avoid MOVs but we need a
-// contiguous array of the assembly code.
+// contiguous array for the assembly code.
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
+//
+// Note that a zero-valued Digest is not ready to receive writes.
+// Call Reset or create a Digest using New before calling other methods.
type Digest struct {
v1 uint64
v2 uint64
@@ -33,19 +36,31 @@ type Digest struct {
n int // how much of mem is used
}
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
+// New creates a new Digest with a zero seed.
func New() *Digest {
+ return NewWithSeed(0)
+}
+
+// NewWithSeed creates a new Digest with the given seed.
+func NewWithSeed(seed uint64) *Digest {
var d Digest
- d.Reset()
+ d.ResetWithSeed(seed)
return &d
}
// Reset clears the Digest's state so that it can be reused.
+// It uses a seed value of zero.
func (d *Digest) Reset() {
- d.v1 = primes[0] + prime2
- d.v2 = prime2
- d.v3 = 0
- d.v4 = -primes[0]
+ d.ResetWithSeed(0)
+}
+
+// ResetWithSeed clears the Digest's state so that it can be reused.
+// It uses the given seed to initialize the state.
+func (d *Digest) ResetWithSeed(seed uint64) {
+ d.v1 = seed + prime1 + prime2
+ d.v2 = seed + prime2
+ d.v3 = seed
+ d.v4 = seed - prime1
d.total = 0
d.n = 0
}
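With `NewWithSeed` and `ResetWithSeed`, seeded hashing no longer requires reconstructing the internal state by hand. A short sketch (the inputs are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	const seed = 42

	d := xxhash.NewWithSeed(seed)
	d.Write([]byte("hello "))
	d.Write([]byte("world"))
	fmt.Printf("seeded: %#x\n", d.Sum64())

	// ResetWithSeed reuses the same Digest for another seeded run.
	d.ResetWithSeed(seed)
	d.Write([]byte("hello world"))
	fmt.Printf("reset:  %#x\n", d.Sum64()) // same value as above

	// Sum64 remains the zero-seed convenience for one-shot hashing.
	fmt.Printf("zero:   %#x\n", xxhash.Sum64([]byte("hello world")))
}
```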
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
index 9216e0a40c1a..78f95f256103 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -6,7 +6,7 @@
package xxhash
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
//go:noescape
func Sum64(b []byte) uint64
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
index 26df13bba4b7..118e49e819e0 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -3,7 +3,7 @@
package xxhash
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
func Sum64(b []byte) uint64 {
// A simpler version would be
// d := New()
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
index e86f1b5fd8e4..05f5e7dfe7b7 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -5,7 +5,7 @@
package xxhash
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
index 1c1638fd88a1..cf9d42aed536 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -33,7 +33,7 @@ import (
//
// See /~https://github.com/golang/go/issues/42739 for discussion.
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
diff --git a/vendor/github.com/cilium/ebpf/.clang-format b/vendor/github.com/cilium/ebpf/.clang-format
index 4eb94b1baa87..3f74dc023665 100644
--- a/vendor/github.com/cilium/ebpf/.clang-format
+++ b/vendor/github.com/cilium/ebpf/.clang-format
@@ -14,4 +14,6 @@ KeepEmptyLinesAtTheStartOfBlocks: false
TabWidth: 4
UseTab: ForContinuationAndIndentation
ColumnLimit: 1000
+# Go compiler comments need to stay unindented.
+CommentPragmas: '^go:.*'
...
diff --git a/vendor/github.com/cilium/ebpf/.golangci.yaml b/vendor/github.com/cilium/ebpf/.golangci.yaml
index dc62dd6d0fc7..06743dfc91b4 100644
--- a/vendor/github.com/cilium/ebpf/.golangci.yaml
+++ b/vendor/github.com/cilium/ebpf/.golangci.yaml
@@ -9,7 +9,6 @@ issues:
linters:
disable-all: true
enable:
- - deadcode
- errcheck
- goimports
- gosimple
@@ -17,10 +16,9 @@ linters:
- ineffassign
- misspell
- staticcheck
- - structcheck
- typecheck
- unused
- - varcheck
+ - gofmt
# Could be enabled later:
# - gocyclo
diff --git a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
index 8cd7e2486e70..26f555eb7a76 100644
--- a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
+++ b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
@@ -1,7 +1,21 @@
Architecture of the library
===
- ELF -> Specifications -> Objects -> Links
+```mermaid
+graph RL
+ Program --> ProgramSpec --> ELF
+ btf.Spec --> ELF
+ Map --> MapSpec --> ELF
+ Links --> Map & Program
+ ProgramSpec -.-> btf.Spec
+ MapSpec -.-> btf.Spec
+ subgraph Collection
+ Program & Map
+ end
+ subgraph CollectionSpec
+ ProgramSpec & MapSpec & btf.Spec
+ end
+```
ELF
---
@@ -11,7 +25,7 @@ an ELF file which contains program byte code (aka BPF), but also metadata for
maps used by the program. The metadata follows the conventions set by libbpf
shipped with the kernel. Certain ELF sections have special meaning
and contain structures defined by libbpf. Newer versions of clang emit
-additional metadata in BPF Type Format (aka BTF).
+additional metadata in [BPF Type Format](#BTF).
The library aims to be compatible with libbpf so that moving from a C toolchain
to a Go one creates little friction. To that end, the [ELF reader](elf_reader.go)
@@ -20,41 +34,33 @@ if possible.
The output of the ELF reader is a `CollectionSpec` which encodes
all of the information contained in the ELF in a form that is easy to work with
-in Go.
-
-### BTF
-
-The BPF Type Format describes more than just the types used by a BPF program. It
-includes debug aids like which source line corresponds to which instructions and
-what global variables are used.
-
-[BTF parsing](internal/btf/) lives in a separate internal package since exposing
-it would mean an additional maintenance burden, and because the API still
-has sharp corners. The most important concept is the `btf.Type` interface, which
-also describes things that aren't really types like `.rodata` or `.bss` sections.
-`btf.Type`s can form cyclical graphs, which can easily lead to infinite loops if
-one is not careful. Hopefully a safe pattern to work with `btf.Type` emerges as
-we write more code that deals with it.
+in Go. The returned `CollectionSpec` should be deterministic: reading the same ELF
+file on different systems must produce the same output.
+As a corollary, any changes that depend on the runtime environment like the
+current kernel version must happen when creating [Objects](#Objects).
Specifications
---
-`CollectionSpec`, `ProgramSpec` and `MapSpec` are blueprints for in-kernel
+`CollectionSpec` is a very simple container for `ProgramSpec`, `MapSpec` and
+`btf.Spec`. Avoid adding functionality to it if possible.
+
+`ProgramSpec` and `MapSpec` are blueprints for in-kernel
objects and contain everything necessary to execute the relevant `bpf(2)`
-syscalls. Since the ELF reader outputs a `CollectionSpec` it's possible to
-modify clang-compiled BPF code, for example to rewrite constants. At the same
-time the [asm](asm/) package provides an assembler that can be used to generate
-`ProgramSpec` on the fly.
+syscalls. They refer to `btf.Spec` for type information such as `Map` key and
+value types.
-Creating a spec should never require any privileges or be restricted in any way,
-for example by only allowing programs in native endianness. This ensures that
-the library stays flexible.
+The [asm](asm/) package provides an assembler that can be used to generate
+`ProgramSpec` on the fly.
Objects
---
-`Program` and `Map` are the result of loading specs into the kernel. Sometimes
-loading a spec will fail because the kernel is too old, or a feature is not
+`Program` and `Map` are the result of loading specifications into the kernel.
+Features that depend on knowledge of the current system (e.g. kernel version)
+are implemented at this point.
+
+Sometimes loading a spec will fail because the kernel is too old, or a feature is not
enabled. There are multiple ways the library deals with that:
* Fallback: older kernels don't allow naming programs and maps. The library
@@ -73,7 +79,7 @@ useful when our higher-level API doesn't support a particular use case.
Links
---
-BPF can be attached to many different points in the kernel and newer BPF hooks
+Programs can be attached to many different points in the kernel and newer BPF hooks
tend to use bpf_link to do so. Older hooks unfortunately use a combination of
syscalls, netlink messages, etc. Adding support for a new link type should not
pull in large dependencies like netlink, so XDP programs or tracepoints are
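The ELF -> Specification -> Object flow described above corresponds to very little user code. A hedged sketch (the object file name and program name are hypothetical):

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// ELF -> CollectionSpec: deterministic parsing, no privileges required.
	spec, err := ebpf.LoadCollectionSpec("bpf_prog.o")
	if err != nil {
		log.Fatalf("parsing ELF: %v", err)
	}

	// CollectionSpec -> Collection: Maps and Programs are loaded into the
	// kernel; anything depending on the running kernel happens here.
	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		log.Fatalf("loading collection: %v", err)
	}
	defer coll.Close()

	// Objects are now live and can be attached via the link package.
	prog := coll.Programs["my_prog"]
	_ = prog
}
```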
diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
index 0d29eae81e3e..bf57da939539 100644
--- a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
+++ b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
@@ -5,15 +5,23 @@ the form of pull requests and issues reporting bugs or suggesting new features
are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get
a better understanding for the high-level goals.
-New features must be accompanied by tests. Before starting work on any large
-feature, please [join](https://ebpf.io/slack) the
-[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack to
-discuss the design first.
+## Adding a new feature
-When submitting pull requests, consider writing details about what problem you
-are solving and why the proposed approach solves that problem in commit messages
-and/or pull request description to help future library users and maintainers to
-reason about the proposed changes.
+1. [Join](https://ebpf.io/slack) the
+[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel to discuss your requirements and how the feature can be implemented. The most important part is figuring out how much new exported API is necessary. **The less new API is required the easier it will be to land the feature.**
+2. (*optional*) Create a draft PR if you want to discuss the implementation or have hit a problem. It's fine if this doesn't compile or contains debug statements.
+3. Create a PR that is ready to merge. This must pass CI and have tests.
+
+### API stability
+
+The library doesn't guarantee the stability of its API at the moment.
+
+1. If possible avoid breakage by introducing new API and deprecating the old one
+ at the same time. If an API was deprecated in v0.x it can be removed in v0.x+1.
+2. Breaking API in a way that causes compilation failures is acceptable but must
+ have good reasons.
+3. Changing the semantics of the API without causing compilation failures is
+ heavily discouraged.
## Running the tests
@@ -35,6 +43,6 @@ Examples:
./run-tests.sh 5.4
# Run a subset of tests:
-./run-tests.sh 5.4 go test ./link
+./run-tests.sh 5.4 ./link
```
diff --git a/vendor/github.com/cilium/ebpf/MAINTAINERS.md b/vendor/github.com/cilium/ebpf/MAINTAINERS.md
index 9c18e7e76f54..a56a03e3947f 100644
--- a/vendor/github.com/cilium/ebpf/MAINTAINERS.md
+++ b/vendor/github.com/cilium/ebpf/MAINTAINERS.md
@@ -1,8 +1,3 @@
# Maintainers
- * [Lorenz Bauer]
- * [Timo Beckers] (Isovalent)
-
-
-[Lorenz Bauer]: /~https://github.com/lmb
-[Timo Beckers]: /~https://github.com/ti-mo
+Maintainers can be found in the [Cilium Maintainers file](/~https://github.com/cilium/community/blob/main/roles/Maintainers.md)
diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile
index 2d5f04c370eb..abcd6c1a47c7 100644
--- a/vendor/github.com/cilium/ebpf/Makefile
+++ b/vendor/github.com/cilium/ebpf/Makefile
@@ -28,6 +28,7 @@ TARGETS := \
testdata/loader-clang-7 \
testdata/loader-clang-9 \
testdata/loader-$(CLANG) \
+ testdata/manyprogs \
testdata/btf_map_init \
testdata/invalid_map \
testdata/raw_tracepoint \
@@ -39,9 +40,15 @@ TARGETS := \
testdata/map_spin_lock \
testdata/subprog_reloc \
testdata/fwd_decl \
+ testdata/kconfig \
+ testdata/kconfig_config \
+ testdata/kfunc \
+ testdata/invalid-kfunc \
+ testdata/kfunc-kmod \
btf/testdata/relocs \
btf/testdata/relocs_read \
- btf/testdata/relocs_read_tgt
+ btf/testdata/relocs_read_tgt \
+ cmd/bpf2go/testdata/minimal
.PHONY: all clean container-all container-shell generate
@@ -49,12 +56,12 @@ TARGETS := \
# Build all ELF binaries using a containerized LLVM toolchain.
container-all:
- ${CONTAINER_ENGINE} run --rm ${CONTAINER_RUN_ARGS} \
+ +${CONTAINER_ENGINE} run --rm -ti ${CONTAINER_RUN_ARGS} \
-v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \
--env CFLAGS="-fdebug-prefix-map=/ebpf=." \
--env HOME="/tmp" \
"${IMAGE}:${VERSION}" \
- $(MAKE) all
+ make all
# (debug) Drop the user into a shell inside the container as root.
container-shell:
@@ -77,9 +84,7 @@ all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) gene
generate: export BPF_CLANG := $(CLANG)
generate: export BPF_CFLAGS := $(CFLAGS)
generate:
- go generate ./cmd/bpf2go/test
- go generate ./internal/sys
- cd examples/ && go generate ./...
+ go generate ./...
testdata/loader-%-el.elf: testdata/loader.c
$* $(CFLAGS) -target bpfel -c $< -o $@
@@ -98,11 +103,11 @@ testdata/loader-%-eb.elf: testdata/loader.c
$(STRIP) -g $@
.PHONY: generate-btf
-generate-btf: KERNEL_VERSION?=5.18
+generate-btf: KERNEL_VERSION?=5.19
generate-btf:
$(eval TMP := $(shell mktemp -d))
curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION).bz" -o "$(TMP)/bzImage"
- ./testdata/extract-vmlinux "$(TMP)/bzImage" > "$(TMP)/vmlinux"
+ /lib/modules/$(shell uname -r)/build/scripts/extract-vmlinux "$(TMP)/bzImage" > "$(TMP)/vmlinux"
$(OBJCOPY) --dump-section .BTF=/dev/stdout "$(TMP)/vmlinux" /dev/null | gzip > "btf/testdata/vmlinux.btf.gz"
curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION)-selftests-bpf.tgz" -o "$(TMP)/selftests.tgz"
tar -xf "$(TMP)/selftests.tgz" --to-stdout tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.ko | \
diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md
index 3e490de71101..eff08d8df699 100644
--- a/vendor/github.com/cilium/ebpf/README.md
+++ b/vendor/github.com/cilium/ebpf/README.md
@@ -4,33 +4,37 @@
![HoneyGopher](.github/images/cilium-ebpf.png)
-eBPF is a pure Go library that provides utilities for loading, compiling, and
+ebpf-go is a pure Go library that provides utilities for loading, compiling, and
debugging eBPF programs. It has minimal external dependencies and is intended to
be used in long running processes.
-The library is maintained by [Cloudflare](https://www.cloudflare.com) and
-[Cilium](https://www.cilium.io).
-
-See [ebpf.io](https://ebpf.io) for other projects from the eBPF ecosystem.
+See [ebpf.io](https://ebpf.io) for complementary projects from the wider eBPF
+ecosystem.
## Getting Started
A small collection of Go and eBPF programs that serve as examples for building
your own tools can be found under [examples/](examples/).
-Contributions are highly encouraged, as they highlight certain use cases of
+[Contributions](CONTRIBUTING.md) are highly encouraged, as they highlight certain use cases of
eBPF and the library, and help shape the future of the project.
## Getting Help
-Please
-[join](https://ebpf.io/slack) the
+The community actively monitors our [GitHub Discussions](/~https://github.com/cilium/ebpf/discussions) page.
+Please search for existing threads before starting a new one. Refrain from
+opening issues on the bug tracker if you're just starting out or if you're not
+sure if something is a bug in the library code.
+
+Alternatively, [join](https://ebpf.io/slack) the
[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you
-have questions regarding the library.
+have other questions regarding the project. Note that this channel is ephemeral
+and has its history erased past a certain point, which is less helpful for
+others running into the same problem later.
## Packages
-This library includes the following packages:
+This library includes the following packages:
* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic
assembler, allowing you to write eBPF assembly instructions directly
@@ -38,7 +42,7 @@ This library includes the following packages:
* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows
compiling and embedding eBPF programs written in C within Go code. As well as
compiling the C code, it auto-generates Go code for loading and manipulating
- the eBPF program and map objects.
+ the eBPF program and map objects.
* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF
to various hooks
* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a
@@ -49,6 +53,7 @@ This library includes the following packages:
of `bpftool feature probe` for discovering BPF-related kernel features using native Go.
* [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift
the `RLIMIT_MEMLOCK` constraint on kernels before 5.11.
+* [btf](https://pkg.go.dev/github.com/cilium/ebpf/btf) allows reading the BPF Type Format.
## Requirements
diff --git a/vendor/github.com/cilium/ebpf/asm/alu.go b/vendor/github.com/cilium/ebpf/asm/alu.go
index 70ccc4d1518d..3f60245f2b6d 100644
--- a/vendor/github.com/cilium/ebpf/asm/alu.go
+++ b/vendor/github.com/cilium/ebpf/asm/alu.go
@@ -4,10 +4,10 @@ package asm
// Source of ALU / ALU64 / Branch operations
//
-// msb lsb
-// +----+-+---+
-// |op |S|cls|
-// +----+-+---+
+// msb lsb
+// +----+-+---+
+// |op |S|cls|
+// +----+-+---+
type Source uint8
const sourceMask OpCode = 0x08
@@ -39,10 +39,10 @@ const (
// ALUOp are ALU / ALU64 operations
//
-// msb lsb
-// +----+-+---+
-// |OP |s|cls|
-// +----+-+---+
+// msb lsb
+// +----+-+---+
+// |OP |s|cls|
+// +----+-+---+
type ALUOp uint8
const aluMask OpCode = 0xf0
diff --git a/vendor/github.com/cilium/ebpf/asm/func.go b/vendor/github.com/cilium/ebpf/asm/func.go
index a14e9e2c3ce9..18f6a75db58a 100644
--- a/vendor/github.com/cilium/ebpf/asm/func.go
+++ b/vendor/github.com/cilium/ebpf/asm/func.go
@@ -13,15 +13,15 @@ func (_ BuiltinFunc) Max() BuiltinFunc {
//
// You can regenerate this list using the following gawk script:
//
-// /FN\(.+\),/ {
-// match($1, /\((.+)\)/, r)
-// split(r[1], p, "_")
-// printf "Fn"
-// for (i in p) {
-// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2)
-// }
-// print ""
-// }
+// /FN\(.+\),/ {
+// match($1, /\(([a-z_0-9]+),/, r)
+// split(r[1], p, "_")
+// printf "Fn"
+// for (i in p) {
+// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2)
+// }
+// print ""
+// }
//
// The script expects include/uapi/linux/bpf.h as its input.
const (
@@ -229,6 +229,14 @@ const (
FnDynptrRead
FnDynptrWrite
FnDynptrData
+ FnTcpRawGenSyncookieIpv4
+ FnTcpRawGenSyncookieIpv6
+ FnTcpRawCheckSyncookieIpv4
+ FnTcpRawCheckSyncookieIpv6
+ FnKtimeGetTaiNs
+ FnUserRingbufDrain
+ FnCgrpStorageGet
+ FnCgrpStorageDelete
maxBuiltinFunc
)
diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go
index b7431b7f605a..47150bc4f2d2 100644
--- a/vendor/github.com/cilium/ebpf/asm/func_string.go
+++ b/vendor/github.com/cilium/ebpf/asm/func_string.go
@@ -212,12 +212,20 @@ func _() {
_ = x[FnDynptrRead-201]
_ = x[FnDynptrWrite-202]
_ = x[FnDynptrData-203]
- _ = x[maxBuiltinFunc-204]
+ _ = x[FnTcpRawGenSyncookieIpv4-204]
+ _ = x[FnTcpRawGenSyncookieIpv6-205]
+ _ = x[FnTcpRawCheckSyncookieIpv4-206]
+ _ = x[FnTcpRawCheckSyncookieIpv6-207]
+ _ = x[FnKtimeGetTaiNs-208]
+ _ = x[FnUserRingbufDrain-209]
+ _ = x[FnCgrpStorageGet-210]
+ _ = x[FnCgrpStorageDelete-211]
+ _ = x[maxBuiltinFunc-212]
}
-const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDatamaxBuiltinFunc"
+const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDataFnTcpRawGenSyncookieIpv4FnTcpRawGenSyncookieIpv6FnTcpRawCheckSyncookieIpv4FnTcpRawCheckSyncookieIpv6FnKtimeGetTaiNsFnUserRingbufDrainFnCgrpStorageGetFnCgrpStorageDeletemaxBuiltinFunc"
-var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3011}
+var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3021, 3045, 3071, 3097, 3112, 3130, 3146, 3165, 3179}
func (i BuiltinFunc) String() string {
if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) {
diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go
index f17d88b51869..ef01eaa35ae9 100644
--- a/vendor/github.com/cilium/ebpf/asm/instruction.go
+++ b/vendor/github.com/cilium/ebpf/asm/instruction.go
@@ -226,6 +226,13 @@ func (ins *Instruction) IsFunctionCall() bool {
return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall
}
+// IsKfuncCall returns true if the instruction calls a kfunc.
+//
+// This is not the same thing as a BPF helper call.
+func (ins *Instruction) IsKfuncCall() bool {
+ return ins.OpCode.JumpOp() == Call && ins.Src == PseudoKfuncCall
+}
+
// IsLoadOfFunctionPointer returns true if the instruction loads a function pointer.
func (ins *Instruction) IsLoadOfFunctionPointer() bool {
return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc
@@ -318,10 +325,14 @@ func (ins Instruction) Format(f fmt.State, c rune) {
case cls.IsJump():
switch jop := op.JumpOp(); jop {
case Call:
- if ins.Src == PseudoCall {
+ switch ins.Src {
+ case PseudoCall:
// bpf-to-bpf call
fmt.Fprint(f, ins.Constant)
- } else {
+ case PseudoKfuncCall:
+ // kfunc call
+ fmt.Fprintf(f, "Kfunc(%d)", ins.Constant)
+ default:
fmt.Fprint(f, BuiltinFunc(ins.Constant))
}
@@ -354,6 +365,13 @@ func (ins Instruction) Size() uint64 {
return uint64(InstructionSize * ins.OpCode.rawInstructions())
}
+// WithMetadata sets the given Metadata on the Instruction, e.g. to copy
+// Metadata from another Instruction when replacing it.
+func (ins Instruction) WithMetadata(meta Metadata) Instruction {
+ ins.Metadata = meta
+ return ins
+}
+
type symbolMeta struct{}
// WithSymbol marks the Instruction as a Symbol, which other Instructions
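`WithMetadata` is aimed at rewriting passes that swap an instruction but need to keep the symbol and source information attached to the original. A sketch (the instruction stream is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.Mov.Imm(asm.R0, 1).WithSymbol("entry"),
		asm.Return(),
	}

	// Replace the first instruction but carry over its Metadata so the
	// "entry" symbol and any source info stay attached to the new one.
	insns[0] = asm.Mov.Imm(asm.R0, 0).WithMetadata(insns[0].Metadata)

	fmt.Println(insns)
}
```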
diff --git a/vendor/github.com/cilium/ebpf/asm/jump.go b/vendor/github.com/cilium/ebpf/asm/jump.go
index e31e42cac52c..2c8a3dbb7a36 100644
--- a/vendor/github.com/cilium/ebpf/asm/jump.go
+++ b/vendor/github.com/cilium/ebpf/asm/jump.go
@@ -4,10 +4,10 @@ package asm
// JumpOp affect control flow.
//
-// msb lsb
-// +----+-+---+
-// |OP |s|cls|
-// +----+-+---+
+// msb lsb
+// +----+-+---+
+// |OP |s|cls|
+// +----+-+---+
type JumpOp uint8
const jumpMask OpCode = aluMask
diff --git a/vendor/github.com/cilium/ebpf/asm/load_store.go b/vendor/github.com/cilium/ebpf/asm/load_store.go
index 85ed286b02b0..f109497aebcb 100644
--- a/vendor/github.com/cilium/ebpf/asm/load_store.go
+++ b/vendor/github.com/cilium/ebpf/asm/load_store.go
@@ -4,10 +4,10 @@ package asm
// Mode for load and store operations
//
-// msb lsb
-// +---+--+---+
-// |MDE|sz|cls|
-// +---+--+---+
+// msb lsb
+// +---+--+---+
+// |MDE|sz|cls|
+// +---+--+---+
type Mode uint8
const modeMask OpCode = 0xe0
@@ -30,10 +30,10 @@ const (
// Size of load and store operations
//
-// msb lsb
-// +---+--+---+
-// |mde|SZ|cls|
-// +---+--+---+
+// msb lsb
+// +---+--+---+
+// |mde|SZ|cls|
+// +---+--+---+
type Size uint8
const sizeMask OpCode = 0x18
diff --git a/vendor/github.com/cilium/ebpf/asm/opcode.go b/vendor/github.com/cilium/ebpf/asm/opcode.go
index b11917e18bbc..9e3c30b0b3a9 100644
--- a/vendor/github.com/cilium/ebpf/asm/opcode.go
+++ b/vendor/github.com/cilium/ebpf/asm/opcode.go
@@ -9,10 +9,10 @@ import (
// Class of operations
//
-// msb lsb
-// +---+--+---+
-// | ?? |CLS|
-// +---+--+---+
+// msb lsb
+// +---+--+---+
+// | ?? |CLS|
+// +---+--+---+
type Class uint8
const classMask OpCode = 0x07
@@ -70,10 +70,10 @@ func (cls Class) isJumpOrALU() bool {
//
// Its encoding is defined by a Class value:
//
-// msb lsb
-// +----+-+---+
-// | ???? |CLS|
-// +----+-+---+
+// msb lsb
+// +----+-+---+
+// | ???? |CLS|
+// +----+-+---+
type OpCode uint8
// InvalidOpCode is returned by setters on OpCode
diff --git a/vendor/github.com/cilium/ebpf/asm/register.go b/vendor/github.com/cilium/ebpf/asm/register.go
index dd5d44f1c191..457a3b8a8838 100644
--- a/vendor/github.com/cilium/ebpf/asm/register.go
+++ b/vendor/github.com/cilium/ebpf/asm/register.go
@@ -35,10 +35,11 @@ const (
// Pseudo registers used by 64bit loads and jumps
const (
- PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD
- PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE
- PseudoCall = R1 // BPF_PSEUDO_CALL
- PseudoFunc = R4 // BPF_PSEUDO_FUNC
+ PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD
+ PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE
+ PseudoCall = R1 // BPF_PSEUDO_CALL
+ PseudoFunc = R4 // BPF_PSEUDO_FUNC
+ PseudoKfuncCall = R2 // BPF_PSEUDO_KFUNC_CALL
)
func (r Register) String() string {
diff --git a/vendor/github.com/cilium/ebpf/attachtype_string.go b/vendor/github.com/cilium/ebpf/attachtype_string.go
index de355ed90922..add2a3b5cc9b 100644
--- a/vendor/github.com/cilium/ebpf/attachtype_string.go
+++ b/vendor/github.com/cilium/ebpf/attachtype_string.go
@@ -51,11 +51,12 @@ func _() {
_ = x[AttachSkReuseportSelect-39]
_ = x[AttachSkReuseportSelectOrMigrate-40]
_ = x[AttachPerfEvent-41]
+ _ = x[AttachTraceKprobeMulti-42]
}
-const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEvent"
+const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMulti"
-var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610}
+var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626}
func (i AttachType) String() string {
if i >= AttachType(len(_AttachType_index)-1) {
diff --git a/vendor/github.com/cilium/ebpf/btf/btf.go b/vendor/github.com/cilium/ebpf/btf/btf.go
index a5969332aaa8..86eb7d6819d5 100644
--- a/vendor/github.com/cilium/ebpf/btf/btf.go
+++ b/vendor/github.com/cilium/ebpf/btf/btf.go
@@ -2,7 +2,6 @@ package btf
import (
"bufio"
- "bytes"
"debug/elf"
"encoding/binary"
"errors"
@@ -11,6 +10,7 @@ import (
"math"
"os"
"reflect"
+ "sync"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
@@ -21,34 +21,41 @@ const btfMagic = 0xeB9F
// Errors returned by BTF functions.
var (
- ErrNotSupported = internal.ErrNotSupported
- ErrNotFound = errors.New("not found")
- ErrNoExtendedInfo = errors.New("no extended info")
+ ErrNotSupported = internal.ErrNotSupported
+ ErrNotFound = errors.New("not found")
+ ErrNoExtendedInfo = errors.New("no extended info")
+ ErrMultipleMatches = errors.New("multiple matching types")
)
// ID represents the unique ID of a BTF object.
type ID = sys.BTFID
-// Spec represents decoded BTF.
+// Spec allows querying a set of Types and loading the set into the
+// kernel.
type Spec struct {
- // Data from .BTF.
- rawTypes []rawType
- strings *stringTable
-
- // All types contained by the spec. For the base type, the position of
- // a type in the slice is its ID.
- types types
+ // All types contained by the spec, not including types from the base in
+ // case the spec was parsed from split BTF.
+ types []Type
// Type IDs indexed by type.
typeIDs map[Type]TypeID
+ // The ID of the first type in types.
+ firstTypeID TypeID
+
// Types indexed by essential name.
// Includes all struct flavors and types with the same name.
namedTypes map[essentialName][]Type
+ // String table from ELF, may be nil.
+ strings *stringTable
+
+ // Byte order of the ELF we decoded the spec from, may be nil.
byteOrder binary.ByteOrder
}
+var btfHeaderLen = binary.Size(&btfHeader{})
+
type btfHeader struct {
Magic uint16
Version uint8
@@ -73,6 +80,18 @@ func (h *btfHeader) stringStart() int64 {
return int64(h.HdrLen + h.StringOff)
}
+// newSpec creates a Spec containing only Void.
+func newSpec() *Spec {
+ return &Spec{
+ []Type{(*Void)(nil)},
+ map[Type]TypeID{(*Void)(nil): 0},
+ 0,
+ make(map[essentialName][]Type),
+ nil,
+ nil,
+ }
+}
+
// LoadSpec opens file and calls LoadSpecFromReader on it.
func LoadSpec(file string) (*Spec, error) {
fh, err := os.Open(file)
@@ -92,10 +111,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
file, err := internal.NewSafeELFFile(rd)
if err != nil {
if bo := guessRawBTFByteOrder(rd); bo != nil {
- // Try to parse a naked BTF blob. This will return an error if
- // we encounter a Datasec, since we can't fix it up.
- spec, err := loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil)
- return spec, err
+ return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil)
}
return nil, err
@@ -106,7 +122,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
// LoadSpecAndExtInfosFromReader reads from an ELF.
//
-// ExtInfos may be nil if the ELF doesn't contain section metadta.
+// ExtInfos may be nil if the ELF doesn't contain section metadata.
// Returns ErrNotFound if the ELF contains no BTF.
func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
file, err := internal.NewSafeELFFile(rd)
@@ -119,7 +135,7 @@ func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
return nil, nil, err
}
- extInfos, err := loadExtInfosFromELF(file, spec.types, spec.strings)
+ extInfos, err := loadExtInfosFromELF(file, spec)
if err != nil && !errors.Is(err, ErrNotFound) {
return nil, nil, err
}
@@ -127,40 +143,40 @@ func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
return spec, extInfos, nil
}
-// variableOffsets extracts all symbols offsets from an ELF and indexes them by
+// symbolOffsets extracts all symbol offsets from an ELF and indexes them by
// section and variable name.
//
// References to variables in BTF data sections carry unsigned 32-bit offsets.
// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well
// beyond this range. Since these symbols cannot be described by BTF info,
// ignore them here.
-func variableOffsets(file *internal.SafeELFFile) (map[variable]uint32, error) {
+func symbolOffsets(file *internal.SafeELFFile) (map[symbol]uint32, error) {
symbols, err := file.Symbols()
if err != nil {
return nil, fmt.Errorf("can't read symbols: %v", err)
}
- variableOffsets := make(map[variable]uint32)
- for _, symbol := range symbols {
- if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
+ offsets := make(map[symbol]uint32)
+ for _, sym := range symbols {
+ if idx := sym.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
// Ignore things like SHN_ABS
continue
}
- if symbol.Value > math.MaxUint32 {
+ if sym.Value > math.MaxUint32 {
// VarSecinfo offset is u32, cannot reference symbols in higher regions.
continue
}
- if int(symbol.Section) >= len(file.Sections) {
- return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
+ if int(sym.Section) >= len(file.Sections) {
+ return nil, fmt.Errorf("symbol %s: invalid section %d", sym.Name, sym.Section)
}
- secName := file.Sections[symbol.Section].Name
- variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
+ secName := file.Sections[sym.Section].Name
+ offsets[symbol{secName, sym.Name}] = uint32(sym.Value)
}
- return variableOffsets, nil
+ return offsets, nil
}
func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
@@ -190,7 +206,7 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
return nil, fmt.Errorf("btf: %w", ErrNotFound)
}
- vars, err := variableOffsets(file)
+ offsets, err := symbolOffsets(file)
if err != nil {
return nil, err
}
@@ -199,51 +215,66 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
return nil, fmt.Errorf("compressed BTF is not supported")
}
- rawTypes, rawStrings, err := parseBTF(btfSection.ReaderAt, file.ByteOrder, nil)
+ spec, err := loadRawSpec(btfSection.ReaderAt, file.ByteOrder, nil)
if err != nil {
return nil, err
}
- err = fixupDatasec(rawTypes, rawStrings, sectionSizes, vars)
+ err = fixupDatasec(spec.types, sectionSizes, offsets)
if err != nil {
return nil, err
}
- return inflateSpec(rawTypes, rawStrings, file.ByteOrder, nil)
+ return spec, nil
}
-func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder,
- baseTypes types, baseStrings *stringTable) (*Spec, error) {
+func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error) {
+ var (
+ baseStrings *stringTable
+ firstTypeID TypeID
+ err error
+ )
+
+ if base != nil {
+ if base.firstTypeID != 0 {
+ return nil, fmt.Errorf("can't use split BTF as base")
+ }
+
+ if base.strings == nil {
+ return nil, fmt.Errorf("parse split BTF: base must be loaded from an ELF")
+ }
+
+ baseStrings = base.strings
+
+ firstTypeID, err = base.nextTypeID()
+ if err != nil {
+ return nil, err
+ }
+ }
rawTypes, rawStrings, err := parseBTF(btf, bo, baseStrings)
if err != nil {
return nil, err
}
- return inflateSpec(rawTypes, rawStrings, bo, baseTypes)
-}
-
-func inflateSpec(rawTypes []rawType, rawStrings *stringTable, bo binary.ByteOrder,
- baseTypes types) (*Spec, error) {
-
- types, err := inflateRawTypes(rawTypes, baseTypes, rawStrings)
+ types, err := inflateRawTypes(rawTypes, rawStrings, base)
if err != nil {
return nil, err
}
- typeIDs, typesByName := indexTypes(types, TypeID(len(baseTypes)))
+ typeIDs, typesByName := indexTypes(types, firstTypeID)
return &Spec{
- rawTypes: rawTypes,
- namedTypes: typesByName,
- typeIDs: typeIDs,
- types: types,
- strings: rawStrings,
- byteOrder: bo,
+ namedTypes: typesByName,
+ typeIDs: typeIDs,
+ types: types,
+ firstTypeID: firstTypeID,
+ strings: rawStrings,
+ byteOrder: bo,
}, nil
}
-func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essentialName][]Type) {
+func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]Type) {
namedTypes := 0
for _, typ := range types {
if typ.TypeName() != "" {
@@ -261,7 +292,7 @@ func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essenti
if name := newEssentialName(typ.TypeName()); name != "" {
typesByName[name] = append(typesByName[name], typ)
}
- typeIDs[typ] = TypeID(i) + typeIDOffset
+ typeIDs[typ] = firstTypeID + TypeID(i)
}
return typeIDs, typesByName
@@ -272,20 +303,70 @@ func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essenti
// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
func LoadKernelSpec() (*Spec, error) {
+ spec, _, err := kernelSpec()
+ if err != nil {
+ return nil, err
+ }
+ return spec.Copy(), nil
+}
+
+var kernelBTF struct {
+ sync.RWMutex
+ spec *Spec
+ // True if the spec was read from an ELF instead of raw BTF in /sys.
+ fallback bool
+}
+
+// FlushKernelSpec removes any cached kernel type information.
+func FlushKernelSpec() {
+ kernelBTF.Lock()
+ defer kernelBTF.Unlock()
+
+ kernelBTF.spec, kernelBTF.fallback = nil, false
+}
+
+func kernelSpec() (*Spec, bool, error) {
+ kernelBTF.RLock()
+ spec, fallback := kernelBTF.spec, kernelBTF.fallback
+ kernelBTF.RUnlock()
+
+ if spec == nil {
+ kernelBTF.Lock()
+ defer kernelBTF.Unlock()
+
+ spec, fallback = kernelBTF.spec, kernelBTF.fallback
+ }
+
+ if spec != nil {
+ return spec, fallback, nil
+ }
+
+ spec, fallback, err := loadKernelSpec()
+ if err != nil {
+ return nil, false, err
+ }
+
+ kernelBTF.spec, kernelBTF.fallback = spec, fallback
+ return spec, fallback, nil
+}
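// A usage sketch (not from the diff) of the caching introduced above:
// LoadKernelSpec returns a Copy of one process-wide Spec, so repeated calls
// avoid re-parsing /sys/kernel/btf/vmlinux, and FlushKernelSpec drops the
// cache, e.g. between tests that fake kernel BTF.
package main

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func main() {
	spec, err := btf.LoadKernelSpec() // parses and caches on first use
	if err != nil {
		panic(err)
	}
	fmt.Printf("loaded %T; mutating this copy leaves the cache intact\n", spec)

	btf.FlushKernelSpec() // the next LoadKernelSpec re-reads the kernel
}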
+
+func loadKernelSpec() (_ *Spec, fallback bool, _ error) {
fh, err := os.Open("/sys/kernel/btf/vmlinux")
if err == nil {
defer fh.Close()
- return loadRawSpec(fh, internal.NativeEndian, nil, nil)
+ spec, err := loadRawSpec(fh, internal.NativeEndian, nil)
+ return spec, false, err
}
file, err := findVMLinux()
if err != nil {
- return nil, err
+ return nil, false, err
}
defer file.Close()
- return loadSpecFromELF(file)
+ spec, err := loadSpecFromELF(file)
+ return spec, true, err
}
// findVMLinux scans multiple well-known paths for vmlinux kernel images.
@@ -388,140 +469,122 @@ func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable) ([
return rawTypes, rawStrings, nil
}
-type variable struct {
+type symbol struct {
section string
name string
}
-func fixupDatasec(rawTypes []rawType, rawStrings *stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
- for i, rawType := range rawTypes {
- if rawType.Kind() != kindDatasec {
+// fixupDatasec attempts to patch up missing info in Datasecs and their members by
+// supplementing them with information from the ELF headers and symbol table.
+func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symbol]uint32) error {
+ for _, typ := range types {
+ ds, ok := typ.(*Datasec)
+ if !ok {
continue
}
- name, err := rawStrings.Lookup(rawType.NameOff)
- if err != nil {
- return err
- }
+ name := ds.Name
+
+ // Some Datasecs are virtual and don't have corresponding ELF sections.
+ switch name {
+ case ".ksyms":
+ // .ksyms describes forward declarations of kfunc signatures.
+ // Nothing to fix up, all sizes and offsets are 0.
+ for _, vsi := range ds.Vars {
+ _, ok := vsi.Type.(*Func)
+ if !ok {
+ // Only Funcs are supported in the .ksyms Datasec.
+ return fmt.Errorf("data section %s: expected *btf.Func, not %T: %w", name, vsi.Type, ErrNotSupported)
+ }
+ }
+
+ continue
+ case ".kconfig":
+ // .kconfig has a size of 0 and has all members' offsets set to 0.
+ // Fix up all offsets and set the Datasec's size.
+ if err := fixupDatasecLayout(ds); err != nil {
+ return err
+ }
- if name == ".kconfig" || name == ".ksyms" {
- return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
+ // Fix up extern to global linkage to avoid a BTF verifier error.
+ for _, vsi := range ds.Vars {
+ vsi.Type.(*Var).Linkage = GlobalVar
+ }
+
+ continue
}
- if rawTypes[i].SizeType != 0 {
+ if ds.Size != 0 {
continue
}
- size, ok := sectionSizes[name]
+ ds.Size, ok = sectionSizes[name]
if !ok {
return fmt.Errorf("data section %s: missing size", name)
}
- rawTypes[i].SizeType = size
-
- secinfos := rawType.data.([]btfVarSecinfo)
- for j, secInfo := range secinfos {
- id := int(secInfo.Type - 1)
- if id >= len(rawTypes) {
- return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
+ for i := range ds.Vars {
+ symName := ds.Vars[i].Type.TypeName()
+ ds.Vars[i].Offset, ok = offsets[symbol{name, symName}]
+ if !ok {
+ return fmt.Errorf("data section %s: missing offset for symbol %s", name, symName)
}
+ }
+ }
- varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
- if err != nil {
- return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
- }
+ return nil
+}
- offset, ok := variableOffsets[variable{name, varName}]
- if !ok {
- return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
- }
+// fixupDatasecLayout populates ds.Vars[].Offset according to var sizes and
+// alignment. It also calculates and sets ds.Size.
+func fixupDatasecLayout(ds *Datasec) error {
+ var off uint32
- secinfos[j].Offset = offset
+ for i, vsi := range ds.Vars {
+ v, ok := vsi.Type.(*Var)
+ if !ok {
+ return fmt.Errorf("member %d: unsupported type %T", i, vsi.Type)
+ }
+
+ size, err := Sizeof(v.Type)
+ if err != nil {
+ return fmt.Errorf("variable %s: getting size: %w", v.Name, err)
}
+ align, err := alignof(v.Type)
+ if err != nil {
+ return fmt.Errorf("variable %s: getting alignment: %w", v.Name, err)
+ }
+
+ // Align the current member based on the offset of the end of the previous
+ // member and the alignment of the current member.
+ off = internal.Align(off, uint32(align))
+
+ ds.Vars[i].Offset = off
+
+ off += uint32(size)
}
+ ds.Size = off
+
return nil
}
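// A worked sketch (not from the diff) of the layout rule fixupDatasecLayout
// applies: round the running offset up to each member's alignment, place the
// member, then advance by its size. internal.Align rounds up to the next
// multiple of align; the bit trick below is equivalent for power-of-two
// alignments.
package main

import "fmt"

func align(n, a uint32) uint32 { return (n + a - 1) &^ (a - 1) }

func main() {
	vars := []struct{ size, align uint32 }{
		{4, 4}, {1, 1}, {8, 8}, // e.g. a u32, a char, a u64
	}
	var off uint32
	for _, v := range vars {
		off = align(off, v.align)
		fmt.Println("offset:", off) // 0, 4, 8
		off += v.size
	}
	fmt.Println("Datasec size:", off) // 16
}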
// Copy creates a copy of Spec.
func (s *Spec) Copy() *Spec {
types := copyTypes(s.types, nil)
-
- typeIDOffset := TypeID(0)
- if len(s.types) != 0 {
- typeIDOffset = s.typeIDs[s.types[0]]
- }
- typeIDs, typesByName := indexTypes(types, typeIDOffset)
+ typeIDs, typesByName := indexTypes(types, s.firstTypeID)
// NB: Other parts of spec are not copied since they are immutable.
return &Spec{
- s.rawTypes,
- s.strings,
types,
typeIDs,
+ s.firstTypeID,
typesByName,
+ s.strings,
s.byteOrder,
}
}
-type marshalOpts struct {
- ByteOrder binary.ByteOrder
- StripFuncLinkage bool
-}
-
-func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
- var (
- buf bytes.Buffer
- header = new(btfHeader)
- headerLen = binary.Size(header)
- )
-
- // Reserve space for the header. We have to write it last since
- // we don't know the size of the type section yet.
- _, _ = buf.Write(make([]byte, headerLen))
-
- // Write type section, just after the header.
- for _, raw := range s.rawTypes {
- switch {
- case opts.StripFuncLinkage && raw.Kind() == kindFunc:
- raw.SetLinkage(StaticFunc)
- }
-
- if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
- return nil, fmt.Errorf("can't marshal BTF: %w", err)
- }
- }
-
- typeLen := uint32(buf.Len() - headerLen)
-
- // Write string section after type section.
- stringsLen := s.strings.Length()
- buf.Grow(stringsLen)
- if err := s.strings.Marshal(&buf); err != nil {
- return nil, err
- }
-
- // Fill out the header, and write it out.
- header = &btfHeader{
- Magic: btfMagic,
- Version: 1,
- Flags: 0,
- HdrLen: uint32(headerLen),
- TypeOff: 0,
- TypeLen: typeLen,
- StringOff: typeLen,
- StringLen: uint32(stringsLen),
- }
-
- raw := buf.Bytes()
- err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
- if err != nil {
- return nil, fmt.Errorf("can't write header: %v", err)
- }
-
- return raw, nil
-}
-
type sliceWriter []byte
func (sw sliceWriter) Write(p []byte) (int, error) {
@@ -532,12 +595,31 @@ func (sw sliceWriter) Write(p []byte) (int, error) {
return copy(sw, p), nil
}
+// nextTypeID returns the next unallocated type ID or an error if there are no
+// more type IDs.
+func (s *Spec) nextTypeID() (TypeID, error) {
+ id := s.firstTypeID + TypeID(len(s.types))
+ if id < s.firstTypeID {
+ return 0, fmt.Errorf("no more type IDs")
+ }
+ return id, nil
+}
+
// TypeByID returns the BTF Type with the given type ID.
//
// Returns an error wrapping ErrNotFound if a Type with the given ID
// does not exist in the Spec.
func (s *Spec) TypeByID(id TypeID) (Type, error) {
- return s.types.ByID(id)
+ if id < s.firstTypeID {
+ return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.firstTypeID, ErrNotFound)
+ }
+
+ index := int(id - s.firstTypeID)
+ if index >= len(s.types) {
+ return nil, fmt.Errorf("look up type with ID %d: %w", id, ErrNotFound)
+ }
+
+ return s.types[index], nil
}
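// A sketch (not from the diff) of the ID arithmetic TypeByID now performs.
// For split BTF parsed on top of a base with N types, firstTypeID is N, so a
// lookup subtracts it before indexing the spec-local slice.
package main

import "fmt"

func typeIndex(id, firstTypeID uint32, numTypes int) (int, error) {
	if id < firstTypeID {
		return 0, fmt.Errorf("ID %d belongs to the base spec", id)
	}
	idx := int(id - firstTypeID)
	if idx >= numTypes {
		return 0, fmt.Errorf("ID %d: not found", id)
	}
	return idx, nil
}

func main() {
	fmt.Println(typeIndex(105, 100, 10)) // 5 <nil>
	fmt.Println(typeIndex(42, 100, 10))  // error: belongs to the base spec
}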
// TypeID returns the ID for a given Type.
@@ -598,17 +680,19 @@ func (s *Spec) AnyTypeByName(name string) (Type, error) {
return types[0], nil
}
-// TypeByName searches for a Type with a specific name. Since multiple
-// Types with the same name can exist, the parameter typ is taken to
-// narrow down the search in case of a clash.
+// TypeByName searches for a Type with a specific name. Since multiple Types
+// with the same name can exist, the parameter typ is taken to narrow down the
+// search in case of a clash.
//
-// typ must be a non-nil pointer to an implementation of a Type.
-// On success, the address of the found Type will be copied to typ.
+// typ must be a non-nil pointer to an implementation of a Type. On success, the
+// address of the found Type will be copied to typ.
//
-// Returns an error wrapping ErrNotFound if no matching
-// Type exists in the Spec. If multiple candidates are found,
-// an error is returned.
+// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
+// Returns an error wrapping ErrMultipleMatches if multiple candidates are found.
func (s *Spec) TypeByName(name string, typ interface{}) error {
+ typeInterface := reflect.TypeOf((*Type)(nil)).Elem()
+
+ // typ may be **T or *Type
typValue := reflect.ValueOf(typ)
if typValue.Kind() != reflect.Ptr {
return fmt.Errorf("%T is not a pointer", typ)
@@ -620,7 +704,12 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
}
wanted := typPtr.Type()
- if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) {
+ if wanted == typeInterface {
+ // This is *Type. Unwrap the value's type.
+ wanted = typPtr.Elem().Type()
+ }
+
+ if !wanted.AssignableTo(typeInterface) {
return fmt.Errorf("%T does not satisfy Type interface", typ)
}
@@ -636,14 +725,14 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
}
if candidate != nil {
- return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
+ return fmt.Errorf("type %s(%T): %w", name, typ, ErrMultipleMatches)
}
candidate = typ
}
if candidate == nil {
- return fmt.Errorf("type %s: %w", name, ErrNotFound)
+ return fmt.Errorf("%s %s: %w", wanted, name, ErrNotFound)
}
typPtr.Set(reflect.ValueOf(candidate))
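// A usage sketch (not from the diff, helper name hypothetical) of the two
// pointer shapes TypeByName accepts after the change above: **T pins the
// concrete type, while *Type must be seeded with a (possibly nil) concrete
// value so the unwrapping can recover the type to match.
package btfsketch

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func findTaskStruct(spec *btf.Spec) error {
	var task *btf.Struct // **T form
	if err := spec.TypeByName("task_struct", &task); err != nil {
		return err // wraps ErrNotFound or ErrMultipleMatches
	}

	var typ btf.Type = (*btf.Struct)(nil) // *Type form, seeded
	if err := spec.TypeByName("task_struct", &typ); err != nil {
		return err
	}
	fmt.Println(task.Name, typ.TypeName())
	return nil
}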
@@ -656,12 +745,12 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
// Types from base are used to resolve references in the split BTF.
// The returned Spec only contains types from the split BTF, not from the base.
func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {
- return loadRawSpec(r, internal.NativeEndian, base.types, base.strings)
+ return loadRawSpec(r, internal.NativeEndian, base)
}
// TypesIterator iterates over types of a given spec.
type TypesIterator struct {
- spec *Spec
+ types []Type
index int
// The last visited type in the spec.
Type Type
@@ -669,229 +758,112 @@ type TypesIterator struct {
// Iterate returns the types iterator.
func (s *Spec) Iterate() *TypesIterator {
- return &TypesIterator{spec: s, index: 0}
+ // We share the backing array of types with the Spec. This is safe since
+ // we don't allow deletion or shuffling of types.
+ return &TypesIterator{types: s.types, index: 0}
}
// Next returns true as long as there are any remaining types.
func (iter *TypesIterator) Next() bool {
- if len(iter.spec.types) <= iter.index {
+ if len(iter.types) <= iter.index {
return false
}
- iter.Type = iter.spec.types[iter.index]
+ iter.Type = iter.types[iter.index]
iter.index++
return true
}
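// A usage sketch (not from the diff, helper name hypothetical) of the
// iterator, which now snapshots the shared types slice instead of holding the
// whole Spec.
package btfsketch

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func dumpNamedTypes(spec *btf.Spec) {
	iter := spec.Iterate()
	for iter.Next() {
		if name := iter.Type.TypeName(); name != "" {
			fmt.Println(name)
		}
	}
}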
-// Handle is a reference to BTF loaded into the kernel.
-type Handle struct {
- fd *sys.FD
-
- // Size of the raw BTF in bytes.
- size uint32
-}
-
-// NewHandle loads BTF into the kernel.
-//
-// Returns ErrNotSupported if BTF is not supported.
-func NewHandle(spec *Spec) (*Handle, error) {
- if err := haveBTF(); err != nil {
- return nil, err
- }
-
- if spec.byteOrder != internal.NativeEndian {
- return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
- }
-
- btf, err := spec.marshal(marshalOpts{
- ByteOrder: internal.NativeEndian,
- StripFuncLinkage: haveFuncLinkage() != nil,
- })
- if err != nil {
- return nil, fmt.Errorf("can't marshal BTF: %w", err)
- }
-
- if uint64(len(btf)) > math.MaxUint32 {
- return nil, errors.New("BTF exceeds the maximum size")
+// haveBTF attempts to load a BTF blob containing an Int. It should pass on any
+// kernel that supports BPF_BTF_LOAD.
+var haveBTF = internal.NewFeatureTest("BTF", "4.18", func() error {
+ // 0-length anonymous integer
+ err := probeBTF(&Int{})
+ if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
+ return internal.ErrNotSupported
}
+ return err
+})
- attr := &sys.BtfLoadAttr{
- Btf: sys.NewSlicePointer(btf),
- BtfSize: uint32(len(btf)),
+// haveMapBTF attempts to load a minimal BTF blob containing a Var. It is
+// used as a proxy for .bss, .data and .rodata map support, which generally
+// come with a Var and Datasec. These were introduced in Linux 5.2.
+var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", "5.2", func() error {
+ if err := haveBTF(); err != nil {
+ return err
}
- fd, err := sys.BtfLoad(attr)
- if err != nil {
- logBuf := make([]byte, 64*1024)
- attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
- attr.BtfLogSize = uint32(len(logBuf))
- attr.BtfLogLevel = 1
- // NB: The syscall will never return ENOSPC as of 5.18-rc4.
- _, _ = sys.BtfLoad(attr)
- return nil, internal.ErrorWithLog(err, logBuf)
+ v := &Var{
+ Name: "a",
+ Type: &Pointer{(*Void)(nil)},
}
- return &Handle{fd, attr.BtfSize}, nil
-}
-
-// NewHandleFromID returns the BTF handle for a given id.
-//
-// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
-//
-// Returns ErrNotExist, if there is no BTF with the given id.
-//
-// Requires CAP_SYS_ADMIN.
-func NewHandleFromID(id ID) (*Handle, error) {
- fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
- Id: uint32(id),
- })
- if err != nil {
- return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
+ err := probeBTF(v)
+ if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
+ // Treat both EINVAL and EPERM as not supported: creating the map may still
+ // succeed without Btf* attrs.
+ return internal.ErrNotSupported
}
+ return err
+})
- info, err := newHandleInfoFromFD(fd)
- if err != nil {
- _ = fd.Close()
- return nil, err
+// haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It
+// is used as a proxy for ext_info (func_info) support, which depends on
+// Func(Proto) by definition.
+var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", "5.0", func() error {
+ if err := haveBTF(); err != nil {
+ return err
}
- return &Handle{fd, info.size}, nil
-}
-
-// Spec parses the kernel BTF into Go types.
-//
-// base is used to decode split BTF and may be nil.
-func (h *Handle) Spec(base *Spec) (*Spec, error) {
- var btfInfo sys.BtfInfo
- btfBuffer := make([]byte, h.size)
- btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)
-
- if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
- return nil, err
+ fn := &Func{
+ Name: "a",
+ Type: &FuncProto{Return: (*Void)(nil)},
}
- var baseTypes types
- var baseStrings *stringTable
- if base != nil {
- baseTypes = base.types
- baseStrings = base.strings
+ err := probeBTF(fn)
+ if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
+ return internal.ErrNotSupported
}
+ return err
+})
- return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, baseTypes, baseStrings)
-}
-
-// Close destroys the handle.
-//
-// Subsequent calls to FD will return an invalid value.
-func (h *Handle) Close() error {
- if h == nil {
- return nil
+var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", "5.6", func() error {
+ if err := haveProgBTF(); err != nil {
+ return err
}
- return h.fd.Close()
-}
-
-// FD returns the file descriptor for the handle.
-func (h *Handle) FD() int {
- return h.fd.Int()
-}
-
-// Info returns metadata about the handle.
-func (h *Handle) Info() (*HandleInfo, error) {
- return newHandleInfoFromFD(h.fd)
-}
-
-func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
- const minHeaderLength = 24
-
- typesLen := uint32(binary.Size(types))
- header := btfHeader{
- Magic: btfMagic,
- Version: 1,
- HdrLen: minHeaderLength,
- TypeOff: 0,
- TypeLen: typesLen,
- StringOff: typesLen,
- StringLen: uint32(len(strings)),
+ fn := &Func{
+ Name: "a",
+ Type: &FuncProto{Return: (*Void)(nil)},
+ Linkage: GlobalFunc,
}
- buf := new(bytes.Buffer)
- _ = binary.Write(buf, bo, &header)
- _ = binary.Write(buf, bo, types)
- buf.Write(strings)
-
- return buf.Bytes()
-}
-
-var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
- var (
- types struct {
- Integer btfType
- Var btfType
- btfVar struct{ Linkage uint32 }
- }
- strings = []byte{0, 'a', 0}
- )
-
- // We use a BTF_KIND_VAR here, to make sure that
- // the kernel understands BTF at least as well as we
- // do. BTF_KIND_VAR was introduced ~5.1.
- types.Integer.SetKind(kindPointer)
- types.Var.NameOff = 1
- types.Var.SetKind(kindVar)
- types.Var.SizeType = 1
-
- btf := marshalBTF(&types, strings, internal.NativeEndian)
-
- fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
- Btf: sys.NewSlicePointer(btf),
- BtfSize: uint32(len(btf)),
- })
- if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
- // Treat both EINVAL and EPERM as not supported: loading the program
- // might still succeed without BTF.
+ err := probeBTF(fn)
+ if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
}
+ return err
+})
+
+func probeBTF(typ Type) error {
+ b, err := NewBuilder([]Type{typ})
if err != nil {
return err
}
- fd.Close()
- return nil
-})
-
-var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
- if err := haveBTF(); err != nil {
+ buf, err := b.Marshal(nil, nil)
+ if err != nil {
return err
}
- var (
- types struct {
- FuncProto btfType
- Func btfType
- }
- strings = []byte{0, 'a', 0}
- )
-
- types.FuncProto.SetKind(kindFuncProto)
- types.Func.SetKind(kindFunc)
- types.Func.SizeType = 1 // aka FuncProto
- types.Func.NameOff = 1
- types.Func.SetLinkage(GlobalFunc)
-
- btf := marshalBTF(&types, strings, internal.NativeEndian)
-
fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
- Btf: sys.NewSlicePointer(btf),
- BtfSize: uint32(len(btf)),
+ Btf: sys.NewSlicePointer(buf),
+ BtfSize: uint32(len(buf)),
})
- if errors.Is(err, unix.EINVAL) {
- return internal.ErrNotSupported
- }
- if err != nil {
- return err
+
+ if err == nil {
+ fd.Close()
}
- fd.Close()
- return nil
-})
+ return err
+}
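// A sketch (not from the diff, helper name hypothetical) of how the probes
// above are consumed inside this package. Feature tests cache their verdict,
// so the BPF_BTF_LOAD syscall in probeBTF runs at most once per probe.
func supportsFuncInfo() (bool, error) {
	err := haveProgBTF()
	if errors.Is(err, internal.ErrNotSupported) {
		return false, nil // old kernel: skip emitting func_info/line_info
	}
	return err == nil, err // a real failure, e.g. ENOMEM, propagates
}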
diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types.go b/vendor/github.com/cilium/ebpf/btf/btf_types.go
index 4810180494e6..a253b7c9b9e7 100644
--- a/vendor/github.com/cilium/ebpf/btf/btf_types.go
+++ b/vendor/github.com/cilium/ebpf/btf/btf_types.go
@@ -4,35 +4,41 @@ import (
"encoding/binary"
"fmt"
"io"
+ "unsafe"
)
-//go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage
+//go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind
// btfKind describes a Type.
type btfKind uint8
// Equivalents of the BTF_KIND_* constants.
const (
- kindUnknown btfKind = iota
- kindInt
- kindPointer
- kindArray
- kindStruct
- kindUnion
- kindEnum
- kindForward
- kindTypedef
- kindVolatile
- kindConst
- kindRestrict
+ kindUnknown btfKind = iota // Unknown
+ kindInt // Int
+ kindPointer // Pointer
+ kindArray // Array
+ kindStruct // Struct
+ kindUnion // Union
+ kindEnum // Enum
+ kindForward // Forward
+ kindTypedef // Typedef
+ kindVolatile // Volatile
+ kindConst // Const
+ kindRestrict // Restrict
// Added ~4.20
- kindFunc
- kindFuncProto
+ kindFunc // Func
+ kindFuncProto // FuncProto
// Added ~5.1
- kindVar
- kindDatasec
+ kindVar // Var
+ kindDatasec // Datasec
// Added ~5.13
- kindFloat
+ kindFloat // Float
+ // Added 5.16
+ kindDeclTag // DeclTag
+ kindTypeTag // TypeTag
+ // Added 6.0
+ kindEnum64 // Enum64
)
// FuncLinkage describes BTF function linkage metadata.
@@ -63,6 +69,8 @@ const (
btfTypeKindFlagMask = 1
)
+var btfTypeLen = binary.Size(btfType{})
+
// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
type btfType struct {
NameOff uint32
@@ -85,47 +93,6 @@ type btfType struct {
SizeType uint32
}
-func (k btfKind) String() string {
- switch k {
- case kindUnknown:
- return "Unknown"
- case kindInt:
- return "Integer"
- case kindPointer:
- return "Pointer"
- case kindArray:
- return "Array"
- case kindStruct:
- return "Struct"
- case kindUnion:
- return "Union"
- case kindEnum:
- return "Enumeration"
- case kindForward:
- return "Forward"
- case kindTypedef:
- return "Typedef"
- case kindVolatile:
- return "Volatile"
- case kindConst:
- return "Const"
- case kindRestrict:
- return "Restrict"
- case kindFunc:
- return "Function"
- case kindFuncProto:
- return "Function Proto"
- case kindVar:
- return "Variable"
- case kindDatasec:
- return "Section"
- case kindFloat:
- return "Float"
- default:
- return fmt.Sprintf("Unknown (%d)", k)
- }
-}
-
func mask(len uint32) uint32 {
return (1 << len) - 1
}
@@ -164,10 +131,43 @@ func (bt *btfType) SetVlen(vlen int) {
bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
}
-func (bt *btfType) KindFlag() bool {
+func (bt *btfType) kindFlagBool() bool {
return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
}
+func (bt *btfType) setKindFlagBool(set bool) {
+ var value uint32
+ if set {
+ value = 1
+ }
+ bt.setInfo(value, btfTypeKindFlagMask, btfTypeKindFlagShift)
+}
+
+// Bitfield returns true if the struct or union contain a bitfield.
+func (bt *btfType) Bitfield() bool {
+ return bt.kindFlagBool()
+}
+
+func (bt *btfType) SetBitfield(isBitfield bool) {
+ bt.setKindFlagBool(isBitfield)
+}
+
+func (bt *btfType) FwdKind() FwdKind {
+ return FwdKind(bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift))
+}
+
+func (bt *btfType) SetFwdKind(kind FwdKind) {
+ bt.setInfo(uint32(kind), btfTypeKindFlagMask, btfTypeKindFlagShift)
+}
+
+func (bt *btfType) Signed() bool {
+ return bt.kindFlagBool()
+}
+
+func (bt *btfType) SetSigned(signed bool) {
+ bt.setKindFlagBool(signed)
+}
+
func (bt *btfType) Linkage() FuncLinkage {
return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}
@@ -181,6 +181,10 @@ func (bt *btfType) Type() TypeID {
return TypeID(bt.SizeType)
}
+func (bt *btfType) SetType(id TypeID) {
+ bt.SizeType = uint32(id)
+}
+
func (bt *btfType) Size() uint32 {
// TODO: Panic here if wrong kind?
return bt.SizeType
@@ -190,13 +194,22 @@ func (bt *btfType) SetSize(size uint32) {
bt.SizeType = size
}
+func (bt *btfType) Marshal(w io.Writer, bo binary.ByteOrder) error {
+ buf := make([]byte, unsafe.Sizeof(*bt))
+ bo.PutUint32(buf[0:], bt.NameOff)
+ bo.PutUint32(buf[4:], bt.Info)
+ bo.PutUint32(buf[8:], bt.SizeType)
+ _, err := w.Write(buf)
+ return err
+}
+
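// A sketch (not from the diff, helper name hypothetical): Marshal above is
// behaviourally equivalent to binary.Write for this fixed 12-byte struct, but
// writes the three uint32 fields by hand, avoiding the reflection and
// allocations binary.Write incurs on every call. Assumes "bytes" is imported.
func marshalsIdentically(bt *btfType) bool {
	var a, b bytes.Buffer
	_ = binary.Write(&a, binary.LittleEndian, bt) // reflective path
	_ = bt.Marshal(&b, binary.LittleEndian)       // manual path
	return bytes.Equal(a.Bytes(), b.Bytes())      // always true
}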
type rawType struct {
btfType
data interface{}
}
func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
- if err := binary.Write(w, bo, &rt.btfType); err != nil {
+ if err := rt.btfType.Marshal(w, bo); err != nil {
return err
}
@@ -209,11 +222,11 @@ func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
// btfInt encodes additional data for integers.
//
-// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b
-// ? = undefined
-// e = encoding
-// o = offset (bitfields?)
-// b = bits (bitfields)
+// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b
+// ? = undefined
+// e = encoding
+// o = offset (bitfields?)
+// b = bits (bitfields)
type btfInt struct {
Raw uint32
}
@@ -275,7 +288,13 @@ type btfVariable struct {
type btfEnum struct {
NameOff uint32
- Val int32
+ Val uint32
+}
+
+type btfEnum64 struct {
+ NameOff uint32
+ ValLo32 uint32
+ ValHi32 uint32
}
type btfParam struct {
@@ -283,12 +302,16 @@ type btfParam struct {
Type TypeID
}
+type btfDeclTag struct {
+ ComponentIdx uint32
+}
+
func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, error) {
var header btfType
// because of the interleaving between types and struct members it is difficult to
// precompute the number of raw types this will parse
// this "guess" is a good first estimate
- sizeOfbtfType := uintptr(binary.Size(btfType{}))
+ sizeOfbtfType := uintptr(btfTypeLen)
tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2
types := make([]rawType, 0, tyMaxCount)
@@ -325,6 +348,11 @@ func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, err
case kindDatasec:
data = make([]btfVarSecinfo, header.Vlen())
case kindFloat:
+ case kindDeclTag:
+ data = new(btfDeclTag)
+ case kindTypeTag:
+ case kindEnum64:
+ data = make([]btfEnum64, header.Vlen())
default:
return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
}
diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types_string.go b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go
index 0e0c17d68ba9..b7a1b80d1518 100644
--- a/vendor/github.com/cilium/ebpf/btf/btf_types_string.go
+++ b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go
@@ -1,4 +1,4 @@
-// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage"; DO NOT EDIT.
+// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind"; DO NOT EDIT.
package btf
@@ -42,3 +42,39 @@ func (i VarLinkage) String() string {
}
return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]]
}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[kindUnknown-0]
+ _ = x[kindInt-1]
+ _ = x[kindPointer-2]
+ _ = x[kindArray-3]
+ _ = x[kindStruct-4]
+ _ = x[kindUnion-5]
+ _ = x[kindEnum-6]
+ _ = x[kindForward-7]
+ _ = x[kindTypedef-8]
+ _ = x[kindVolatile-9]
+ _ = x[kindConst-10]
+ _ = x[kindRestrict-11]
+ _ = x[kindFunc-12]
+ _ = x[kindFuncProto-13]
+ _ = x[kindVar-14]
+ _ = x[kindDatasec-15]
+ _ = x[kindFloat-16]
+ _ = x[kindDeclTag-17]
+ _ = x[kindTypeTag-18]
+ _ = x[kindEnum64-19]
+}
+
+const _btfKind_name = "UnknownIntPointerArrayStructUnionEnumForwardTypedefVolatileConstRestrictFuncFuncProtoVarDatasecFloatDeclTagTypeTagEnum64"
+
+var _btfKind_index = [...]uint8{0, 7, 10, 17, 22, 28, 33, 37, 44, 51, 59, 64, 72, 76, 85, 88, 95, 100, 107, 114, 120}
+
+func (i btfKind) String() string {
+ if i >= btfKind(len(_btfKind_index)-1) {
+ return "btfKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _btfKind_name[_btfKind_index[i]:_btfKind_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/core.go b/vendor/github.com/cilium/ebpf/btf/core.go
index c48754809355..a5c40d36af46 100644
--- a/vendor/github.com/cilium/ebpf/btf/core.go
+++ b/vendor/github.com/cilium/ebpf/btf/core.go
@@ -156,16 +156,25 @@ func (k coreKind) String() string {
}
}
-// CORERelocate calculates the difference in types between local and target.
+// CORERelocate calculates changes needed to adjust eBPF instructions for differences
+// in types.
//
// Returns a list of fixups which can be applied to instructions to make them
// match the target type(s).
//
// Fixups are returned in the order of relos, e.g. fixup[i] is the solution
// for relos[i].
-func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, error) {
- if local.byteOrder != target.byteOrder {
- return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
+func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([]COREFixup, error) {
+ if target == nil {
+ var err error
+ target, _, err = kernelSpec()
+ if err != nil {
+ return nil, fmt.Errorf("load kernel spec: %w", err)
+ }
+ }
+
+ if bo != target.byteOrder {
+ return nil, fmt.Errorf("can't relocate %s against %s", bo, target.byteOrder)
}
type reloGroup struct {
@@ -185,15 +194,14 @@ func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, er
return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
}
- id, err := local.TypeID(relo.typ)
- if err != nil {
- return nil, fmt.Errorf("%s: %w", relo.kind, err)
- }
-
result[i] = COREFixup{
- kind: relo.kind,
- local: uint32(id),
- target: uint32(id),
+ kind: relo.kind,
+ local: uint32(relo.id),
+ // NB: Using relo.id as the target here is incorrect, since
+ // it doesn't match the BTF we generate on the fly. This isn't
+ // too bad for now since there are no uses of the local type ID
+ // in the kernel, yet.
+ target: uint32(relo.id),
}
continue
}
@@ -214,7 +222,7 @@ func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, er
}
targets := target.namedTypes[newEssentialName(localTypeName)]
- fixups, err := coreCalculateFixups(local, target, localType, targets, group.relos)
+ fixups, err := coreCalculateFixups(group.relos, target, targets, bo)
if err != nil {
return nil, fmt.Errorf("relocate %s: %w", localType, err)
}
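// A call-site sketch (not from the diff) of the reworked signature: the local
// Spec parameter is gone because each CORERelocation now carries its local
// type and ID, and a nil target falls back to the cached kernel spec. The
// byte order argument is module-internal (internal.NativeEndian).
fixups, err := CORERelocate(relos, nil /* target: kernel BTF */, internal.NativeEndian)
if err != nil {
	return nil, fmt.Errorf("CO-RE fixups: %w", err)
}
// fixups[i] is the solution for relos[i], as documented above.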
@@ -229,34 +237,29 @@ func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, er
var errAmbiguousRelocation = errors.New("ambiguous relocation")
var errImpossibleRelocation = errors.New("impossible relocation")
+var errIncompatibleTypes = errors.New("incompatible types")
-// coreCalculateFixups calculates the fixups for the given relocations using
-// the "best" target.
+// coreCalculateFixups finds the target type that best matches all relocations.
+//
+// All relos must target the same type.
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
-func coreCalculateFixups(localSpec, targetSpec *Spec, local Type, targets []Type, relos []*CORERelocation) ([]COREFixup, error) {
- localID, err := localSpec.TypeID(local)
- if err != nil {
- return nil, fmt.Errorf("local type ID: %w", err)
- }
- local = Copy(local, UnderlyingType)
-
+func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []Type, bo binary.ByteOrder) ([]COREFixup, error) {
bestScore := len(relos)
var bestFixups []COREFixup
- for i := range targets {
- targetID, err := targetSpec.TypeID(targets[i])
+ for _, target := range targets {
+ targetID, err := targetSpec.TypeID(target)
if err != nil {
return nil, fmt.Errorf("target type ID: %w", err)
}
- target := Copy(targets[i], UnderlyingType)
score := 0 // lower is better
fixups := make([]COREFixup, 0, len(relos))
for _, relo := range relos {
- fixup, err := coreCalculateFixup(localSpec.byteOrder, local, localID, target, targetID, relo)
+ fixup, err := coreCalculateFixup(relo, target, targetID, bo)
if err != nil {
- return nil, fmt.Errorf("target %s: %w", target, err)
+ return nil, fmt.Errorf("target %s: %s: %w", target, relo.kind, err)
}
if fixup.poison || fixup.isNonExistant() {
score++
@@ -303,9 +306,11 @@ func coreCalculateFixups(localSpec, targetSpec *Spec, local Type, targets []Type
return bestFixups, nil
}
+var errNoSignedness = errors.New("no signedness")
+
// coreCalculateFixup calculates the fixup for a single local type, target type
// and relocation.
-func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo *CORERelocation) (COREFixup, error) {
+func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo binary.ByteOrder) (COREFixup, error) {
fixup := func(local, target uint32) (COREFixup, error) {
return COREFixup{kind: relo.kind, local: local, target: target}, nil
}
@@ -320,18 +325,20 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
}
zero := COREFixup{}
+ local := relo.typ
+
switch relo.kind {
case reloTypeIDTarget, reloTypeSize, reloTypeExists:
if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
- return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
+ return zero, fmt.Errorf("unexpected accessor %v", relo.accessor)
}
err := coreAreTypesCompatible(local, target)
- if errors.Is(err, errImpossibleRelocation) {
+ if errors.Is(err, errIncompatibleTypes) {
return poison()
}
if err != nil {
- return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
+ return zero, err
}
switch relo.kind {
@@ -339,7 +346,7 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
return fixup(1, 1)
case reloTypeIDTarget:
- return fixup(uint32(localID), uint32(targetID))
+ return fixup(uint32(relo.id), uint32(targetID))
case reloTypeSize:
localSize, err := Sizeof(local)
@@ -361,7 +368,7 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
return poison()
}
if err != nil {
- return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
+ return zero, err
}
switch relo.kind {
@@ -372,21 +379,8 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
return fixup(uint32(localValue.Value), uint32(targetValue.Value))
}
- case reloFieldSigned:
- switch local.(type) {
- case *Enum:
- return fixup(1, 1)
- case *Int:
- return fixup(
- uint32(local.(*Int).Encoding&Signed),
- uint32(target.(*Int).Encoding&Signed),
- )
- default:
- return fixupWithoutValidation(0, 0)
- }
-
- case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64:
- if _, ok := target.(*Fwd); ok {
+ case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64, reloFieldSigned:
+ if _, ok := as[*Fwd](target); ok {
// We can't relocate fields using a forward declaration, so
// skip it. If a non-forward declaration is present in the BTF
// we'll find it in one of the other iterations.
@@ -398,7 +392,7 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
return poison()
}
if err != nil {
- return zero, fmt.Errorf("target %s: %w", target, err)
+ return zero, err
}
maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
@@ -427,7 +421,7 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
case reloFieldLShiftU64:
var target uint32
- if byteOrder == binary.LittleEndian {
+ if bo == binary.LittleEndian {
targetSize, err := targetField.sizeBits()
if err != nil {
return zero, err
@@ -451,10 +445,40 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
}
return fixupWithoutValidation(0, uint32(64-targetSize))
+
+ case reloFieldSigned:
+ switch local := UnderlyingType(localField.Type).(type) {
+ case *Enum:
+ target, ok := as[*Enum](targetField.Type)
+ if !ok {
+ return zero, fmt.Errorf("target isn't *Enum but %T", targetField.Type)
+ }
+
+ return fixup(boolToUint32(local.Signed), boolToUint32(target.Signed))
+ case *Int:
+ target, ok := as[*Int](targetField.Type)
+ if !ok {
+ return zero, fmt.Errorf("target isn't *Int but %T", targetField.Type)
+ }
+
+ return fixup(
+ uint32(local.Encoding&Signed),
+ uint32(target.Encoding&Signed),
+ )
+ default:
+ return zero, fmt.Errorf("type %T: %w", local, errNoSignedness)
+ }
}
}
- return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported)
+ return zero, ErrNotSupported
+}
+
+func boolToUint32(val bool) uint32 {
+ if val {
+ return 1
+ }
+ return 0
}
/* coreAccessor contains a path through a struct. It contains at least one index.
@@ -516,7 +540,7 @@ func (ca coreAccessor) String() string {
}
func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
- e, ok := t.(*Enum)
+ e, ok := as[*Enum](t)
if !ok {
return nil, fmt.Errorf("not an enum: %s", t)
}
@@ -536,9 +560,9 @@ func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
// coreField represents the position of a "child" of a composite type from the
// start of that type.
//
-// /- start of composite
-// | offset * 8 | bitfieldOffset | bitfieldSize | ... |
-// \- start of field end of field -/
+// /- start of composite
+// | offset * 8 | bitfieldOffset | bitfieldSize | ... |
+// \- start of field end of field -/
type coreField struct {
Type Type
@@ -555,6 +579,10 @@ type coreField struct {
}
func (cf *coreField) adjustOffsetToNthElement(n int) error {
+ if n == 0 {
+ return nil
+ }
+
size, err := Sizeof(cf.Type)
if err != nil {
return err
@@ -597,7 +625,7 @@ func (cf *coreField) sizeBits() (Bits, error) {
// between kernel versions. Synthesise the size to make the shifts work.
size, err := Sizeof(cf.Type)
if err != nil {
- return 0, nil
+ return 0, err
}
return Bits(size * 8), nil
}
@@ -611,6 +639,10 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
local := coreField{Type: localT}
target := coreField{Type: targetT}
+ if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
+ return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
+ }
+
// The first index is used to offset a pointer of the base type like
// when accessing an array.
if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
@@ -621,13 +653,9 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
return coreField{}, coreField{}, err
}
- if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
- return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
- }
-
var localMaybeFlex, targetMaybeFlex bool
for i, acc := range localAcc[1:] {
- switch localType := local.Type.(type) {
+ switch localType := UnderlyingType(local.Type).(type) {
case composite:
// For composite types acc is used to find the field in the local type,
// and then we try to find a field in target with the same name.
@@ -638,21 +666,21 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
localMember := localMembers[acc]
if localMember.Name == "" {
- _, ok := localMember.Type.(composite)
+ localMemberType, ok := as[composite](localMember.Type)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
}
// This is an anonymous struct or union, ignore it.
local = coreField{
- Type: localMember.Type,
+ Type: localMemberType,
offset: local.offset + localMember.Offset.Bytes(),
}
localMaybeFlex = false
continue
}
- targetType, ok := target.Type.(composite)
+ targetType, ok := as[composite](target.Type)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
}
@@ -698,7 +726,7 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
case *Array:
// For arrays, acc is the index in the target.
- targetType, ok := target.Type.(*Array)
+ targetType, ok := as[*Array](target.Type)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
}
@@ -792,7 +820,7 @@ func coreFindMember(typ composite, name string) (Member, bool, error) {
continue
}
- comp, ok := member.Type.(composite)
+ comp, ok := as[composite](member.Type)
if !ok {
return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
}
@@ -811,7 +839,7 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal
return nil, nil, err
}
- targetEnum, ok := target.(*Enum)
+ targetEnum, ok := as[*Enum](target)
if !ok {
return nil, nil, errImpossibleRelocation
}
@@ -828,6 +856,13 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal
return nil, nil, errImpossibleRelocation
}
+// CheckTypeCompatibility checks local and target types for Compatibility according to CO-RE rules.
+//
+// Only layout compatibility is checked, ignoring names of the root type.
+func CheckTypeCompatibility(localType Type, targetType Type) error {
+ return coreAreTypesCompatible(localType, targetType)
+}
+
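// A usage sketch (not from the diff, helper name hypothetical) of the newly
// exported wrapper. Root type names are ignored; only CO-RE layout rules
// decide compatibility.
package btfsketch

import "github.com/cilium/ebpf/btf"

func sameLayout(local, target btf.Type) bool {
	// A nil error means the target type can stand in for the local one.
	return btf.CheckTypeCompatibility(local, target) == nil
}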
/* The comment below is from bpf_core_types_are_compat in libbpf.c:
*
* Check local and target types for compatibility. This check is used for
@@ -849,25 +884,26 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal
* These rules are not set in stone and probably will be adjusted as we get
* more experience with using BPF CO-RE relocations.
*
- * Returns errImpossibleRelocation if types are not compatible.
+ * Returns errIncompatibleTypes if types are not compatible.
*/
func coreAreTypesCompatible(localType Type, targetType Type) error {
+
var (
localTs, targetTs typeDeque
l, t = &localType, &targetType
depth = 0
)
- for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
+ for ; l != nil && t != nil; l, t = localTs.Shift(), targetTs.Shift() {
if depth >= maxTypeDepth {
return errors.New("types are nested too deep")
}
- localType = *l
- targetType = *t
+ localType = UnderlyingType(*l)
+ targetType = UnderlyingType(*t)
if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
- return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
+ return fmt.Errorf("type mismatch: %w", errIncompatibleTypes)
}
switch lv := (localType).(type) {
@@ -876,18 +912,18 @@ func coreAreTypesCompatible(localType Type, targetType Type) error {
case *Pointer, *Array:
depth++
- localType.walk(&localTs)
- targetType.walk(&targetTs)
+ walkType(localType, localTs.Push)
+ walkType(targetType, targetTs.Push)
case *FuncProto:
tv := targetType.(*FuncProto)
if len(lv.Params) != len(tv.Params) {
- return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
+ return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes)
}
depth++
- localType.walk(&localTs)
- targetType.walk(&targetTs)
+ walkType(localType, localTs.Push)
+ walkType(targetType, targetTs.Push)
default:
return fmt.Errorf("unsupported type %T", localType)
@@ -931,6 +967,9 @@ func coreAreTypesCompatible(localType Type, targetType Type) error {
* Returns errImpossibleRelocation if the members are not compatible.
*/
func coreAreMembersCompatible(localType Type, targetType Type) error {
+ localType = UnderlyingType(localType)
+ targetType = UnderlyingType(targetType)
+
doNamesMatch := func(a, b string) error {
if a == "" || b == "" {
// allow anonymous and named type to match
diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go
index 2c0e1afe299a..b764fb7bcc1b 100644
--- a/vendor/github.com/cilium/ebpf/btf/ext_info.go
+++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go
@@ -24,7 +24,7 @@ type ExtInfos struct {
// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF.
//
// Returns an error wrapping ErrNotFound if no ext infos are present.
-func loadExtInfosFromELF(file *internal.SafeELFFile, ts types, strings *stringTable) (*ExtInfos, error) {
+func loadExtInfosFromELF(file *internal.SafeELFFile, spec *Spec) (*ExtInfos, error) {
section := file.Section(".BTF.ext")
if section == nil {
return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound)
@@ -34,11 +34,11 @@ func loadExtInfosFromELF(file *internal.SafeELFFile, ts types, strings *stringTa
return nil, fmt.Errorf("compressed ext_info is not supported")
}
- return loadExtInfos(section.ReaderAt, file.ByteOrder, ts, strings)
+ return loadExtInfos(section.ReaderAt, file.ByteOrder, spec, spec.strings)
}
// loadExtInfos parses bare ext infos.
-func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringTable) (*ExtInfos, error) {
+func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec, strings *stringTable) (*ExtInfos, error) {
// Open unbuffered section reader. binary.Read() calls io.ReadFull on
// the header structs, resulting in one syscall per header.
headerRd := io.NewSectionReader(r, 0, math.MaxInt64)
@@ -60,7 +60,7 @@ func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringT
funcInfos := make(map[string][]funcInfo, len(btfFuncInfos))
for section, bfis := range btfFuncInfos {
- funcInfos[section], err = newFuncInfos(bfis, ts)
+ funcInfos[section], err = newFuncInfos(bfis, spec)
if err != nil {
return nil, fmt.Errorf("section %s: func infos: %w", section, err)
}
@@ -93,7 +93,7 @@ func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringT
coreRelos := make(map[string][]coreRelocationInfo, len(btfCORERelos))
for section, brs := range btfCORERelos {
- coreRelos[section], err = newRelocationInfos(brs, ts, strings)
+ coreRelos[section], err = newRelocationInfos(brs, spec, strings)
if err != nil {
return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err)
}
@@ -114,7 +114,7 @@ func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
iter := insns.Iterate()
for iter.Next() {
if len(funcInfos) > 0 && funcInfos[0].offset == iter.Offset {
- iter.Ins.Metadata.Set(funcInfoMeta{}, funcInfos[0].fn)
+ *iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos[0].fn)
funcInfos = funcInfos[1:]
}
@@ -132,17 +132,37 @@ func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
// MarshalExtInfos encodes function and line info embedded in insns into kernel
// wire format.
-func MarshalExtInfos(insns asm.Instructions, typeID func(Type) (TypeID, error)) (funcInfos, lineInfos []byte, _ error) {
+//
+// Returns ErrNotSupported if the kernel doesn't support BTF-associated programs.
+func MarshalExtInfos(insns asm.Instructions) (_ *Handle, funcInfos, lineInfos []byte, _ error) {
+ // Bail out early if the kernel doesn't support Func(Proto). If this is the
+ // case, func_info will also be unsupported.
+ if err := haveProgBTF(); err != nil {
+ return nil, nil, nil, err
+ }
+
iter := insns.Iterate()
- var fiBuf, liBuf bytes.Buffer
for iter.Next() {
+ _, ok := iter.Ins.Source().(*Line)
+ fn := FuncMetadata(iter.Ins)
+ if ok || fn != nil {
+ goto marshal
+ }
+ }
+
+ return nil, nil, nil, nil
+
+marshal:
+ var b Builder
+ var fiBuf, liBuf bytes.Buffer
+ for {
if fn := FuncMetadata(iter.Ins); fn != nil {
fi := &funcInfo{
fn: fn,
offset: iter.Offset,
}
- if err := fi.marshal(&fiBuf, typeID); err != nil {
- return nil, nil, fmt.Errorf("write func info: %w", err)
+ if err := fi.marshal(&fiBuf, &b); err != nil {
+ return nil, nil, nil, fmt.Errorf("write func info: %w", err)
}
}
@@ -151,12 +171,18 @@ func MarshalExtInfos(insns asm.Instructions, typeID func(Type) (TypeID, error))
line: line,
offset: iter.Offset,
}
- if err := li.marshal(&liBuf); err != nil {
- return nil, nil, fmt.Errorf("write line info: %w", err)
+ if err := li.marshal(&liBuf, &b); err != nil {
+ return nil, nil, nil, fmt.Errorf("write line info: %w", err)
}
}
+
+ if !iter.Next() {
+ break
+ }
}
- return fiBuf.Bytes(), liBuf.Bytes(), nil
+
+ handle, err := NewHandle(&b)
+ return handle, fiBuf.Bytes(), liBuf.Bytes(), err
}
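// A call-site sketch (not from the diff) of the new four-value contract:
// MarshalExtInfos now probes kernel support, collects types into its own
// Builder, and returns the loaded Handle that the returned func_info and
// line_info records reference by type ID.
handle, funcInfos, lineInfos, err := btf.MarshalExtInfos(insns)
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
	return err
}
if handle != nil {
	defer handle.Close() // keep the BTF alive until the program is loaded
	_ = funcInfos        // passed alongside handle.FD() in prog-load attrs
	_ = lineInfos
}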
// btfExtHeader is found at the start of the .BTF.ext section.
@@ -311,8 +337,8 @@ type bpfFuncInfo struct {
TypeID TypeID
}
-func newFuncInfo(fi bpfFuncInfo, ts types) (*funcInfo, error) {
- typ, err := ts.ByID(fi.TypeID)
+func newFuncInfo(fi bpfFuncInfo, spec *Spec) (*funcInfo, error) {
+ typ, err := spec.TypeByID(fi.TypeID)
if err != nil {
return nil, err
}
@@ -333,10 +359,10 @@ func newFuncInfo(fi bpfFuncInfo, ts types) (*funcInfo, error) {
}, nil
}
-func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) {
+func newFuncInfos(bfis []bpfFuncInfo, spec *Spec) ([]funcInfo, error) {
fis := make([]funcInfo, 0, len(bfis))
for _, bfi := range bfis {
- fi, err := newFuncInfo(bfi, ts)
+ fi, err := newFuncInfo(bfi, spec)
if err != nil {
return nil, fmt.Errorf("offset %d: %w", bfi.InsnOff, err)
}
@@ -349,8 +375,8 @@ func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) {
}
// marshal into the BTF wire format.
-func (fi *funcInfo) marshal(w io.Writer, typeID func(Type) (TypeID, error)) error {
- id, err := typeID(fi.fn)
+func (fi *funcInfo) marshal(w *bytes.Buffer, b *Builder) error {
+ id, err := b.Add(fi.fn)
if err != nil {
return err
}
@@ -358,10 +384,14 @@ func (fi *funcInfo) marshal(w io.Writer, typeID func(Type) (TypeID, error)) erro
InsnOff: uint32(fi.offset),
TypeID: id,
}
- return binary.Write(w, internal.NativeEndian, &bfi)
+ buf := make([]byte, FuncInfoSize)
+ internal.NativeEndian.PutUint32(buf, bfi.InsnOff)
+ internal.NativeEndian.PutUint32(buf[4:], uint32(bfi.TypeID))
+ _, err = w.Write(buf)
+ return err
}
-// parseLineInfos parses a func_info sub-section within .BTF.ext ito a map of
+// parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of
// func infos indexed by section name.
func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) {
recordSize, err := parseExtInfoRecordSize(r, bo)
@@ -428,12 +458,6 @@ type Line struct {
line string
lineNumber uint32
lineColumn uint32
-
- // TODO: We should get rid of the fields below, but for that we need to be
- // able to write BTF.
-
- fileNameOff uint32
- lineOff uint32
}
func (li *Line) FileName() string {
@@ -496,8 +520,6 @@ func newLineInfo(li bpfLineInfo, strings *stringTable) (*lineInfo, error) {
line,
lineNumber,
lineColumn,
- li.FileNameOff,
- li.LineOff,
},
asm.RawInstructionOffset(li.InsnOff),
}, nil
@@ -519,7 +541,7 @@ func newLineInfos(blis []bpfLineInfo, strings *stringTable) ([]lineInfo, error)
}
// marshal writes the binary representation of the LineInfo to w.
-func (li *lineInfo) marshal(w io.Writer) error {
+func (li *lineInfo) marshal(w *bytes.Buffer, b *Builder) error {
line := li.line
if line.lineNumber > bpfLineMax {
return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax)
@@ -529,13 +551,30 @@ func (li *lineInfo) marshal(w io.Writer) error {
return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax)
}
+ fileNameOff, err := b.addString(line.fileName)
+ if err != nil {
+ return fmt.Errorf("file name %q: %w", line.fileName, err)
+ }
+
+ lineOff, err := b.addString(line.line)
+ if err != nil {
+ return fmt.Errorf("line %q: %w", line.line, err)
+ }
+
bli := bpfLineInfo{
uint32(li.offset),
- line.fileNameOff,
- line.lineOff,
+ fileNameOff,
+ lineOff,
(line.lineNumber << bpfLineShift) | line.lineColumn,
}
- return binary.Write(w, internal.NativeEndian, &bli)
+
+ buf := make([]byte, LineInfoSize)
+ internal.NativeEndian.PutUint32(buf, bli.InsnOff)
+ internal.NativeEndian.PutUint32(buf[4:], bli.FileNameOff)
+ internal.NativeEndian.PutUint32(buf[8:], bli.LineOff)
+ internal.NativeEndian.PutUint32(buf[12:], bli.LineCol)
+ _, err = w.Write(buf)
+ return err
}
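
The LineCol word packs the line and column the way the kernel's struct
bpf_line_info does: the column occupies the low bits and the line number the
rest. A round-trip sketch, assuming the 10-bit column split used by
BPF_LINE_INFO_LINE_NUM/BPF_LINE_INFO_LINE_COL (so bpfLineShift would be 10):

    func packLineCol(line, col uint32) uint32 {
        return line<<10 | col&0x3ff
    }

    func unpackLineCol(lc uint32) (line, col uint32) {
        return lc >> 10, lc & 0x3ff
    }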
// parseLineInfos parses a line_info sub-section within .BTF.ext into a map of
@@ -605,9 +644,16 @@ type bpfCORERelo struct {
}
type CORERelocation struct {
+ // The local type of the relocation, stripped of typedefs and qualifiers.
typ Type
accessor coreAccessor
kind coreKind
+ // The ID of the local type in the source BTF.
+ id TypeID
+}
+
+func (cr *CORERelocation) String() string {
+ return fmt.Sprintf("CORERelocation(%s, %s[%s], local_id=%d)", cr.kind, cr.typ, cr.accessor, cr.id)
}
func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation {
@@ -620,8 +666,8 @@ type coreRelocationInfo struct {
offset asm.RawInstructionOffset
}
-func newRelocationInfo(relo bpfCORERelo, ts types, strings *stringTable) (*coreRelocationInfo, error) {
- typ, err := ts.ByID(relo.TypeID)
+func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*coreRelocationInfo, error) {
+ typ, err := spec.TypeByID(relo.TypeID)
if err != nil {
return nil, err
}
@@ -641,15 +687,16 @@ func newRelocationInfo(relo bpfCORERelo, ts types, strings *stringTable) (*coreR
typ,
accessor,
relo.Kind,
+ relo.TypeID,
},
asm.RawInstructionOffset(relo.InsnOff),
}, nil
}
-func newRelocationInfos(brs []bpfCORERelo, ts types, strings *stringTable) ([]coreRelocationInfo, error) {
+func newRelocationInfos(brs []bpfCORERelo, spec *Spec, strings *stringTable) ([]coreRelocationInfo, error) {
rs := make([]coreRelocationInfo, 0, len(brs))
for _, br := range brs {
- relo, err := newRelocationInfo(br, ts, strings)
+ relo, err := newRelocationInfo(br, spec, strings)
if err != nil {
return nil, fmt.Errorf("offset %d: %w", br.InsnOff, err)
}
diff --git a/vendor/github.com/cilium/ebpf/btf/format.go b/vendor/github.com/cilium/ebpf/btf/format.go
index e7688a2a6e83..e85220259e74 100644
--- a/vendor/github.com/cilium/ebpf/btf/format.go
+++ b/vendor/github.com/cilium/ebpf/btf/format.go
@@ -56,54 +56,40 @@ func (gf *GoFormatter) enumIdentifier(name, element string) string {
//
// It encodes https://golang.org/ref/spec#Type_declarations:
//
-// type foo struct { bar uint32; }
-// type bar int32
+// type foo struct { bar uint32; }
+// type bar int32
func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error {
if name == "" {
return fmt.Errorf("need a name for type %s", typ)
}
- switch v := skipQualifiers(typ).(type) {
- case *Enum:
- fmt.Fprintf(&gf.w, "type %s ", name)
- switch v.Size {
- case 1:
- gf.w.WriteString("int8")
- case 2:
- gf.w.WriteString("int16")
- case 4:
- gf.w.WriteString("int32")
- case 8:
- gf.w.WriteString("int64")
- default:
- return fmt.Errorf("%s: invalid enum size %d", typ, v.Size)
- }
-
- if len(v.Values) == 0 {
- return nil
- }
-
- gf.w.WriteString("; const ( ")
- for _, ev := range v.Values {
- id := gf.enumIdentifier(name, ev.Name)
- fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, ev.Value)
- }
- gf.w.WriteString(")")
+ typ = skipQualifiers(typ)
+ fmt.Fprintf(&gf.w, "type %s ", name)
+ if err := gf.writeTypeLit(typ, 0); err != nil {
+ return err
+ }
+ e, ok := typ.(*Enum)
+ if !ok || len(e.Values) == 0 {
return nil
+ }
- default:
- fmt.Fprintf(&gf.w, "type %s ", name)
- return gf.writeTypeLit(v, 0)
+ gf.w.WriteString("; const ( ")
+ for _, ev := range e.Values {
+ id := gf.enumIdentifier(name, ev.Name)
+ fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, ev.Value)
}
+ gf.w.WriteString(")")
+
+ return nil
}
// writeType outputs the name of a named type or a literal describing the type.
//
// It encodes https://golang.org/ref/spec#Types.
//
-// foo (if foo is a named type)
-// uint32
+// foo (if foo is a named type)
+// uint32
func (gf *GoFormatter) writeType(typ Type, depth int) error {
typ = skipQualifiers(typ)
@@ -122,8 +108,8 @@ func (gf *GoFormatter) writeType(typ Type, depth int) error {
//
// It encodes https://golang.org/ref/spec#TypeLit.
//
-// struct { bar uint32; }
-// uint32
+// struct { bar uint32; }
+// uint32
func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
depth++
if depth > maxTypeDepth {
@@ -133,10 +119,24 @@ func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
var err error
switch v := skipQualifiers(typ).(type) {
case *Int:
- gf.writeIntLit(v)
+ err = gf.writeIntLit(v)
case *Enum:
- gf.w.WriteString("int32")
+ if !v.Signed {
+ gf.w.WriteRune('u')
+ }
+ switch v.Size {
+ case 1:
+ gf.w.WriteString("int8")
+ case 2:
+ gf.w.WriteString("int16")
+ case 4:
+ gf.w.WriteString("int32")
+ case 8:
+ gf.w.WriteString("int64")
+ default:
+ err = fmt.Errorf("invalid enum size %d", v.Size)
+ }
case *Typedef:
err = gf.writeType(v.Type, depth)
@@ -166,19 +166,36 @@ func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
return nil
}
-func (gf *GoFormatter) writeIntLit(i *Int) {
- // NB: Encoding.IsChar is ignored.
- if i.Encoding.IsBool() && i.Size == 1 {
- gf.w.WriteString("bool")
- return
- }
-
+func (gf *GoFormatter) writeIntLit(i *Int) error {
bits := i.Size * 8
- if i.Encoding.IsSigned() {
- fmt.Fprintf(&gf.w, "int%d", bits)
- } else {
- fmt.Fprintf(&gf.w, "uint%d", bits)
+ switch i.Encoding {
+ case Bool:
+ if i.Size != 1 {
+ return fmt.Errorf("bool with size %d", i.Size)
+ }
+ gf.w.WriteString("bool")
+ case Char:
+ if i.Size != 1 {
+ return fmt.Errorf("char with size %d", i.Size)
+ }
+ // BTF doesn't have a way to specify the signedness of a char. Assume
+ // we are dealing with unsigned, since this works nicely with []byte
+ // in Go code.
+ fallthrough
+ case Unsigned, Signed:
+ stem := "uint"
+ if i.Encoding == Signed {
+ stem = "int"
+ }
+ if i.Size > 8 {
+ fmt.Fprintf(&gf.w, "[%d]byte /* %s%d */", i.Size, stem, i.Size*8)
+ } else {
+ fmt.Fprintf(&gf.w, "%s%d", stem, bits)
+ }
+ default:
+ return fmt.Errorf("can't encode %s", i.Encoding)
}
+ return nil
}
func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error {
@@ -199,11 +216,15 @@ func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int)
gf.writePadding(n)
}
- size, err := Sizeof(m.Type)
+ fieldSize, err := Sizeof(m.Type)
if err != nil {
return fmt.Errorf("field %d: %w", i, err)
}
- prevOffset = offset + uint32(size)
+
+ prevOffset = offset + uint32(fieldSize)
+ if prevOffset > size {
+ return fmt.Errorf("field %d of size %d exceeds type size %d", i, fieldSize, size)
+ }
if err := gf.writeStructField(m, depth); err != nil {
return fmt.Errorf("field %d: %w", i, err)
@@ -272,7 +293,11 @@ func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error {
prevOffset := uint32(0)
for i, vsi := range ds.Vars {
- v := vsi.Type.(*Var)
+ v, ok := vsi.Type.(*Var)
+ if !ok {
+ return fmt.Errorf("can't format %s as part of data section", vsi.Type)
+ }
+
if v.Linkage != GlobalVar {
// Ignore static, extern, etc. for now.
continue
diff --git a/vendor/github.com/cilium/ebpf/btf/handle.go b/vendor/github.com/cilium/ebpf/btf/handle.go
index 128e9b35cf3b..b6b3e87f504f 100644
--- a/vendor/github.com/cilium/ebpf/btf/handle.go
+++ b/vendor/github.com/cilium/ebpf/btf/handle.go
@@ -1,14 +1,142 @@
package btf
import (
+ "bytes"
"errors"
"fmt"
+ "math"
"os"
+ "github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
+// Handle is a reference to BTF loaded into the kernel.
+type Handle struct {
+ fd *sys.FD
+
+ // Size of the raw BTF in bytes.
+ size uint32
+
+ needsKernelBase bool
+}
+
+// NewHandle loads the contents of a [Builder] into the kernel.
+//
+// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF.
+func NewHandle(b *Builder) (*Handle, error) {
+ small := getByteSlice()
+ defer putByteSlice(small)
+
+ buf, err := b.Marshal(*small, KernelMarshalOptions())
+ if err != nil {
+ return nil, fmt.Errorf("marshal BTF: %w", err)
+ }
+
+ return NewHandleFromRawBTF(buf)
+}
+
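A minimal usage sketch for the new Builder/Handle pair (assumes a kernel with
BTF support and sufficient privileges; the Int type is arbitrary):

    package main

    import (
        "fmt"

        "github.com/cilium/ebpf/btf"
    )

    func main() {
        var b btf.Builder // the zero value is a valid, empty Builder
        if _, err := b.Add(&btf.Int{Name: "u32", Size: 4}); err != nil {
            panic(err)
        }

        h, err := btf.NewHandle(&b) // marshals and issues BPF_BTF_LOAD
        if err != nil {
            panic(err) // wraps ErrNotSupported on kernels without BTF
        }
        defer h.Close()

        fmt.Println("loaded BTF with fd", h.FD())
    }
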
+// NewHandleFromRawBTF loads raw BTF into the kernel.
+//
+// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF.
+func NewHandleFromRawBTF(btf []byte) (*Handle, error) {
+ if uint64(len(btf)) > math.MaxUint32 {
+ return nil, errors.New("BTF exceeds the maximum size")
+ }
+
+ attr := &sys.BtfLoadAttr{
+ Btf: sys.NewSlicePointer(btf),
+ BtfSize: uint32(len(btf)),
+ }
+
+ fd, err := sys.BtfLoad(attr)
+ if err == nil {
+ return &Handle{fd, attr.BtfSize, false}, nil
+ }
+
+ if err := haveBTF(); err != nil {
+ return nil, err
+ }
+
+ logBuf := make([]byte, 64*1024)
+ attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
+ attr.BtfLogSize = uint32(len(logBuf))
+ attr.BtfLogLevel = 1
+
+ // Up until at least kernel 6.0, the BTF verifier does not return ENOSPC
+ // if there are other verification errors. ENOSPC is only returned when
+ // the BTF blob is correct, a log was requested, and the provided buffer
+ // is too small.
+ _, ve := sys.BtfLoad(attr)
+ return nil, internal.ErrorWithLog("load btf", err, logBuf, errors.Is(ve, unix.ENOSPC))
+}
+
+// NewHandleFromID returns the BTF handle for a given id.
+//
+// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
+//
+// Returns ErrNotExist, if there is no BTF with the given id.
+//
+// Requires CAP_SYS_ADMIN.
+func NewHandleFromID(id ID) (*Handle, error) {
+ fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
+ Id: uint32(id),
+ })
+ if err != nil {
+ return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
+ }
+
+ info, err := newHandleInfoFromFD(fd)
+ if err != nil {
+ _ = fd.Close()
+ return nil, err
+ }
+
+ return &Handle{fd, info.size, info.IsModule()}, nil
+}
+
+// Spec parses the kernel BTF into Go types.
+//
+// base must contain type information for vmlinux if the handle is for
+// a kernel module. It may be nil otherwise.
+func (h *Handle) Spec(base *Spec) (*Spec, error) {
+ var btfInfo sys.BtfInfo
+ btfBuffer := make([]byte, h.size)
+ btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)
+
+ if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
+ return nil, err
+ }
+
+ if h.needsKernelBase && base == nil {
+ return nil, fmt.Errorf("missing base types")
+ }
+
+ return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, base)
+}
+
+// Close destroys the handle.
+//
+// Subsequent calls to FD will return an invalid value.
+func (h *Handle) Close() error {
+ if h == nil {
+ return nil
+ }
+
+ return h.fd.Close()
+}
+
+// FD returns the file descriptor for the handle.
+func (h *Handle) FD() int {
+ return h.fd.Int()
+}
+
+// Info returns metadata about the handle.
+func (h *Handle) Info() (*HandleInfo, error) {
+ return newHandleInfoFromFD(h.fd)
+}
+
// HandleInfo describes a Handle.
type HandleInfo struct {
// ID of this handle in the kernel. The ID is only valid as long as the
@@ -59,7 +187,7 @@ func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) {
}, nil
}
-// IsModule returns true if the BTF is for the kernel itself.
+// IsVmlinux returns true if the BTF is for the kernel itself.
func (i *HandleInfo) IsVmlinux() bool {
return i.IsKernel && i.Name == "vmlinux"
}
@@ -71,51 +199,89 @@ func (i *HandleInfo) IsModule() bool {
// HandleIterator allows enumerating BTF blobs loaded into the kernel.
type HandleIterator struct {
- // The ID of the last retrieved handle. Only valid after a call to Next.
- ID ID
- err error
+ // The ID of the current handle. Only valid after a call to Next.
+ ID ID
+ // The current Handle. Only valid until a call to Next.
+ // See Take if you want to retain the handle.
+ Handle *Handle
+ err error
}
-// Next retrieves a handle for the next BTF blob.
+// Next retrieves a handle for the next BTF object.
//
-// [Handle.Close] is called if *handle is non-nil to avoid leaking fds.
-//
-// Returns true if another BTF blob was found. Call [HandleIterator.Err] after
+// Returns true if another BTF object was found. Call [HandleIterator.Err] after
// the function returns false.
-func (it *HandleIterator) Next(handle **Handle) bool {
- if *handle != nil {
- (*handle).Close()
- *handle = nil
- }
-
+func (it *HandleIterator) Next() bool {
id := it.ID
for {
attr := &sys.BtfGetNextIdAttr{Id: id}
err := sys.BtfGetNextId(attr)
if errors.Is(err, os.ErrNotExist) {
// There are no more BTF objects.
- return false
+ break
} else if err != nil {
it.err = fmt.Errorf("get next BTF ID: %w", err)
- return false
+ break
}
id = attr.NextId
- *handle, err = NewHandleFromID(id)
+ handle, err := NewHandleFromID(id)
if errors.Is(err, os.ErrNotExist) {
// Try again with the next ID.
continue
} else if err != nil {
it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err)
- return false
+ break
}
- it.ID = id
+ it.Handle.Close()
+ it.ID, it.Handle = id, handle
return true
}
+
+ // No more handles or we encountered an error.
+ it.Handle.Close()
+ it.Handle = nil
+ return false
+}
+
+// Take ownership of the current handle.
+//
+// It's the caller's responsibility to close the handle.
+func (it *HandleIterator) Take() *Handle {
+ handle := it.Handle
+ it.Handle = nil
+ return handle
}
// Err returns an error if iteration failed for some reason.
func (it *HandleIterator) Err() error {
return it.err
}
+
+// FindHandle returns the first handle for which predicate returns true.
+//
+// Requires CAP_SYS_ADMIN.
+//
+// Returns an error wrapping ErrNotFound if predicate never returns true or if
+// there is no BTF loaded into the kernel.
+func FindHandle(predicate func(info *HandleInfo) bool) (*Handle, error) {
+ it := new(HandleIterator)
+ defer it.Handle.Close()
+
+ for it.Next() {
+ info, err := it.Handle.Info()
+ if err != nil {
+ return nil, fmt.Errorf("info for ID %d: %w", it.ID, err)
+ }
+
+ if predicate(info) {
+ return it.Take(), nil
+ }
+ }
+ if err := it.Err(); err != nil {
+ return nil, fmt.Errorf("iterate handles: %w", err)
+ }
+
+ return nil, fmt.Errorf("find handle: %w", ErrNotFound)
+}
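
For example, FindHandle can look up the BTF of a loaded kernel module (the
module name below is purely illustrative):

    h, err := btf.FindHandle(func(info *btf.HandleInfo) bool {
        return info.IsModule() && info.Name == "nf_conntrack"
    })
    if errors.Is(err, btf.ErrNotFound) {
        // No matching BTF object is loaded.
    }
    defer h.Close() // Close is a no-op on a nil Handle, so this is safe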
diff --git a/vendor/github.com/cilium/ebpf/btf/marshal.go b/vendor/github.com/cilium/ebpf/btf/marshal.go
new file mode 100644
index 000000000000..bfe53b41072c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/marshal.go
@@ -0,0 +1,543 @@
+package btf
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "sync"
+
+ "github.com/cilium/ebpf/internal"
+
+ "golang.org/x/exp/slices"
+)
+
+type MarshalOptions struct {
+ // Target byte order. Defaults to the system's native endianness.
+ Order binary.ByteOrder
+ // Remove function linkage information for compatibility with <5.6 kernels.
+ StripFuncLinkage bool
+}
+
+// KernelMarshalOptions will generate BTF suitable for the current kernel.
+func KernelMarshalOptions() *MarshalOptions {
+ return &MarshalOptions{
+ Order: internal.NativeEndian,
+ StripFuncLinkage: haveFuncLinkage() != nil,
+ }
+}
+
+// encoder turns Types into raw BTF.
+type encoder struct {
+ MarshalOptions
+
+ pending internal.Deque[Type]
+ buf *bytes.Buffer
+ strings *stringTableBuilder
+ ids map[Type]TypeID
+ lastID TypeID
+}
+
+var bufferPool = sync.Pool{
+ New: func() any {
+ buf := make([]byte, btfHeaderLen+128)
+ return &buf
+ },
+}
+
+func getByteSlice() *[]byte {
+ return bufferPool.Get().(*[]byte)
+}
+
+func putByteSlice(buf *[]byte) {
+ *buf = (*buf)[:0]
+ bufferPool.Put(buf)
+}
+
+// Builder turns Types into raw BTF.
+//
+// The default value may be used and represents an empty BTF blob. Void is
+// added implicitly if necessary.
+type Builder struct {
+ // Explicitly added types.
+ types []Type
+ // IDs for all added types which the user knows about.
+ stableIDs map[Type]TypeID
+ // Explicitly added strings.
+ strings *stringTableBuilder
+}
+
+// NewBuilder creates a Builder from a list of types.
+//
+// It is more efficient than calling [Add] individually.
+//
+// Returns an error if adding any of the types fails.
+func NewBuilder(types []Type) (*Builder, error) {
+ b := &Builder{
+ make([]Type, 0, len(types)),
+ make(map[Type]TypeID, len(types)),
+ nil,
+ }
+
+ for _, typ := range types {
+ _, err := b.Add(typ)
+ if err != nil {
+ return nil, fmt.Errorf("add %s: %w", typ, err)
+ }
+ }
+
+ return b, nil
+}
+
+// Add a Type and allocate a stable ID for it.
+//
+// Adding the identical Type multiple times is valid and will return the same ID.
+//
+// See [Type] for details on identity.
+func (b *Builder) Add(typ Type) (TypeID, error) {
+ if b.stableIDs == nil {
+ b.stableIDs = make(map[Type]TypeID)
+ }
+
+ if _, ok := typ.(*Void); ok {
+ // Equality is weird for void, since it is a zero sized type.
+ return 0, nil
+ }
+
+ if ds, ok := typ.(*Datasec); ok {
+ if err := datasecResolveWorkaround(b, ds); err != nil {
+ return 0, err
+ }
+ }
+
+ id, ok := b.stableIDs[typ]
+ if ok {
+ return id, nil
+ }
+
+ b.types = append(b.types, typ)
+
+ id = TypeID(len(b.types))
+ if int(id) != len(b.types) {
+ return 0, fmt.Errorf("no more type IDs")
+ }
+
+ b.stableIDs[typ] = id
+ return id, nil
+}
+
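Since Type identity is pointer identity (see the Type documentation in
types.go below), Add deduplicates only when it sees the same pointer again:

    a := &btf.Int{Size: 1}
    b := &btf.Int{Size: 1} // same shape, distinct identity

    var bld btf.Builder
    id1, _ := bld.Add(a)
    id2, _ := bld.Add(a) // same pointer: id2 == id1
    id3, _ := bld.Add(b) // different pointer: id3 != id1
    _, _, _ = id1, id2, id3
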
+// Marshal encodes all types in the Builder into BTF wire format.
+//
+// opts may be nil.
+func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) {
+ stb := b.strings
+ if stb == nil {
+ // Assume that most types are named. This makes encoding large BTF like
+ // vmlinux a lot cheaper.
+ stb = newStringTableBuilder(len(b.types))
+ } else {
+ // Avoid modifying the Builder's string table.
+ stb = b.strings.Copy()
+ }
+
+ if opts == nil {
+ opts = &MarshalOptions{Order: internal.NativeEndian}
+ }
+
+ // Reserve space for the BTF header.
+ buf = slices.Grow(buf, btfHeaderLen)[:btfHeaderLen]
+
+ w := internal.NewBuffer(buf)
+ defer internal.PutBuffer(w)
+
+ e := encoder{
+ MarshalOptions: *opts,
+ buf: w,
+ strings: stb,
+ lastID: TypeID(len(b.types)),
+ ids: make(map[Type]TypeID, len(b.types)),
+ }
+
+ // Ensure that types are marshaled in the exact order they were Add()ed.
+ // Otherwise the ID returned from Add() won't match.
+ e.pending.Grow(len(b.types))
+ for _, typ := range b.types {
+ e.pending.Push(typ)
+ e.ids[typ] = b.stableIDs[typ]
+ }
+
+ if err := e.deflatePending(); err != nil {
+ return nil, err
+ }
+
+ length := e.buf.Len()
+ typeLen := uint32(length - btfHeaderLen)
+
+ stringLen := e.strings.Length()
+ buf = e.strings.AppendEncoded(e.buf.Bytes())
+
+ // Fill out the header, and write it out.
+ header := &btfHeader{
+ Magic: btfMagic,
+ Version: 1,
+ Flags: 0,
+ HdrLen: uint32(btfHeaderLen),
+ TypeOff: 0,
+ TypeLen: typeLen,
+ StringOff: typeLen,
+ StringLen: uint32(stringLen),
+ }
+
+ err := binary.Write(sliceWriter(buf[:btfHeaderLen]), e.Order, header)
+ if err != nil {
+ return nil, fmt.Errorf("write header: %v", err)
+ }
+
+ return buf, nil
+}
+
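Marshal appends to the caller-provided buffer, so a scratch slice can be
reused across calls. A short sketch (passing nil options defaults to native
endianness, as above):

    scratch := make([]byte, 0, 4096)
    raw, err := bld.Marshal(scratch, btf.KernelMarshalOptions())
    if err != nil {
        panic(err)
    }
    // raw = header | type section | string section, ready for BPF_BTF_LOAD.
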
+// addString adds a string to the resulting BTF.
+//
+// Adding the same string multiple times will return the same result.
+//
+// Returns an offset into the string table, or an error if the string
+// contains invalid characters.
+func (b *Builder) addString(str string) (uint32, error) {
+ if b.strings == nil {
+ b.strings = newStringTableBuilder(0)
+ }
+
+ return b.strings.Add(str)
+}
+
+func (e *encoder) allocateID(typ Type) error {
+ id := e.lastID + 1
+ if id < e.lastID {
+ return errors.New("type ID overflow")
+ }
+
+ e.pending.Push(typ)
+ e.ids[typ] = id
+ e.lastID = id
+ return nil
+}
+
+// id returns the ID for the given type or panics with an error.
+func (e *encoder) id(typ Type) TypeID {
+ if _, ok := typ.(*Void); ok {
+ return 0
+ }
+
+ id, ok := e.ids[typ]
+ if !ok {
+ panic(fmt.Errorf("no ID for type %v", typ))
+ }
+
+ return id
+}
+
+func (e *encoder) deflatePending() error {
+ // Declare root outside of the loop to avoid repeated heap allocations.
+ var root Type
+ skip := func(t Type) (skip bool) {
+ if t == root {
+ // Force descending into the current root type even if it already
+ // has an ID. Otherwise we miss children of types that have their
+ // ID pre-allocated via Add.
+ return false
+ }
+
+ _, isVoid := t.(*Void)
+ _, alreadyEncoded := e.ids[t]
+ return isVoid || alreadyEncoded
+ }
+
+ for !e.pending.Empty() {
+ root = e.pending.Shift()
+
+ // Allocate IDs for all children of typ, including transitive dependencies.
+ iter := postorderTraversal(root, skip)
+ for iter.Next() {
+ if iter.Type == root {
+ // The iterator yields root at the end, do not allocate another ID.
+ break
+ }
+
+ if err := e.allocateID(iter.Type); err != nil {
+ return err
+ }
+ }
+
+ if err := e.deflateType(root); err != nil {
+ id := e.ids[root]
+ return fmt.Errorf("deflate %v with ID %d: %w", root, id, err)
+ }
+ }
+
+ return nil
+}
+
+func (e *encoder) deflateType(typ Type) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ var ok bool
+ err, ok = r.(error)
+ if !ok {
+ panic(r)
+ }
+ }
+ }()
+
+ var raw rawType
+ raw.NameOff, err = e.strings.Add(typ.TypeName())
+ if err != nil {
+ return err
+ }
+
+ switch v := typ.(type) {
+ case *Void:
+ return errors.New("Void is implicit in BTF wire format")
+
+ case *Int:
+ raw.SetKind(kindInt)
+ raw.SetSize(v.Size)
+
+ var bi btfInt
+ bi.SetEncoding(v.Encoding)
+ // We need to set bits in addition to size, since btf_type_int_is_regular
+ // otherwise flags this as a bitfield.
+ bi.SetBits(byte(v.Size) * 8)
+ raw.data = bi
+
+ case *Pointer:
+ raw.SetKind(kindPointer)
+ raw.SetType(e.id(v.Target))
+
+ case *Array:
+ raw.SetKind(kindArray)
+ raw.data = &btfArray{
+ e.id(v.Type),
+ e.id(v.Index),
+ v.Nelems,
+ }
+
+ case *Struct:
+ raw.SetKind(kindStruct)
+ raw.SetSize(v.Size)
+ raw.data, err = e.convertMembers(&raw.btfType, v.Members)
+
+ case *Union:
+ raw.SetKind(kindUnion)
+ raw.SetSize(v.Size)
+ raw.data, err = e.convertMembers(&raw.btfType, v.Members)
+
+ case *Enum:
+ raw.SetSize(v.size())
+ raw.SetVlen(len(v.Values))
+ raw.SetSigned(v.Signed)
+
+ if v.has64BitValues() {
+ raw.SetKind(kindEnum64)
+ raw.data, err = e.deflateEnum64Values(v.Values)
+ } else {
+ raw.SetKind(kindEnum)
+ raw.data, err = e.deflateEnumValues(v.Values)
+ }
+
+ case *Fwd:
+ raw.SetKind(kindForward)
+ raw.SetFwdKind(v.Kind)
+
+ case *Typedef:
+ raw.SetKind(kindTypedef)
+ raw.SetType(e.id(v.Type))
+
+ case *Volatile:
+ raw.SetKind(kindVolatile)
+ raw.SetType(e.id(v.Type))
+
+ case *Const:
+ raw.SetKind(kindConst)
+ raw.SetType(e.id(v.Type))
+
+ case *Restrict:
+ raw.SetKind(kindRestrict)
+ raw.SetType(e.id(v.Type))
+
+ case *Func:
+ raw.SetKind(kindFunc)
+ raw.SetType(e.id(v.Type))
+ if !e.StripFuncLinkage {
+ raw.SetLinkage(v.Linkage)
+ }
+
+ case *FuncProto:
+ raw.SetKind(kindFuncProto)
+ raw.SetType(e.id(v.Return))
+ raw.SetVlen(len(v.Params))
+ raw.data, err = e.deflateFuncParams(v.Params)
+
+ case *Var:
+ raw.SetKind(kindVar)
+ raw.SetType(e.id(v.Type))
+ raw.data = btfVariable{uint32(v.Linkage)}
+
+ case *Datasec:
+ raw.SetKind(kindDatasec)
+ raw.SetSize(v.Size)
+ raw.SetVlen(len(v.Vars))
+ raw.data = e.deflateVarSecinfos(v.Vars)
+
+ case *Float:
+ raw.SetKind(kindFloat)
+ raw.SetSize(v.Size)
+
+ case *declTag:
+ raw.SetKind(kindDeclTag)
+ raw.SetType(e.id(v.Type))
+ raw.data = &btfDeclTag{uint32(v.Index)}
+ raw.NameOff, err = e.strings.Add(v.Value)
+
+ case *typeTag:
+ raw.SetKind(kindTypeTag)
+ raw.SetType(e.id(v.Type))
+ raw.NameOff, err = e.strings.Add(v.Value)
+
+ default:
+ return fmt.Errorf("don't know how to deflate %T", v)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ return raw.Marshal(e.buf, e.Order)
+}
+
+func (e *encoder) convertMembers(header *btfType, members []Member) ([]btfMember, error) {
+ bms := make([]btfMember, 0, len(members))
+ isBitfield := false
+ for _, member := range members {
+ isBitfield = isBitfield || member.BitfieldSize > 0
+
+ offset := member.Offset
+ if isBitfield {
+ offset = member.BitfieldSize<<24 | (member.Offset & 0xffffff)
+ }
+
+ nameOff, err := e.strings.Add(member.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ bms = append(bms, btfMember{
+ nameOff,
+ e.id(member.Type),
+ uint32(offset),
+ })
+ }
+
+ header.SetVlen(len(members))
+ header.SetBitfield(isBitfield)
+ return bms, nil
+}
+
+func (e *encoder) deflateEnumValues(values []EnumValue) ([]btfEnum, error) {
+ bes := make([]btfEnum, 0, len(values))
+ for _, value := range values {
+ nameOff, err := e.strings.Add(value.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ if value.Value > math.MaxUint32 {
+ return nil, fmt.Errorf("value of enum %q exceeds 32 bits", value.Name)
+ }
+
+ bes = append(bes, btfEnum{
+ nameOff,
+ uint32(value.Value),
+ })
+ }
+
+ return bes, nil
+}
+
+func (e *encoder) deflateEnum64Values(values []EnumValue) ([]btfEnum64, error) {
+ bes := make([]btfEnum64, 0, len(values))
+ for _, value := range values {
+ nameOff, err := e.strings.Add(value.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ bes = append(bes, btfEnum64{
+ nameOff,
+ uint32(value.Value),
+ uint32(value.Value >> 32),
+ })
+ }
+
+ return bes, nil
+}
+
+func (e *encoder) deflateFuncParams(params []FuncParam) ([]btfParam, error) {
+ bps := make([]btfParam, 0, len(params))
+ for _, param := range params {
+ nameOff, err := e.strings.Add(param.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ bps = append(bps, btfParam{
+ nameOff,
+ e.id(param.Type),
+ })
+ }
+ return bps, nil
+}
+
+func (e *encoder) deflateVarSecinfos(vars []VarSecinfo) []btfVarSecinfo {
+ vsis := make([]btfVarSecinfo, 0, len(vars))
+ for _, v := range vars {
+ vsis = append(vsis, btfVarSecinfo{
+ e.id(v.Type),
+ v.Offset,
+ v.Size,
+ })
+ }
+ return vsis
+}
+
+// MarshalMapKV creates a BTF object containing a map key and value.
+//
+// The function is intended for the use of the ebpf package and may be removed
+// at any point in time.
+func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, err error) {
+ var b Builder
+
+ if key != nil {
+ keyID, err = b.Add(key)
+ if err != nil {
+ return nil, 0, 0, fmt.Errorf("add key type: %w", err)
+ }
+ }
+
+ if value != nil {
+ valueID, err = b.Add(value)
+ if err != nil {
+ return nil, 0, 0, fmt.Errorf("add value type: %w", err)
+ }
+ }
+
+ handle, err := NewHandle(&b)
+ if err != nil {
+ // Check for 'full' map BTF support, since kernels between 4.18 and 5.2
+ // already support BTF blobs for maps without Var or Datasec just fine.
+ if err := haveMapBTF(); err != nil {
+ return nil, 0, 0, err
+ }
+ }
+ return handle, keyID, valueID, err
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/strings.go b/vendor/github.com/cilium/ebpf/btf/strings.go
index 67626e0dd172..bc6aff28142d 100644
--- a/vendor/github.com/cilium/ebpf/btf/strings.go
+++ b/vendor/github.com/cilium/ebpf/btf/strings.go
@@ -6,6 +6,9 @@ import (
"errors"
"fmt"
"io"
+ "strings"
+
+ "golang.org/x/exp/maps"
)
type stringTable struct {
@@ -88,11 +91,6 @@ func (st *stringTable) lookup(offset uint32) (string, error) {
return st.strings[i], nil
}
-func (st *stringTable) Length() int {
- last := len(st.offsets) - 1
- return int(st.offsets[last]) + len(st.strings[last]) + 1
-}
-
func (st *stringTable) Marshal(w io.Writer) error {
for _, str := range st.strings {
_, err := io.WriteString(w, str)
@@ -107,6 +105,11 @@ func (st *stringTable) Marshal(w io.Writer) error {
return nil
}
+// Num returns the number of strings in the table.
+func (st *stringTable) Num() int {
+ return len(st.strings)
+}
+
// search is a copy of sort.Search specialised for uint32.
//
// Licensed under https://go.dev/LICENSE
@@ -126,3 +129,86 @@ func search(ints []uint32, needle uint32) int {
// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
return i
}
+
+// stringTableBuilder builds BTF string tables.
+type stringTableBuilder struct {
+ length uint32
+ strings map[string]uint32
+}
+
+// newStringTableBuilder creates a builder with the given capacity.
+//
+// capacity may be zero.
+func newStringTableBuilder(capacity int) *stringTableBuilder {
+ var stb stringTableBuilder
+
+ if capacity == 0 {
+ // Use the runtime's small default size.
+ stb.strings = make(map[string]uint32)
+ } else {
+ stb.strings = make(map[string]uint32, capacity)
+ }
+
+ // Ensure that the empty string is at index 0.
+ stb.append("")
+ return &stb
+}
+
+// Add a string to the table.
+//
+// Adding the same string multiple times will only store it once.
+func (stb *stringTableBuilder) Add(str string) (uint32, error) {
+ if strings.IndexByte(str, 0) != -1 {
+ return 0, fmt.Errorf("string contains null: %q", str)
+ }
+
+ offset, ok := stb.strings[str]
+ if ok {
+ return offset, nil
+ }
+
+ return stb.append(str), nil
+}
+
+func (stb *stringTableBuilder) append(str string) uint32 {
+ offset := stb.length
+ stb.length += uint32(len(str)) + 1
+ stb.strings[str] = offset
+ return offset
+}
+
+// Lookup finds the offset of a string in the table.
+//
+// Returns an error if str hasn't been added yet.
+func (stb *stringTableBuilder) Lookup(str string) (uint32, error) {
+ offset, ok := stb.strings[str]
+ if !ok {
+ return 0, fmt.Errorf("string %q is not in table", str)
+ }
+
+ return offset, nil
+}
+
+// Length returns the length in bytes.
+func (stb *stringTableBuilder) Length() int {
+ return int(stb.length)
+}
+
+// AppendEncoded appends the string table to the end of the provided buffer.
+func (stb *stringTableBuilder) AppendEncoded(buf []byte) []byte {
+ n := len(buf)
+ buf = append(buf, make([]byte, stb.Length())...)
+ strings := buf[n:]
+ for str, offset := range stb.strings {
+ copy(strings[offset:], str)
+ }
+ return buf
+}
+
+// Copy the string table builder.
+func (stb *stringTableBuilder) Copy() *stringTableBuilder {
+ return &stringTableBuilder{
+ stb.length,
+ maps.Clone(stb.strings),
+ }
+}
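
The builder hands out byte offsets into the concatenated, NUL-terminated
string blob, with the empty string pinned to offset 0. A standalone sketch of
the same bookkeeping (not the package API):

    func buildStringTable(strs []string) (map[string]uint32, []byte) {
        offsets := map[string]uint32{"": 0}
        blob := []byte{0} // the empty string occupies offset 0
        for _, s := range strs {
            if _, ok := offsets[s]; ok {
                continue // deduplicate
            }
            offsets[s] = uint32(len(blob))
            blob = append(blob, s...)
            blob = append(blob, 0) // NUL terminator
        }
        return offsets, blob
    }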
diff --git a/vendor/github.com/cilium/ebpf/btf/traversal.go b/vendor/github.com/cilium/ebpf/btf/traversal.go
new file mode 100644
index 000000000000..a3a9dec940a9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/traversal.go
@@ -0,0 +1,141 @@
+package btf
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf/internal"
+)
+
+// Functions to traverse a cyclic graph of types. The below was very useful:
+// https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#post-order-and-reverse-post-order
+
+type postorderIterator struct {
+ // Iteration skips types for which this function returns true.
+ skip func(Type) bool
+ // The root type. May be nil if skip(root) is true.
+ root Type
+
+ // Contains types which need to be either walked or yielded.
+ types typeDeque
+ // Contains a boolean whether the type has been walked or not.
+ walked internal.Deque[bool]
+ // The set of types which have been pushed onto types.
+ pushed map[Type]struct{}
+
+ // The current type. Only valid after a call to Next().
+ Type Type
+}
+
+// postorderTraversal iterates all types reachable from root by visiting the
+// leaves of the graph first.
+//
+// Types for which skip returns true are ignored. skip may be nil.
+func postorderTraversal(root Type, skip func(Type) (skip bool)) postorderIterator {
+ // Avoid allocations for the common case of a skipped root.
+ if skip != nil && skip(root) {
+ return postorderIterator{}
+ }
+
+ po := postorderIterator{root: root, skip: skip}
+ walkType(root, po.push)
+
+ return po
+}
+
+func (po *postorderIterator) push(t *Type) {
+ if _, ok := po.pushed[*t]; ok || *t == po.root {
+ return
+ }
+
+ if po.skip != nil && po.skip(*t) {
+ return
+ }
+
+ if po.pushed == nil {
+ // Lazily allocate pushed to avoid an allocation for Types without children.
+ po.pushed = make(map[Type]struct{})
+ }
+
+ po.pushed[*t] = struct{}{}
+ po.types.Push(t)
+ po.walked.Push(false)
+}
+
+// Next returns true if there is another Type to traverse.
+func (po *postorderIterator) Next() bool {
+ for !po.types.Empty() {
+ t := po.types.Pop()
+
+ if !po.walked.Pop() {
+ // Push the type again, so that we re-evaluate it in done state
+ // after all children have been handled.
+ po.types.Push(t)
+ po.walked.Push(true)
+
+ // Add all direct children to todo.
+ walkType(*t, po.push)
+ } else {
+ // We've walked this type previously, so we now know that all
+ // children have been handled.
+ po.Type = *t
+ return true
+ }
+ }
+
+ // Only return root once.
+ po.Type, po.root = po.root, nil
+ return po.Type != nil
+}
+
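Next pops every type twice: the first pop re-pushes it in walked state and
expands its children, the second pop yields it, which is what produces
postorder without recursion. The same trick on a toy tree, as an illustrative
standalone sketch:

    type node struct {
        name     string
        children []*node
    }

    func postorder(root *node) []string {
        type frame struct {
            n      *node
            walked bool
        }
        var out []string
        stack := []frame{{root, false}}
        seen := map[*node]bool{root: true}
        for len(stack) > 0 {
            f := stack[len(stack)-1]
            stack = stack[:len(stack)-1]
            if !f.walked {
                // First visit: re-push in walked state, then the children.
                stack = append(stack, frame{f.n, true})
                for _, c := range f.n.children {
                    if !seen[c] {
                        seen[c] = true
                        stack = append(stack, frame{c, false})
                    }
                }
                continue
            }
            // Second visit: all children are done, yield the node.
            out = append(out, f.n.name)
        }
        return out
    }
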
+// walkType calls fn on each child of typ.
+func walkType(typ Type, fn func(*Type)) {
+ // Explicitly type switch on the most common types to allow the inliner to
+ // do its work. This avoids allocating intermediate slices from walk() on
+ // the heap.
+ switch v := typ.(type) {
+ case *Void, *Int, *Enum, *Fwd, *Float:
+ // No children to traverse.
+ case *Pointer:
+ fn(&v.Target)
+ case *Array:
+ fn(&v.Index)
+ fn(&v.Type)
+ case *Struct:
+ for i := range v.Members {
+ fn(&v.Members[i].Type)
+ }
+ case *Union:
+ for i := range v.Members {
+ fn(&v.Members[i].Type)
+ }
+ case *Typedef:
+ fn(&v.Type)
+ case *Volatile:
+ fn(&v.Type)
+ case *Const:
+ fn(&v.Type)
+ case *Restrict:
+ fn(&v.Type)
+ case *Func:
+ fn(&v.Type)
+ case *FuncProto:
+ fn(&v.Return)
+ for i := range v.Params {
+ fn(&v.Params[i].Type)
+ }
+ case *Var:
+ fn(&v.Type)
+ case *Datasec:
+ for i := range v.Vars {
+ fn(&v.Vars[i].Type)
+ }
+ case *declTag:
+ fn(&v.Type)
+ case *typeTag:
+ fn(&v.Type)
+ case *cycle:
+ // cycle has children, but we ignore them deliberately.
+ default:
+ panic(fmt.Sprintf("don't know how to walk Type %T", v))
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go
index 402a363c28ac..68d4a1757166 100644
--- a/vendor/github.com/cilium/ebpf/btf/types.go
+++ b/vendor/github.com/cilium/ebpf/btf/types.go
@@ -1,6 +1,7 @@
package btf
import (
+ "errors"
"fmt"
"io"
"math"
@@ -8,14 +9,27 @@ import (
"strings"
"github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
)
const maxTypeDepth = 32
// TypeID identifies a type in a BTF section.
-type TypeID uint32
+type TypeID = sys.TypeID
// Type represents a type described by BTF.
+//
+// Identity of Type follows the [Go specification]: two Types are considered
+// equal if they have the same concrete type and the same dynamic value, aka
+// they point at the same location in memory. This means that the following
+// Types are considered distinct even though they have the same "shape".
+//
+// a := &Int{Size: 1}
+// b := &Int{Size: 1}
+// a != b
+//
+// [Go specification]: https://go.dev/ref/spec#Comparison_operators
type Type interface {
// Type can be formatted using the %s and %v verbs. %s outputs only the
// identity of the type, without any detail. %v outputs additional detail.
@@ -35,9 +49,7 @@ type Type interface {
// Make a copy of the type, without copying Type members.
copy() Type
- // Enumerate all nested Types. Repeated calls must visit nested
- // types in the same order.
- walk(*typeDeque)
+ // New implementations must update walkType.
}
var (
@@ -51,20 +63,11 @@ var (
_ Type = (*Var)(nil)
_ Type = (*Datasec)(nil)
_ Type = (*Float)(nil)
+ _ Type = (*declTag)(nil)
+ _ Type = (*typeTag)(nil)
+ _ Type = (*cycle)(nil)
)
-// types is a list of Type.
-//
-// The order determines the ID of a type.
-type types []Type
-
-func (ts types) ByID(id TypeID) (Type, error) {
- if int(id) > len(ts) {
- return nil, fmt.Errorf("type ID %d: %w", id, ErrNotFound)
- }
- return ts[id], nil
-}
-
// Void is the unit type of BTF.
type Void struct{}
@@ -72,40 +75,32 @@ func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) }
func (v *Void) TypeName() string { return "" }
func (v *Void) size() uint32 { return 0 }
func (v *Void) copy() Type { return (*Void)(nil) }
-func (v *Void) walk(*typeDeque) {}
type IntEncoding byte
+// Valid IntEncodings.
+//
+// These may look like they are flags, but they aren't.
const (
- Signed IntEncoding = 1 << iota
- Char
- Bool
+ Unsigned IntEncoding = 0
+ Signed IntEncoding = 1
+ Char IntEncoding = 2
+ Bool IntEncoding = 4
)
-func (ie IntEncoding) IsSigned() bool {
- return ie&Signed != 0
-}
-
-func (ie IntEncoding) IsChar() bool {
- return ie&Char != 0
-}
-
-func (ie IntEncoding) IsBool() bool {
- return ie&Bool != 0
-}
-
func (ie IntEncoding) String() string {
- switch {
- case ie.IsChar() && ie.IsSigned():
+ switch ie {
+ case Char:
+ // NB: There is no way to determine signedness for char.
return "char"
- case ie.IsChar() && !ie.IsSigned():
- return "uchar"
- case ie.IsBool():
+ case Bool:
return "bool"
- case ie.IsSigned():
+ case Signed:
return "signed"
- default:
+ case Unsigned:
return "unsigned"
+ default:
+ return fmt.Sprintf("IntEncoding(%d)", byte(ie))
}
}
@@ -126,7 +121,6 @@ func (i *Int) Format(fs fmt.State, verb rune) {
func (i *Int) TypeName() string { return i.Name }
func (i *Int) size() uint32 { return i.Size }
-func (i *Int) walk(*typeDeque) {}
func (i *Int) copy() Type {
cpy := *i
return &cpy
@@ -141,9 +135,8 @@ func (p *Pointer) Format(fs fmt.State, verb rune) {
formatType(fs, verb, p, "target=", p.Target)
}
-func (p *Pointer) TypeName() string { return "" }
-func (p *Pointer) size() uint32 { return 8 }
-func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) }
+func (p *Pointer) TypeName() string { return "" }
+func (p *Pointer) size() uint32 { return 8 }
func (p *Pointer) copy() Type {
cpy := *p
return &cpy
@@ -162,11 +155,6 @@ func (arr *Array) Format(fs fmt.State, verb rune) {
func (arr *Array) TypeName() string { return "" }
-func (arr *Array) walk(tdq *typeDeque) {
- tdq.push(&arr.Index)
- tdq.push(&arr.Type)
-}
-
func (arr *Array) copy() Type {
cpy := *arr
return &cpy
@@ -188,12 +176,6 @@ func (s *Struct) TypeName() string { return s.Name }
func (s *Struct) size() uint32 { return s.Size }
-func (s *Struct) walk(tdq *typeDeque) {
- for i := range s.Members {
- tdq.push(&s.Members[i].Type)
- }
-}
-
func (s *Struct) copy() Type {
cpy := *s
cpy.Members = copyMembers(s.Members)
@@ -220,12 +202,6 @@ func (u *Union) TypeName() string { return u.Name }
func (u *Union) size() uint32 { return u.Size }
-func (u *Union) walk(tdq *typeDeque) {
- for i := range u.Members {
- tdq.push(&u.Members[i].Type)
- }
-}
-
func (u *Union) copy() Type {
cpy := *u
cpy.Members = copyMembers(u.Members)
@@ -243,6 +219,7 @@ func copyMembers(orig []Member) []Member {
}
type composite interface {
+ Type
members() []Member
}
@@ -273,7 +250,9 @@ type Member struct {
type Enum struct {
Name string
// Size of the enum value in bytes.
- Size uint32
+ Size uint32
+ // True if the values should be interpreted as signed integers.
+ Signed bool
Values []EnumValue
}
@@ -288,11 +267,10 @@ func (e *Enum) TypeName() string { return e.Name }
// It is not a valid Type
type EnumValue struct {
Name string
- Value int32
+ Value uint64
}
-func (e *Enum) size() uint32 { return e.Size }
-func (e *Enum) walk(*typeDeque) {}
+func (e *Enum) size() uint32 { return e.Size }
func (e *Enum) copy() Type {
cpy := *e
cpy.Values = make([]EnumValue, len(e.Values))
@@ -300,6 +278,21 @@ func (e *Enum) copy() Type {
return &cpy
}
+// has64BitValues returns true if the Enum contains a value larger than 32 bits.
+// Kernels before 6.0 replace enum values that overrun u32 with zeroes.
+//
+// 64-bit enums have their Enum.Size attributes correctly set to 8, but if we
+// use the size attribute as a heuristic during BTF marshaling, we'll emit
+// ENUM64s to kernels that don't support them.
+func (e *Enum) has64BitValues() bool {
+ for _, v := range e.Values {
+ if v.Value > math.MaxUint32 {
+ return true
+ }
+ }
+ return false
+}
+
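The marshaling side (deflateEnum64Values in marshal.go) splits each value
into two 32-bit halves and inflation recombines them, so the encoding
round-trips:

    v := uint64(0x1122334455667788)
    lo, hi := uint32(v), uint32(v>>32) // btf_enum64 val_lo32 / val_hi32
    roundTripped := uint64(hi)<<32 | uint64(lo)
    // roundTripped == v
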
// FwdKind is the type of forward declaration.
type FwdKind int
@@ -332,7 +325,6 @@ func (f *Fwd) Format(fs fmt.State, verb rune) {
func (f *Fwd) TypeName() string { return f.Name }
-func (f *Fwd) walk(*typeDeque) {}
func (f *Fwd) copy() Type {
cpy := *f
return &cpy
@@ -350,7 +342,6 @@ func (td *Typedef) Format(fs fmt.State, verb rune) {
func (td *Typedef) TypeName() string { return td.Name }
-func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) }
func (td *Typedef) copy() Type {
cpy := *td
return &cpy
@@ -367,8 +358,7 @@ func (v *Volatile) Format(fs fmt.State, verb rune) {
func (v *Volatile) TypeName() string { return "" }
-func (v *Volatile) qualify() Type { return v.Type }
-func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) }
+func (v *Volatile) qualify() Type { return v.Type }
func (v *Volatile) copy() Type {
cpy := *v
return &cpy
@@ -385,8 +375,7 @@ func (c *Const) Format(fs fmt.State, verb rune) {
func (c *Const) TypeName() string { return "" }
-func (c *Const) qualify() Type { return c.Type }
-func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) }
+func (c *Const) qualify() Type { return c.Type }
func (c *Const) copy() Type {
cpy := *c
return &cpy
@@ -403,8 +392,7 @@ func (r *Restrict) Format(fs fmt.State, verb rune) {
func (r *Restrict) TypeName() string { return "" }
-func (r *Restrict) qualify() Type { return r.Type }
-func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) }
+func (r *Restrict) qualify() Type { return r.Type }
func (r *Restrict) copy() Type {
cpy := *r
return &cpy
@@ -422,13 +410,18 @@ func FuncMetadata(ins *asm.Instruction) *Func {
return fn
}
+// WithFuncMetadata adds a btf.Func to the Metadata of asm.Instruction.
+func WithFuncMetadata(ins asm.Instruction, fn *Func) asm.Instruction {
+ ins.Metadata.Set(funcInfoMeta{}, fn)
+ return ins
+}
+
func (f *Func) Format(fs fmt.State, verb rune) {
formatType(fs, verb, f, f.Linkage, "proto=", f.Type)
}
func (f *Func) TypeName() string { return f.Name }
-func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) }
func (f *Func) copy() Type {
cpy := *f
return &cpy
@@ -446,13 +439,6 @@ func (fp *FuncProto) Format(fs fmt.State, verb rune) {
func (fp *FuncProto) TypeName() string { return "" }
-func (fp *FuncProto) walk(tdq *typeDeque) {
- tdq.push(&fp.Return)
- for i := range fp.Params {
- tdq.push(&fp.Params[i].Type)
- }
-}
-
func (fp *FuncProto) copy() Type {
cpy := *fp
cpy.Params = make([]FuncParam, len(fp.Params))
@@ -478,7 +464,6 @@ func (v *Var) Format(fs fmt.State, verb rune) {
func (v *Var) TypeName() string { return v.Name }
-func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) }
func (v *Var) copy() Type {
cpy := *v
return &cpy
@@ -499,12 +484,6 @@ func (ds *Datasec) TypeName() string { return ds.Name }
func (ds *Datasec) size() uint32 { return ds.Size }
-func (ds *Datasec) walk(tdq *typeDeque) {
- for i := range ds.Vars {
- tdq.push(&ds.Vars[i].Type)
- }
-}
-
func (ds *Datasec) copy() Type {
cpy := *ds
cpy.Vars = make([]VarSecinfo, len(ds.Vars))
@@ -516,6 +495,7 @@ func (ds *Datasec) copy() Type {
//
// It is not a valid Type.
type VarSecinfo struct {
+ // Var or Func.
Type Type
Offset uint32
Size uint32
@@ -535,12 +515,48 @@ func (f *Float) Format(fs fmt.State, verb rune) {
func (f *Float) TypeName() string { return f.Name }
func (f *Float) size() uint32 { return f.Size }
-func (f *Float) walk(*typeDeque) {}
func (f *Float) copy() Type {
cpy := *f
return &cpy
}
+// declTag associates metadata with a declaration.
+type declTag struct {
+ Type Type
+ Value string
+ // The index this tag refers to in the target type. For composite types,
+ // a value of -1 indicates that the tag refers to the whole type. Otherwise
+ // it indicates which member or argument the tag applies to.
+ Index int
+}
+
+func (dt *declTag) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, dt, "type=", dt.Type, "value=", dt.Value, "index=", dt.Index)
+}
+
+func (dt *declTag) TypeName() string { return "" }
+func (dt *declTag) copy() Type {
+ cpy := *dt
+ return &cpy
+}
+
+// typeTag associates metadata with a type.
+type typeTag struct {
+ Type Type
+ Value string
+}
+
+func (tt *typeTag) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, tt, "type=", tt.Type, "value=", tt.Value)
+}
+
+func (tt *typeTag) TypeName() string { return "" }
+func (tt *typeTag) qualify() Type { return tt.Type }
+func (tt *typeTag) copy() Type {
+ cpy := *tt
+ return &cpy
+}
+
// cycle is a type which had to be elided since it exceeded maxTypeDepth.
type cycle struct {
root Type
@@ -549,7 +565,6 @@ type cycle struct {
func (c *cycle) ID() TypeID { return math.MaxUint32 }
func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) }
func (c *cycle) TypeName() string { return "" }
-func (c *cycle) walk(*typeDeque) {}
func (c *cycle) copy() Type {
cpy := *c
return &cpy
@@ -576,8 +591,11 @@ var (
_ qualifier = (*Const)(nil)
_ qualifier = (*Restrict)(nil)
_ qualifier = (*Volatile)(nil)
+ _ qualifier = (*typeTag)(nil)
)
+var errUnsizedType = errors.New("type is unsized")
+
// Sizeof returns the size of a type in bytes.
//
// Returns an error if the size can't be computed.
@@ -612,7 +630,7 @@ func Sizeof(typ Type) (int, error) {
continue
default:
- return 0, fmt.Errorf("unsized type %T", typ)
+ return 0, fmt.Errorf("type %T: %w", typ, errUnsizedType)
}
if n > 0 && elem > math.MaxInt64/n {
@@ -632,16 +650,33 @@ func Sizeof(typ Type) (int, error) {
// alignof returns the alignment of a type.
//
-// Currently only supports the subset of types necessary for bitfield relocations.
+// Returns an error if the Type can't be aligned, like an integer with an uneven
+// size. Currently only supports the subset of types necessary for bitfield
+// relocations.
func alignof(typ Type) (int, error) {
+ var n int
+
switch t := UnderlyingType(typ).(type) {
case *Enum:
- return int(t.size()), nil
+ n = int(t.size())
case *Int:
- return int(t.Size), nil
+ n = int(t.Size)
+ case *Array:
+ return alignof(t.Type)
default:
return 0, fmt.Errorf("can't calculate alignment of %T", t)
}
+
+ if !pow(n) {
+ return 0, fmt.Errorf("alignment value %d is not a power of two", n)
+ }
+
+ return n, nil
+}
+
+// pow returns true if n is a power of two.
+func pow(n int) bool {
+ return n != 0 && (n&(n-1)) == 0
}
// Transformer modifies a given Type and returns the result.
@@ -655,7 +690,7 @@ type Transformer func(Type) Type
// typ may form a cycle. If transform is not nil, it is called with the
// to be copied type, and the returned value is copied instead.
func Copy(typ Type, transform Transformer) Type {
- copies := make(copier)
+ copies := copier{copies: make(map[Type]Type)}
copies.copy(&typ, transform)
return typ
}
@@ -667,7 +702,7 @@ func copyTypes(types []Type, transform Transformer) []Type {
result := make([]Type, len(types))
copy(result, types)
- copies := make(copier)
+ copies := copier{copies: make(map[Type]Type, len(types))}
for i := range result {
copies.copy(&result[i], transform)
}
@@ -675,13 +710,15 @@ func copyTypes(types []Type, transform Transformer) []Type {
return result
}
-type copier map[Type]Type
+type copier struct {
+ copies map[Type]Type
+ work typeDeque
+}
-func (c copier) copy(typ *Type, transform Transformer) {
- var work typeDeque
- for t := typ; t != nil; t = work.pop() {
+func (c *copier) copy(typ *Type, transform Transformer) {
+ for t := typ; t != nil; t = c.work.Pop() {
// *t is the identity of the type.
- if cpy := c[*t]; cpy != nil {
+ if cpy := c.copies[*t]; cpy != nil {
*t = cpy
continue
}
@@ -693,108 +730,41 @@ func (c copier) copy(typ *Type, transform Transformer) {
cpy = (*t).copy()
}
- c[*t] = cpy
+ c.copies[*t] = cpy
*t = cpy
// Mark any nested types for copying.
- cpy.walk(&work)
- }
-}
-
-// typeDeque keeps track of pointers to types which still
-// need to be visited.
-type typeDeque struct {
- types []*Type
- read, write uint64
- mask uint64
-}
-
-func (dq *typeDeque) empty() bool {
- return dq.read == dq.write
-}
-
-// push adds a type to the stack.
-func (dq *typeDeque) push(t *Type) {
- if dq.write-dq.read < uint64(len(dq.types)) {
- dq.types[dq.write&dq.mask] = t
- dq.write++
- return
- }
-
- new := len(dq.types) * 2
- if new == 0 {
- new = 8
- }
-
- types := make([]*Type, new)
- pivot := dq.read & dq.mask
- n := copy(types, dq.types[pivot:])
- n += copy(types[n:], dq.types[:pivot])
- types[n] = t
-
- dq.types = types
- dq.mask = uint64(new) - 1
- dq.read, dq.write = 0, uint64(n+1)
-}
-
-// shift returns the first element or null.
-func (dq *typeDeque) shift() *Type {
- if dq.empty() {
- return nil
+ walkType(cpy, c.work.Push)
}
-
- index := dq.read & dq.mask
- t := dq.types[index]
- dq.types[index] = nil
- dq.read++
- return t
}
-// pop returns the last element or null.
-func (dq *typeDeque) pop() *Type {
- if dq.empty() {
- return nil
- }
-
- dq.write--
- index := dq.write & dq.mask
- t := dq.types[index]
- dq.types[index] = nil
- return t
-}
-
-// all returns all elements.
-//
-// The deque is empty after calling this method.
-func (dq *typeDeque) all() []*Type {
- length := dq.write - dq.read
- types := make([]*Type, 0, length)
- for t := dq.shift(); t != nil; t = dq.shift() {
- types = append(types, t)
- }
- return types
-}
+type typeDeque = internal.Deque[*Type]
// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
// it into a graph of Types connected via pointers.
//
-// If baseTypes are provided, then the raw types are
-// considered to be of a split BTF (e.g., a kernel module).
+// If base is provided, then the raw types are considered to be of a split BTF
+// (e.g., a kernel module).
//
-// Returns a slice of types indexed by TypeID. Since BTF ignores compilation
+// Returns a slice of types indexed by TypeID. Since BTF ignores compilation
// units, multiple types may share the same name. A Type may form a cyclic graph
// by pointing at itself.
-func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTable) ([]Type, error) {
+func inflateRawTypes(rawTypes []rawType, rawStrings *stringTable, base *Spec) ([]Type, error) {
types := make([]Type, 0, len(rawTypes)+1) // +1 for Void added to base types
- typeIDOffset := TypeID(1) // Void is TypeID(0), so the rest starts from TypeID(1)
+ // Void is defined to always be type ID 0, and is thus omitted from BTF.
+ types = append(types, (*Void)(nil))
- if baseTypes == nil {
- // Void is defined to always be type ID 0, and is thus omitted from BTF.
- types = append(types, (*Void)(nil))
- } else {
- // For split BTF, the next ID is max base BTF type ID + 1
- typeIDOffset = TypeID(len(baseTypes))
+ firstTypeID := TypeID(0)
+ if base != nil {
+ var err error
+ firstTypeID, err = base.nextTypeID()
+ if err != nil {
+ return nil, err
+ }
+
+ // Split BTF doesn't contain Void.
+ types = types[:0]
}
type fixupDef struct {
@@ -803,39 +773,42 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
}
var fixups []fixupDef
- fixup := func(id TypeID, typ *Type) {
- if id < TypeID(len(baseTypes)) {
- *typ = baseTypes[id]
- return
+ fixup := func(id TypeID, typ *Type) bool {
+ if id < firstTypeID {
+ if baseType, err := base.TypeByID(id); err == nil {
+ *typ = baseType
+ return true
+ }
}
- idx := id
- if baseTypes != nil {
- idx = id - TypeID(len(baseTypes))
- }
- if idx < TypeID(len(types)) {
+ idx := int(id - firstTypeID)
+ if idx < len(types) {
// We've already inflated this type, fix it up immediately.
*typ = types[idx]
- return
+ return true
}
+
fixups = append(fixups, fixupDef{id, typ})
+ return false
}
type assertion struct {
+ id TypeID
typ *Type
want reflect.Type
}
var assertions []assertion
- assert := func(typ *Type, want reflect.Type) error {
- if *typ != nil {
- // The type has already been fixed up, check the type immediately.
- if reflect.TypeOf(*typ) != want {
- return fmt.Errorf("expected %s, got %T", want, *typ)
- }
+ fixupAndAssert := func(id TypeID, typ *Type, want reflect.Type) error {
+ if !fixup(id, typ) {
+ assertions = append(assertions, assertion{id, typ, want})
return nil
}
- assertions = append(assertions, assertion{typ, want})
+
+ // The type has already been fixed up, check the type immediately.
+ if reflect.TypeOf(*typ) != want {
+ return fmt.Errorf("type ID %d: expected %s, got %T", id, want, *typ)
+ }
return nil
}
@@ -903,12 +876,17 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
return members, nil
}
- for i, raw := range rawTypes {
+ var declTags []*declTag
+ for _, raw := range rawTypes {
var (
- id = typeIDOffset + TypeID(i)
+ id = firstTypeID + TypeID(len(types))
typ Type
)
+ if id < firstTypeID {
+ return nil, fmt.Errorf("no more type IDs")
+ }
+
name, err := rawStrings.Lookup(raw.NameOff)
if err != nil {
return nil, fmt.Errorf("get name for type id %d: %w", id, err)
@@ -936,14 +914,14 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
typ = arr
case kindStruct:
- members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
+ members, err := convertMembers(raw.data.([]btfMember), raw.Bitfield())
if err != nil {
return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
}
typ = &Struct{name, raw.Size(), members}
case kindUnion:
- members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
+ members, err := convertMembers(raw.data.([]btfMember), raw.Bitfield())
if err != nil {
return nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
}
@@ -952,24 +930,23 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
case kindEnum:
rawvals := raw.data.([]btfEnum)
vals := make([]EnumValue, 0, len(rawvals))
+ signed := raw.Signed()
for i, btfVal := range rawvals {
name, err := rawStrings.Lookup(btfVal.NameOff)
if err != nil {
return nil, fmt.Errorf("get name for enum value %d: %s", i, err)
}
- vals = append(vals, EnumValue{
- Name: name,
- Value: btfVal.Val,
- })
+ value := uint64(btfVal.Val)
+ if signed {
+ // Sign extend values to 64 bit.
+ value = uint64(int32(btfVal.Val))
+ }
+ vals = append(vals, EnumValue{name, value})
}
- typ = &Enum{name, raw.Size(), vals}
+ typ = &Enum{name, raw.Size(), signed, vals}
case kindForward:
- if raw.KindFlag() {
- typ = &Fwd{name, FwdUnion}
- } else {
- typ = &Fwd{name, FwdStruct}
- }
+ typ = &Fwd{name, raw.FwdKind()}
case kindTypedef:
typedef := &Typedef{name, nil}
@@ -993,8 +970,7 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
case kindFunc:
fn := &Func{name, nil, raw.Linkage()}
- fixup(raw.Type(), &fn.Type)
- if err := assert(&fn.Type, reflect.TypeOf((*FuncProto)(nil))); err != nil {
+ if err := fixupAndAssert(raw.Type(), &fn.Type, reflect.TypeOf((*FuncProto)(nil))); err != nil {
return nil, err
}
typ = fn
@@ -1036,15 +1012,42 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
}
for i := range vars {
fixup(btfVars[i].Type, &vars[i].Type)
- if err := assert(&vars[i].Type, reflect.TypeOf((*Var)(nil))); err != nil {
- return nil, err
- }
}
- typ = &Datasec{name, raw.SizeType, vars}
+ typ = &Datasec{name, raw.Size(), vars}
case kindFloat:
typ = &Float{name, raw.Size()}
+ case kindDeclTag:
+ btfIndex := raw.data.(*btfDeclTag).ComponentIdx
+ if uint64(btfIndex) > math.MaxInt {
+ return nil, fmt.Errorf("type id %d: index exceeds int", id)
+ }
+
+ dt := &declTag{nil, name, int(int32(btfIndex))}
+ fixup(raw.Type(), &dt.Type)
+ typ = dt
+
+ declTags = append(declTags, dt)
+
+ case kindTypeTag:
+ tt := &typeTag{nil, name}
+ fixup(raw.Type(), &tt.Type)
+ typ = tt
+
+ case kindEnum64:
+ rawvals := raw.data.([]btfEnum64)
+ vals := make([]EnumValue, 0, len(rawvals))
+ for i, btfVal := range rawvals {
+ name, err := rawStrings.Lookup(btfVal.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err)
+ }
+ value := (uint64(btfVal.ValHi32) << 32) | uint64(btfVal.ValLo32)
+ vals = append(vals, EnumValue{name, value})
+ }
+ typ = &Enum{name, raw.Size(), raw.Signed(), vals}
+
default:
return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
}
@@ -1053,19 +1056,20 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
}
for _, fixup := range fixups {
- i := int(fixup.id)
- if i >= len(types)+len(baseTypes) {
- return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
+ if fixup.id < firstTypeID {
+ return nil, fmt.Errorf("fixup for base type id %d is not expected", fixup.id)
}
- if i < len(baseTypes) {
- return nil, fmt.Errorf("fixup for base type id %d is not expected", i)
+
+ idx := int(fixup.id - firstTypeID)
+ if idx >= len(types) {
+ return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
}
- *fixup.typ = types[i-len(baseTypes)]
+ *fixup.typ = types[idx]
}
for _, bitfieldFixup := range bitfieldFixups {
- if bitfieldFixup.id < TypeID(len(baseTypes)) {
+ if bitfieldFixup.id < firstTypeID {
return nil, fmt.Errorf("bitfield fixup from split to base types is not expected")
}
@@ -1079,7 +1083,29 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
for _, assertion := range assertions {
if reflect.TypeOf(*assertion.typ) != assertion.want {
- return nil, fmt.Errorf("expected %s, got %T", assertion.want, *assertion.typ)
+ return nil, fmt.Errorf("type ID %d: expected %s, got %T", assertion.id, assertion.want, *assertion.typ)
+ }
+ }
+
+ for _, dt := range declTags {
+ switch t := dt.Type.(type) {
+ case *Var, *Typedef:
+ if dt.Index != -1 {
+ return nil, fmt.Errorf("type %s: index %d is not -1", dt, dt.Index)
+ }
+
+ case composite:
+ if dt.Index >= len(t.members()) {
+ return nil, fmt.Errorf("type %s: index %d exceeds members of %s", dt, dt.Index, t)
+ }
+
+ case *Func:
+ if dt.Index >= len(t.Type.(*FuncProto).Params) {
+ return nil, fmt.Errorf("type %s: index %d exceeds params of %s", dt, dt.Index, t)
+ }
+
+ default:
+ return nil, fmt.Errorf("type %s: decl tag for type %s is not supported", dt, t)
}
}
@@ -1123,6 +1149,29 @@ func UnderlyingType(typ Type) Type {
return &cycle{typ}
}
+// as returns typ if it is of type T. Otherwise it peels qualifiers and Typedefs
+// until it finds a T.
+//
+// Returns the zero value and false if there is no T or if the type is nested
+// too deeply.
+func as[T Type](typ Type) (T, bool) {
+ for depth := 0; depth <= maxTypeDepth; depth++ {
+ switch v := (typ).(type) {
+ case T:
+ return v, true
+ case qualifier:
+ typ = v.qualify()
+ case *Typedef:
+ typ = v.Type
+ default:
+ goto notFound
+ }
+ }
+notFound:
+ var zero T
+ return zero, false
+}
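
As a usage sketch (hypothetical, since as is unexported and only callable from inside the btf package), resolving a typedef chain to its underlying Int:

	// Hypothetical same-package sketch: a Typedef wrapping an Int is peeled
	// until the *Int is found; a *Struct lookup on the same value fails.
	td := &Typedef{Name: "u32", Type: &Int{Size: 4}}
	if i, ok := as[*Int](td); ok {
		fmt.Printf("resolved to Int of size %d\n", i.Size) // size 4
	}
	if _, ok := as[*Struct](td); !ok {
		fmt.Println("no struct behind this typedef")
	}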
+
type formatState struct {
fmt.State
depth int
@@ -1145,10 +1194,7 @@ func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{})
return
}
- // This is the same as %T, but elides the package name. Assumes that
- // formattableType is implemented by a pointer receiver.
- goTypeName := reflect.TypeOf(t).Elem().Name()
- _, _ = io.WriteString(f, goTypeName)
+ _, _ = io.WriteString(f, internal.GoTypeName(t))
if name := t.TypeName(); name != "" {
// Output BTF type name if present.
diff --git a/vendor/github.com/cilium/ebpf/btf/workarounds.go b/vendor/github.com/cilium/ebpf/btf/workarounds.go
new file mode 100644
index 000000000000..12a89b87eedb
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/workarounds.go
@@ -0,0 +1,26 @@
+package btf
+
+// datasecResolveWorkaround ensures that certain vars in a Datasec are added
+// to a Spec before the Datasec. This avoids a bug in kernel BTF validation.
+//
+// See https://lore.kernel.org/bpf/20230302123440.1193507-1-lmb@isovalent.com/
+func datasecResolveWorkaround(b *Builder, ds *Datasec) error {
+ for _, vsi := range ds.Vars {
+ v, ok := vsi.Type.(*Var)
+ if !ok {
+ continue
+ }
+
+ switch v.Type.(type) {
+ case *Typedef, *Volatile, *Const, *Restrict, *typeTag:
+ // NB: We must never call Add on a Datasec, otherwise we risk
+ // infinite recursion.
+ _, err := b.Add(v.Type)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go
index 8c2ddc380214..fb720bebdb7b 100644
--- a/vendor/github.com/cilium/ebpf/collection.go
+++ b/vendor/github.com/cilium/ebpf/collection.go
@@ -9,6 +9,8 @@ import (
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/kconfig"
)
// CollectionOptions control loading a collection into the kernel.
@@ -107,12 +109,22 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
return nil
}
+// MissingConstantsError is returned by [CollectionSpec.RewriteConstants].
+type MissingConstantsError struct {
+ // The constants missing from .rodata.
+ Constants []string
+}
+
+func (m *MissingConstantsError) Error() string {
+ return fmt.Sprintf("some constants are missing from .rodata: %s", strings.Join(m.Constants, ", "))
+}
+
// RewriteConstants replaces the value of multiple constants.
//
// The constant must be defined like so in the C program:
//
-// volatile const type foobar;
-// volatile const type foobar = default;
+// volatile const type foobar;
+// volatile const type foobar = default;
//
// Replacement values must be of the same length as the C sizeof(type).
// If necessary, they are marshalled according to the same rules as
@@ -120,7 +132,7 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
//
// From Linux 5.5 the verifier will use constants to eliminate dead code.
//
-// Returns an error if a constant doesn't exist.
+// Returns an error wrapping [MissingConstantsError] if a constant doesn't exist.
func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error {
replaced := make(map[string]bool)
@@ -151,6 +163,10 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error
continue
}
+ if _, ok := v.Type.(*btf.Var); !ok {
+ return fmt.Errorf("section %s: unexpected type %T for variable %s", name, v.Type, vname)
+ }
+
if replaced[vname] {
return fmt.Errorf("section %s: duplicate variable %s", name, vname)
}
@@ -180,7 +196,7 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error
}
if len(missing) != 0 {
- return fmt.Errorf("spec is missing one or more constants: %s", strings.Join(missing, ","))
+ return fmt.Errorf("rewrite constants: %w", &MissingConstantsError{Constants: missing})
}
return nil
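
Since the error is wrapped with %w, callers can unwrap it with errors.As; a minimal sketch (spec and the constant name are assumptions):

	func rewrite(spec *ebpf.CollectionSpec) {
		err := spec.RewriteConstants(map[string]interface{}{"debug_level": uint32(1)})

		var mce *ebpf.MissingConstantsError
		if errors.As(err, &mce) {
			// mce.Constants lists every requested name not found in .rodata.
			log.Printf("missing constants: %v", mce.Constants)
		}
	}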
@@ -198,11 +214,11 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error
// The tag's value specifies the name of the program or map as
// found in the CollectionSpec.
//
-// struct {
-// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"`
-// Bar *ebpf.MapSpec `ebpf:"bar_map"`
-// Ignored int
-// }
+// struct {
+// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"`
+// Bar *ebpf.MapSpec `ebpf:"bar_map"`
+// Ignored int
+// }
//
// Returns an error if any of the eBPF objects can't be found, or
// if the same MapSpec or ProgramSpec is assigned multiple times.
@@ -249,11 +265,11 @@ func (cs *CollectionSpec) Assign(to interface{}) error {
// dependent resources are loaded into the kernel and populated with values if
// specified.
//
-// struct {
-// Foo *ebpf.Program `ebpf:"xdp_foo"`
-// Bar *ebpf.Map `ebpf:"bar_map"`
-// Ignored int
-// }
+// struct {
+// Foo *ebpf.Program `ebpf:"xdp_foo"`
+// Bar *ebpf.Map `ebpf:"bar_map"`
+// Ignored int
+// }
//
// opts may be nil.
//
@@ -386,42 +402,11 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Co
}, nil
}
-type handleCache struct {
- btfHandles map[*btf.Spec]*btf.Handle
-}
-
-func newHandleCache() *handleCache {
- return &handleCache{
- btfHandles: make(map[*btf.Spec]*btf.Handle),
- }
-}
-
-func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) {
- if hc.btfHandles[spec] != nil {
- return hc.btfHandles[spec], nil
- }
-
- handle, err := btf.NewHandle(spec)
- if err != nil {
- return nil, err
- }
-
- hc.btfHandles[spec] = handle
- return handle, nil
-}
-
-func (hc handleCache) close() {
- for _, handle := range hc.btfHandles {
- handle.Close()
- }
-}
-
type collectionLoader struct {
coll *CollectionSpec
opts *CollectionOptions
maps map[string]*Map
programs map[string]*Program
- handles *handleCache
}
func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) {
@@ -436,7 +421,7 @@ func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collec
return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name)
}
- if err := spec.checkCompatibility(m); err != nil {
+ if err := spec.Compatible(m); err != nil {
return nil, fmt.Errorf("using replacement map %s: %w", spec.Name, err)
}
}
@@ -446,13 +431,11 @@ func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collec
opts,
make(map[string]*Map),
make(map[string]*Program),
- newHandleCache(),
}, nil
}
// close all resources left over in the collectionLoader.
func (cl *collectionLoader) close() {
- cl.handles.close()
for _, m := range cl.maps {
m.Close()
}
@@ -471,10 +454,6 @@ func (cl *collectionLoader) loadMap(mapName string) (*Map, error) {
return nil, fmt.Errorf("missing map %s", mapName)
}
- if mapSpec.BTF != nil && cl.coll.Types != mapSpec.BTF {
- return nil, fmt.Errorf("map %s: BTF doesn't match collection", mapName)
- }
-
if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok {
// Clone the map to avoid closing user's map later on.
m, err := replaceMap.Clone()
@@ -486,7 +465,7 @@ func (cl *collectionLoader) loadMap(mapName string) (*Map, error) {
return m, nil
}
- m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.handles)
+ m, err := newMapWithOptions(mapSpec, cl.opts.Maps)
if err != nil {
return nil, fmt.Errorf("map %s: %w", mapName, err)
}
@@ -511,10 +490,6 @@ func (cl *collectionLoader) loadProgram(progName string) (*Program, error) {
return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName)
}
- if progSpec.BTF != nil && cl.coll.Types != progSpec.BTF {
- return nil, fmt.Errorf("program %s: BTF doesn't match collection", progName)
- }
-
progSpec = progSpec.Copy()
// Rewrite any reference to a valid map in the program's instructions,
@@ -543,7 +518,7 @@ func (cl *collectionLoader) loadProgram(progName string) (*Program, error) {
}
}
- prog, err := newProgramWithOptions(progSpec, cl.opts.Programs, cl.handles)
+ prog, err := newProgramWithOptions(progSpec, cl.opts.Programs)
if err != nil {
return nil, fmt.Errorf("program %s: %w", progName, err)
}
@@ -559,17 +534,22 @@ func (cl *collectionLoader) populateMaps() error {
return fmt.Errorf("missing map spec %s", mapName)
}
- mapSpec = mapSpec.Copy()
-
// MapSpecs that refer to inner maps or programs within the same
// CollectionSpec do so using strings. These strings are used as the key
// to look up the respective object in the Maps or Programs fields.
// Resolve those references to actual Map or Program resources that
// have been loaded into the kernel.
- for i, kv := range mapSpec.Contents {
- if objName, ok := kv.Value.(string); ok {
- switch mapSpec.Type {
- case ProgramArray:
+ if mapSpec.Type.canStoreMap() || mapSpec.Type.canStoreProgram() {
+ mapSpec = mapSpec.Copy()
+
+ for i, kv := range mapSpec.Contents {
+ objName, ok := kv.Value.(string)
+ if !ok {
+ continue
+ }
+
+ switch t := mapSpec.Type; {
+ case t.canStoreProgram():
// loadProgram is idempotent and could return an existing Program.
prog, err := cl.loadProgram(objName)
if err != nil {
@@ -577,7 +557,7 @@ func (cl *collectionLoader) populateMaps() error {
}
mapSpec.Contents[i] = MapKV{kv.Key, prog}
- case ArrayOfMaps, HashOfMaps:
+ case t.canStoreMap():
// loadMap is idempotent and could return an existing Map.
innerMap, err := cl.loadMap(objName)
if err != nil {
@@ -597,6 +577,95 @@ func (cl *collectionLoader) populateMaps() error {
return nil
}
+// resolveKconfig resolves all variables declared in .kconfig and populates
+// m.Contents. Does nothing if the given m.Contents is non-empty.
+func resolveKconfig(m *MapSpec) error {
+ ds, ok := m.Value.(*btf.Datasec)
+ if !ok {
+ return errors.New("map value is not a Datasec")
+ }
+
+ type configInfo struct {
+ offset uint32
+ typ btf.Type
+ }
+
+ configs := make(map[string]configInfo)
+
+ data := make([]byte, ds.Size)
+ for _, vsi := range ds.Vars {
+ v := vsi.Type.(*btf.Var)
+ n := v.TypeName()
+
+ switch n {
+ case "LINUX_KERNEL_VERSION":
+ if integer, ok := v.Type.(*btf.Int); !ok || integer.Size != 4 {
+ return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type)
+ }
+
+ kv, err := internal.KernelVersion()
+ if err != nil {
+ return fmt.Errorf("getting kernel version: %w", err)
+ }
+ internal.NativeEndian.PutUint32(data[vsi.Offset:], kv.Kernel())
+
+ case "LINUX_HAS_SYSCALL_WRAPPER":
+ if integer, ok := v.Type.(*btf.Int); !ok || integer.Size != 4 {
+ return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type)
+ }
+ var value uint32 = 1
+ if err := haveSyscallWrapper(); errors.Is(err, ErrNotSupported) {
+ value = 0
+ } else if err != nil {
+ return fmt.Errorf("unable to derive a value for LINUX_HAS_SYSCALL_WRAPPER: %w", err)
+ }
+
+ internal.NativeEndian.PutUint32(data[vsi.Offset:], value)
+
+ default: // Catch CONFIG_*.
+ configs[n] = configInfo{
+ offset: vsi.Offset,
+ typ: v.Type,
+ }
+ }
+ }
+
+ // We only parse the kconfig file if a CONFIG_* variable was found.
+ if len(configs) > 0 {
+ f, err := kconfig.Find()
+ if err != nil {
+ return fmt.Errorf("cannot find a kconfig file: %w", err)
+ }
+ defer f.Close()
+
+ filter := make(map[string]struct{}, len(configs))
+ for config := range configs {
+ filter[config] = struct{}{}
+ }
+
+ kernelConfig, err := kconfig.Parse(f, filter)
+ if err != nil {
+ return fmt.Errorf("cannot parse kconfig file: %w", err)
+ }
+
+ for n, info := range configs {
+ value, ok := kernelConfig[n]
+ if !ok {
+ return fmt.Errorf("config option %q does not exists for this kernel", n)
+ }
+
+ err := kconfig.PutValue(data[info.offset:], info.typ, value)
+ if err != nil {
+ return fmt.Errorf("problem adding value for %s: %w", n, err)
+ }
+ }
+ }
+
+ m.Contents = []MapKV{{uint32(0), data}}
+
+ return nil
+}
+
// LoadCollection reads an object file and creates and loads its declared
// resources into the kernel.
//
diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go
index df278895c631..8d92672eb144 100644
--- a/vendor/github.com/cilium/ebpf/elf_reader.go
+++ b/vendor/github.com/cilium/ebpf/elf_reader.go
@@ -18,6 +18,15 @@ import (
"github.com/cilium/ebpf/internal/unix"
)
+type kconfigMetaKey struct{}
+
+type kconfigMeta struct {
+ Map *MapSpec
+ Offset uint32
+}
+
+type kfuncMeta struct{}
+
// elfCode is a convenience to reduce the amount of arguments that have to
// be passed around explicitly. You should treat its contents as immutable.
type elfCode struct {
@@ -27,6 +36,9 @@ type elfCode struct {
version uint32
btf *btf.Spec
extInfo *btf.ExtInfos
+ maps map[string]*MapSpec
+ kfuncs map[string]*btf.Func
+ kconfig *MapSpec
}
// LoadCollectionSpec parses an ELF file into a CollectionSpec.
@@ -51,6 +63,12 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
return nil, err
}
+ // Checks if the ELF file is for BPF data.
+ // Old LLVM versions set e_machine to EM_NONE.
+ if f.File.Machine != unix.EM_NONE && f.File.Machine != elf.EM_BPF {
+ return nil, fmt.Errorf("unexpected machine type for BPF ELF: %s", f.File.Machine)
+ }
+
var (
licenseSection *elf.Section
versionSection *elf.Section
@@ -107,6 +125,8 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
version: version,
btf: btfSpec,
extInfo: btfExtInfo,
+ maps: make(map[string]*MapSpec),
+ kfuncs: make(map[string]*btf.Func),
}
symbols, err := f.Symbols()
@@ -120,27 +140,33 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
return nil, fmt.Errorf("load relocations: %w", err)
}
- // Collect all the various ways to define maps.
- maps := make(map[string]*MapSpec)
- if err := ec.loadMaps(maps); err != nil {
+ if err := ec.loadMaps(); err != nil {
return nil, fmt.Errorf("load maps: %w", err)
}
- if err := ec.loadBTFMaps(maps); err != nil {
+ if err := ec.loadBTFMaps(); err != nil {
return nil, fmt.Errorf("load BTF maps: %w", err)
}
- if err := ec.loadDataSections(maps); err != nil {
+ if err := ec.loadDataSections(); err != nil {
return nil, fmt.Errorf("load data sections: %w", err)
}
+ if err := ec.loadKconfigSection(); err != nil {
+ return nil, fmt.Errorf("load virtual .kconfig section: %w", err)
+ }
+
+ if err := ec.loadKsymsSection(); err != nil {
+ return nil, fmt.Errorf("load virtual .ksyms section: %w", err)
+ }
+
// Finally, collect programs and link them.
progs, err := ec.loadProgramSections()
if err != nil {
return nil, fmt.Errorf("load programs: %w", err)
}
- return &CollectionSpec{maps, progs, btfSpec, ec.ByteOrder}, nil
+ return &CollectionSpec{ec.maps, progs, btfSpec, ec.ByteOrder}, nil
}
func loadLicense(sec *elf.Section) (string, error) {
@@ -261,10 +287,6 @@ func (ec *elfCode) loadRelocations(relSections map[elf.SectionIndex]*elf.Section
return fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported)
}
- if target.Flags&elf.SHF_STRINGS > 0 {
- return fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported)
- }
-
target.references++
}
@@ -312,7 +334,6 @@ func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) {
KernelVersion: ec.version,
Instructions: insns,
ByteOrder: ec.ByteOrder,
- BTF: ec.btf,
}
// Function names must be unique within a single ELF blob.
@@ -565,6 +586,10 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
return fmt.Errorf("neither a call nor a load instruction: %v", ins)
}
+ // The Undefined section is used for 'virtual' symbols that aren't backed by
+ // an ELF section. This includes symbol references from inline asm, forward
+ // function declarations, as well as extern kfunc declarations using __ksym
+ // and extern kconfig variables declared using __kconfig.
case undefSection:
if bind != elf.STB_GLOBAL {
return fmt.Errorf("asm relocation: %s: unsupported binding: %s", name, bind)
@@ -574,7 +599,36 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ)
}
- // There is nothing to do here but set ins.Reference.
+ kf := ec.kfuncs[name]
+ switch {
+ // If a Call instruction is found and the datasec has a btf.Func with a Name
+ // that matches the symbol name, we mark the instruction as a call to a kfunc.
+ case kf != nil && ins.OpCode.JumpOp() == asm.Call:
+ ins.Metadata.Set(kfuncMeta{}, kf)
+ ins.Src = asm.PseudoKfuncCall
+ ins.Constant = -1
+
+ // If no kconfig map is found, this must be a symbol reference from inline
+ // asm (see testdata/loader.c:asm_relocation()) or a call to a forward
+ // function declaration (see testdata/fwd_decl.c). Don't interfere; these
+ // remain standard symbol references.
+ // extern __kconfig reads are represented as dword loads that need to be
+ // rewritten to pseudo map loads from .kconfig. If the map is present,
+ // require it to contain the symbol to disambiguate between inline asm
+ // relos and kconfigs.
+ case ec.kconfig != nil && ins.OpCode.IsDWordLoad():
+ for _, vsi := range ec.kconfig.Value.(*btf.Datasec).Vars {
+ if vsi.Type.(*btf.Var).Name != rel.Name {
+ continue
+ }
+
+ ins.Src = asm.PseudoMapValue
+ ins.Metadata.Set(kconfigMetaKey{}, &kconfigMeta{ec.kconfig, vsi.Offset})
+ return nil
+ }
+
+ return fmt.Errorf("kconfig %s not found in .kconfig", rel.Name)
+ }
default:
return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported)
@@ -584,7 +638,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
return nil
}
-func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
+func (ec *elfCode) loadMaps() error {
for _, sec := range ec.sections {
if sec.kind != mapSection {
continue
@@ -610,7 +664,7 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
}
mapName := mapSym.Name
- if maps[mapName] != nil {
+ if ec.maps[mapName] != nil {
return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym)
}
@@ -644,7 +698,7 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
return fmt.Errorf("map %s: %w", mapName, err)
}
- maps[mapName] = &spec
+ ec.maps[mapName] = &spec
}
}
@@ -654,7 +708,7 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
// loadBTFMaps iterates over all ELF sections marked as BTF map sections
// (like .maps) and parses them into MapSpecs. Dump the .maps section and
// any relocations with `readelf -x .maps -r `.
-func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
+func (ec *elfCode) loadBTFMaps() error {
for _, sec := range ec.sections {
if sec.kind != btfMapSection {
continue
@@ -693,7 +747,7 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported)
}
- if maps[name] != nil {
+ if ec.maps[name] != nil {
return fmt.Errorf("section %v: map %s already exists", sec.Name, name)
}
@@ -712,7 +766,7 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
return fmt.Errorf("map %v: %w", name, err)
}
- maps[name] = mapSpec
+ ec.maps[name] = mapSpec
}
// Drain the ELF section reader to make sure all bytes are accounted for
@@ -901,13 +955,6 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b
}
}
- if key == nil {
- key = &btf.Void{}
- }
- if value == nil {
- value = &btf.Void{}
- }
-
return &MapSpec{
Name: SanitizeName(name, -1),
Type: MapType(mapType),
@@ -917,7 +964,6 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b
Flags: flags,
Key: key,
Value: value,
- BTF: spec,
Pinning: pinType,
InnerMap: innerMapSpec,
Contents: contents,
@@ -1008,14 +1054,14 @@ func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Mem
case elf.STT_OBJECT:
contents = append(contents, MapKV{uint32(k), r.Name})
default:
- return nil, fmt.Errorf("unknown relocation type %v", t)
+ return nil, fmt.Errorf("unknown relocation type %v for symbol %s", t, r.Name)
}
}
return contents, nil
}
-func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
+func (ec *elfCode) loadDataSections() error {
for _, sec := range ec.sections {
if sec.kind != dataSection {
continue
@@ -1027,22 +1073,33 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
continue
}
- data, err := sec.Data()
- if err != nil {
- return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err)
- }
-
- if uint64(len(data)) > math.MaxUint32 {
- return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name)
- }
-
mapSpec := &MapSpec{
Name: SanitizeName(sec.Name, -1),
Type: Array,
KeySize: 4,
- ValueSize: uint32(len(data)),
+ ValueSize: uint32(sec.Size),
MaxEntries: 1,
- Contents: []MapKV{{uint32(0), data}},
+ }
+
+ switch sec.Type {
+ // Only open the section if we know there's actual data to be read.
+ case elf.SHT_PROGBITS:
+ data, err := sec.Data()
+ if err != nil {
+ return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err)
+ }
+
+ if uint64(len(data)) > math.MaxUint32 {
+ return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name)
+ }
+ mapSpec.Contents = []MapKV{{uint32(0), data}}
+
+ case elf.SHT_NOBITS:
+ // NOBITS sections like .bss contain only zeroes, and since data sections
+ // are Arrays, the kernel already preallocates them. Skip reading zeroes
+ // from the ELF.
+ default:
+ return fmt.Errorf("data section %s: unknown section type %s", sec.Name, sec.Type)
}
// It is possible for a data section to exist without a corresponding BTF Datasec
@@ -1051,23 +1108,78 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
var ds *btf.Datasec
if ec.btf.TypeByName(sec.Name, &ds) == nil {
// Assign the spec's key and BTF only if the Datasec lookup was successful.
- mapSpec.BTF = ec.btf
mapSpec.Key = &btf.Void{}
mapSpec.Value = ds
}
}
- switch n := sec.Name; {
- case strings.HasPrefix(n, ".rodata"):
+ if strings.HasPrefix(sec.Name, ".rodata") {
mapSpec.Flags = unix.BPF_F_RDONLY_PROG
mapSpec.Freeze = true
- case n == ".bss":
- // The kernel already zero-initializes the map
- mapSpec.Contents = nil
}
- maps[sec.Name] = mapSpec
+ ec.maps[sec.Name] = mapSpec
}
+
+ return nil
+}
+
+// loadKconfigSection handles the 'virtual' Datasec .kconfig that doesn't
+// have a corresponding ELF section and exists purely in BTF.
+func (ec *elfCode) loadKconfigSection() error {
+ if ec.btf == nil {
+ return nil
+ }
+
+ var ds *btf.Datasec
+ err := ec.btf.TypeByName(".kconfig", &ds)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if ds.Size == 0 {
+ return errors.New("zero-length .kconfig")
+ }
+
+ ec.kconfig = &MapSpec{
+ Name: ".kconfig",
+ Type: Array,
+ KeySize: uint32(4),
+ ValueSize: ds.Size,
+ MaxEntries: 1,
+ Flags: unix.BPF_F_RDONLY_PROG | unix.BPF_F_MMAPABLE,
+ Freeze: true,
+ Key: &btf.Int{Size: 4},
+ Value: ds,
+ }
+
+ return nil
+}
+
+// loadKsymsSection handles the 'virtual' Datasec .ksyms that doesn't
+// have a corresponding ELF section and exists purely in BTF.
+func (ec *elfCode) loadKsymsSection() error {
+ if ec.btf == nil {
+ return nil
+ }
+
+ var ds *btf.Datasec
+ err := ec.btf.TypeByName(".ksyms", &ds)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ for _, v := range ds.Vars {
+ // We have already checked that the .ksyms Datasec only contains Func Vars.
+ ec.kfuncs[v.Type.TypeName()] = v.Type.(*btf.Func)
+ }
+
return nil
}
@@ -1107,9 +1219,13 @@ func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
{"lsm/", LSM, AttachLSMMac, 0},
{"lsm.s/", LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE},
{"iter/", Tracing, AttachTraceIter, 0},
+ {"iter.s/", Tracing, AttachTraceIter, unix.BPF_F_SLEEPABLE},
{"syscall", Syscall, AttachNone, 0},
+ {"xdp.frags_devmap/", XDP, AttachXDPDevMap, unix.BPF_F_XDP_HAS_FRAGS},
{"xdp_devmap/", XDP, AttachXDPDevMap, 0},
+ {"xdp.frags_cpumap/", XDP, AttachXDPCPUMap, unix.BPF_F_XDP_HAS_FRAGS},
{"xdp_cpumap/", XDP, AttachXDPCPUMap, 0},
+ {"xdp.frags", XDP, AttachNone, unix.BPF_F_XDP_HAS_FRAGS},
{"xdp", XDP, AttachNone, 0},
{"perf_event", PerfEvent, AttachNone, 0},
{"lwt_in", LWTIn, AttachNone, 0},
@@ -1149,8 +1265,9 @@ func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
{"cgroup/setsockopt", CGroupSockopt, AttachCGroupSetsockopt, 0},
{"struct_ops+", StructOps, AttachNone, 0},
{"sk_lookup/", SkLookup, AttachSkLookup, 0},
-
{"seccomp", SocketFilter, AttachNone, 0},
+ {"kprobe.multi", Kprobe, AttachTraceKprobeMulti, 0},
+ {"kretprobe.multi", Kprobe, AttachTraceKprobeMulti, 0},
}
for _, t := range types {
diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go
index ae77bc6197f2..a02e8a41618c 100644
--- a/vendor/github.com/cilium/ebpf/info.go
+++ b/vendor/github.com/cilium/ebpf/info.go
@@ -48,7 +48,7 @@ func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) {
info.KeySize,
info.ValueSize,
info.MaxEntries,
- info.MapFlags,
+ uint32(info.MapFlags),
unix.ByteSliceToString(info.Name[:]),
}, nil
}
@@ -94,8 +94,10 @@ type ProgramInfo struct {
// Name as supplied by user space at load time. Available from 4.15.
Name string
- btf btf.ID
- stats *programStats
+ createdByUID uint32
+ haveCreatedByUID bool
+ btf btf.ID
+ stats *programStats
maps []MapID
insns []byte
@@ -130,6 +132,18 @@ func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) {
pi.maps = make([]MapID, info.NrMapIds)
info2.NrMapIds = info.NrMapIds
info2.MapIds = sys.NewPointer(unsafe.Pointer(&pi.maps[0]))
+ } else if haveProgramInfoMapIDs() == nil {
+ // This program really has no associated maps.
+ pi.maps = make([]MapID, 0)
+ } else {
+ // The kernel doesn't report associated maps.
+ pi.maps = nil
+ }
+
+ // createdByUID and NrMapIds were introduced in the same kernel version.
+ if pi.maps != nil {
+ pi.createdByUID = info.CreatedByUid
+ pi.haveCreatedByUID = true
}
if info.XlatedProgLen > 0 {
@@ -175,6 +189,15 @@ func (pi *ProgramInfo) ID() (ProgramID, bool) {
return pi.id, pi.id > 0
}
+// CreatedByUID returns the UID that created the program.
+//
+// Available from 4.15.
+//
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) CreatedByUID() (uint32, bool) {
+ return pi.createdByUID, pi.haveCreatedByUID
+}
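
A sketch of consuming the new accessor (prog is an assumed *ebpf.Program):

	func loaderUID(prog *ebpf.Program) error {
		info, err := prog.Info()
		if err != nil {
			return err
		}
		if uid, ok := info.CreatedByUID(); ok {
			// ok is false on kernels that don't report the field.
			fmt.Printf("program was loaded by uid %d\n", uid)
		}
		return nil
	}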
+
// BTFID returns the BTF ID associated with the program.
//
// The ID is only valid as long as the associated program is kept alive.
@@ -321,3 +344,30 @@ func EnableStats(which uint32) (io.Closer, error) {
}
return fd, nil
}
+
+var haveProgramInfoMapIDs = internal.NewFeatureTest("map IDs in program info", "4.15", func() error {
+ prog, err := progLoad(asm.Instructions{
+ asm.LoadImm(asm.R0, 0, asm.DWord),
+ asm.Return(),
+ }, SocketFilter, "MIT")
+ if err != nil {
+ return err
+ }
+ defer prog.Close()
+
+ err = sys.ObjInfo(prog, &sys.ProgInfo{
+ // NB: Don't need to allocate MapIds since the program isn't using
+ // any maps.
+ NrMapIds: 1,
+ })
+ if errors.Is(err, unix.EINVAL) {
+ // Most likely the syscall doesn't exist.
+ return internal.ErrNotSupported
+ }
+ if errors.Is(err, unix.E2BIG) {
+ // We've hit check_uarg_tail_zero on older kernels.
+ return internal.ErrNotSupported
+ }
+
+ return err
+})
diff --git a/vendor/github.com/cilium/ebpf/internal/align.go b/vendor/github.com/cilium/ebpf/internal/align.go
index 8b4f2658eacb..edc898fa968b 100644
--- a/vendor/github.com/cilium/ebpf/internal/align.go
+++ b/vendor/github.com/cilium/ebpf/internal/align.go
@@ -1,6 +1,8 @@
package internal
+import "golang.org/x/exp/constraints"
+
// Align returns 'n' updated to 'alignment' boundary.
-func Align(n, alignment int) int {
- return (int(n) + alignment - 1) / alignment * alignment
+func Align[I constraints.Integer](n, alignment I) I {
+ return (n + alignment - 1) / alignment * alignment
}
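
The generic signature preserves the old int behaviour while accepting any integer type; for example (the package is internal to the module, so these calls are illustrative):

	fmt.Println(internal.Align(13, 8))                      // 16
	fmt.Println(internal.Align(int64(9), int64(4)))         // 12
	fmt.Println(internal.Align(uint32(4096), uint32(4096))) // 4096, already aligned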
diff --git a/vendor/github.com/cilium/ebpf/internal/buffer.go b/vendor/github.com/cilium/ebpf/internal/buffer.go
new file mode 100644
index 000000000000..81c6544330f8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/buffer.go
@@ -0,0 +1,31 @@
+package internal
+
+import (
+ "bytes"
+ "sync"
+)
+
+var bytesBufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+// NewBuffer retrieves a [bytes.Buffer] from a pool and re-initialises it.
+//
+// The returned buffer should be passed to [PutBuffer].
+func NewBuffer(buf []byte) *bytes.Buffer {
+ wr := bytesBufferPool.Get().(*bytes.Buffer)
+ // Reinitialize the Buffer with a new backing slice since it is returned to
+ // the caller by wr.Bytes() below. Pooling is faster despite calling
+ // NewBuffer. The pooled alloc is still reused; it only needs to be zeroed.
+ *wr = *bytes.NewBuffer(buf)
+ return wr
+}
+
+// PutBuffer releases a buffer to the pool.
+func PutBuffer(buf *bytes.Buffer) {
+ // Release reference to the backing buffer.
+ *buf = *bytes.NewBuffer(nil)
+ bytesBufferPool.Put(buf)
+}
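
A usage sketch of the pool (illustrative, since the package is internal to the module):

	buf := internal.NewBuffer(make([]byte, 0, 4096)) // pooled wrapper around our slice
	buf.WriteString("payload")
	out := buf.Bytes()      // backed by the slice supplied above
	internal.PutBuffer(buf) // recycle the wrapper; out remains valid
	_ = out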
diff --git a/vendor/github.com/cilium/ebpf/internal/cpu.go b/vendor/github.com/cilium/ebpf/internal/cpu.go
index 3affa1efb9db..9e908b610b5f 100644
--- a/vendor/github.com/cilium/ebpf/internal/cpu.go
+++ b/vendor/github.com/cilium/ebpf/internal/cpu.go
@@ -4,24 +4,13 @@ import (
"fmt"
"os"
"strings"
- "sync"
)
-var sysCPU struct {
- once sync.Once
- err error
- num int
-}
-
// PossibleCPUs returns the max number of CPUs a system may possibly have.
// Logical CPU numbers must be of the form 0-n.
-func PossibleCPUs() (int, error) {
- sysCPU.once.Do(func() {
- sysCPU.num, sysCPU.err = parseCPUsFromFile("/sys/devices/system/cpu/possible")
- })
-
- return sysCPU.num, sysCPU.err
-}
+var PossibleCPUs = Memoize(func() (int, error) {
+ return parseCPUsFromFile("/sys/devices/system/cpu/possible")
+})
func parseCPUsFromFile(path string) (int, error) {
spec, err := os.ReadFile(path)
diff --git a/vendor/github.com/cilium/ebpf/internal/deque.go b/vendor/github.com/cilium/ebpf/internal/deque.go
new file mode 100644
index 000000000000..e3a30502159b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/deque.go
@@ -0,0 +1,91 @@
+package internal
+
+import "math/bits"
+
+// Deque implements a double ended queue.
+type Deque[T any] struct {
+ elems []T
+ read, write uint64
+ mask uint64
+}
+
+// Reset clears the contents of the deque while retaining the backing buffer.
+func (dq *Deque[T]) Reset() {
+ var zero T
+
+ for i := dq.read; i < dq.write; i++ {
+ dq.elems[i&dq.mask] = zero
+ }
+
+ dq.read, dq.write = 0, 0
+}
+
+func (dq *Deque[T]) Empty() bool {
+ return dq.read == dq.write
+}
+
+// Push adds an element to the end.
+func (dq *Deque[T]) Push(e T) {
+ dq.Grow(1)
+ dq.elems[dq.write&dq.mask] = e
+ dq.write++
+}
+
+// Shift returns the first element or the zero value.
+func (dq *Deque[T]) Shift() T {
+ var zero T
+
+ if dq.Empty() {
+ return zero
+ }
+
+ index := dq.read & dq.mask
+ t := dq.elems[index]
+ dq.elems[index] = zero
+ dq.read++
+ return t
+}
+
+// Pop returns the last element or the zero value.
+func (dq *Deque[T]) Pop() T {
+ var zero T
+
+ if dq.Empty() {
+ return zero
+ }
+
+ dq.write--
+ index := dq.write & dq.mask
+ t := dq.elems[index]
+ dq.elems[index] = zero
+ return t
+}
+
+// Grow the deque's capacity, if necessary, to guarantee space for another n
+// elements.
+func (dq *Deque[T]) Grow(n int) {
+ have := dq.write - dq.read
+ need := have + uint64(n)
+ if need < have {
+ panic("overflow")
+ }
+ if uint64(len(dq.elems)) >= need {
+ return
+ }
+
+ // Round up to the next power of two, which is at least 8.
+ // See https://jameshfisher.com/2018/03/30/round-up-power-2/
+ capacity := 1 << (64 - bits.LeadingZeros64(need-1))
+ if capacity < 8 {
+ capacity = 8
+ }
+
+ elems := make([]T, have, capacity)
+ pivot := dq.read & dq.mask
+ copied := copy(elems, dq.elems[pivot:])
+ copy(elems[copied:], dq.elems[:pivot])
+
+ dq.elems = elems[:capacity]
+ dq.mask = uint64(capacity) - 1
+ dq.read, dq.write = 0, have
+}
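
A small usage sketch of the deque:

	var dq internal.Deque[string]
	dq.Push("a")
	dq.Push("b")
	dq.Push("c")
	fmt.Println(dq.Shift()) // "a", taken from the front
	fmt.Println(dq.Pop())   // "c", taken from the back
	fmt.Println(dq.Empty()) // false, "b" is still queued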
diff --git a/vendor/github.com/cilium/ebpf/internal/endian_be.go b/vendor/github.com/cilium/ebpf/internal/endian_be.go
index ad33cda8511b..96a2ac0de22f 100644
--- a/vendor/github.com/cilium/ebpf/internal/endian_be.go
+++ b/vendor/github.com/cilium/ebpf/internal/endian_be.go
@@ -1,5 +1,4 @@
//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64
-// +build armbe arm64be mips mips64 mips64p32 ppc64 s390 s390x sparc sparc64
package internal
diff --git a/vendor/github.com/cilium/ebpf/internal/endian_le.go b/vendor/github.com/cilium/ebpf/internal/endian_le.go
index 41a68224c833..fde4c55a6f5c 100644
--- a/vendor/github.com/cilium/ebpf/internal/endian_le.go
+++ b/vendor/github.com/cilium/ebpf/internal/endian_le.go
@@ -1,5 +1,4 @@
-//go:build 386 || amd64 || amd64p32 || arm || arm64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64
-// +build 386 amd64 amd64p32 arm arm64 mipsle mips64le mips64p32le ppc64le riscv64
+//go:build 386 || amd64 || amd64p32 || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64
package internal
diff --git a/vendor/github.com/cilium/ebpf/internal/errors.go b/vendor/github.com/cilium/ebpf/internal/errors.go
index b5ccdd7d0531..bda01e2fde58 100644
--- a/vendor/github.com/cilium/ebpf/internal/errors.go
+++ b/vendor/github.com/cilium/ebpf/internal/errors.go
@@ -7,32 +7,25 @@ import (
"strings"
)
-// ErrorWithLog returns an error which includes logs from the kernel verifier.
+// ErrorWithLog wraps err in a VerifierError that includes the parsed verifier
+// log buffer.
//
// The default error output is a summary of the full log. The latter can be
// accessed via VerifierError.Log or by formatting the error, see Format.
-//
-// A set of heuristics is used to determine whether the log has been truncated.
-func ErrorWithLog(err error, log []byte) *VerifierError {
+func ErrorWithLog(source string, err error, log []byte, truncated bool) *VerifierError {
const whitespace = "\t\r\v\n "
// Convert verifier log C string by truncating it on the first 0 byte
// and trimming trailing whitespace before interpreting as a Go string.
- truncated := false
if i := bytes.IndexByte(log, 0); i != -1 {
- if i == len(log)-1 && !bytes.HasSuffix(log[:i], []byte{'\n'}) {
- // The null byte is at the end of the buffer and it's not preceded
- // by a newline character. Most likely the buffer was too short.
- truncated = true
- }
-
log = log[:i]
- } else if len(log) > 0 {
- // No null byte? Dodgy!
- truncated = true
}
log = bytes.Trim(log, whitespace)
+ if len(log) == 0 {
+ return &VerifierError{source, err, nil, truncated}
+ }
+
logLines := bytes.Split(log, []byte{'\n'})
lines := make([]string, 0, len(logLines))
for _, line := range logLines {
@@ -41,13 +34,14 @@ func ErrorWithLog(err error, log []byte) *VerifierError {
lines = append(lines, string(bytes.TrimRight(line, whitespace)))
}
- return &VerifierError{err, lines, truncated}
+ return &VerifierError{source, err, lines, truncated}
}
// VerifierError includes information from the eBPF verifier.
//
// It summarises the log output, see Format if you want to output the full contents.
type VerifierError struct {
+ source string
// The error which caused this error.
Cause error
// The verifier output split into lines.
@@ -67,9 +61,12 @@ func (le *VerifierError) Error() string {
log = log[:n-1]
}
+ var b strings.Builder
+ fmt.Fprintf(&b, "%s: %s", le.source, le.Cause.Error())
+
n := len(log)
if n == 0 {
- return le.Cause.Error()
+ return b.String()
}
lines := log[n-1:]
@@ -78,14 +75,9 @@ func (le *VerifierError) Error() string {
lines = log[n-2:]
}
- var b strings.Builder
- fmt.Fprintf(&b, "%s: ", le.Cause.Error())
-
- for i, line := range lines {
+ for _, line := range lines {
+ b.WriteString(": ")
b.WriteString(strings.TrimSpace(line))
- if i != len(lines)-1 {
- b.WriteString(": ")
- }
}
omitted := len(le.Log) - len(lines)
@@ -143,8 +135,8 @@ func includePreviousLine(line string) bool {
// Understood verbs are %s and %v, which are equivalent to calling Error(). %v
// allows outputting additional information using the following flags:
//
-// + Output the first lines, or all lines if no width is given.
-// - Output the last lines, or all lines if no width is given.
+// %+v: Output the first lines, or all lines if no width is given.
+// %-v: Output the last lines, or all lines if no width is given.
//
// Use width to specify how many lines to output. Use the '-' flag to output
// lines from the end of the log instead of the beginning.
@@ -174,7 +166,7 @@ func (le *VerifierError) Format(f fmt.State, verb rune) {
return
}
- fmt.Fprintf(f, "%s:", le.Cause.Error())
+ fmt.Fprintf(f, "%s: %s:", le.source, le.Cause.Error())
omitted := len(le.Log) - n
lines := le.Log[:n]
diff --git a/vendor/github.com/cilium/ebpf/internal/feature.go b/vendor/github.com/cilium/ebpf/internal/feature.go
index 0a6c2d1d528c..b1f650751deb 100644
--- a/vendor/github.com/cilium/ebpf/internal/feature.go
+++ b/vendor/github.com/cilium/ebpf/internal/feature.go
@@ -31,10 +31,20 @@ func (ufe *UnsupportedFeatureError) Is(target error) bool {
return target == ErrNotSupported
}
-type featureTest struct {
- sync.RWMutex
- successful bool
- result error
+// FeatureTest caches the result of a [FeatureTestFn].
+//
+// Fields should not be modified after creation.
+type FeatureTest struct {
+ // The name of the feature being detected.
+ Name string
+ // Version in the form Major.Minor[.Patch].
+ Version string
+ // The feature test itself.
+ Fn FeatureTestFn
+
+ mu sync.RWMutex
+ done bool
+ result error
}
// FeatureTestFn is used to determine whether the kernel supports
@@ -42,59 +52,133 @@ type featureTest struct {
//
// The return values have the following semantics:
//
-// err == ErrNotSupported: the feature is not available
-// err == nil: the feature is available
-// err != nil: the test couldn't be executed
+// err == ErrNotSupported: the feature is not available
+// err == nil: the feature is available
+// err != nil: the test couldn't be executed
type FeatureTestFn func() error
-// FeatureTest wraps a function so that it is run at most once.
+// NewFeatureTest is a convenient way to create a single [FeatureTest].
+func NewFeatureTest(name, version string, fn FeatureTestFn) func() error {
+ ft := &FeatureTest{
+ Name: name,
+ Version: version,
+ Fn: fn,
+ }
+
+ return ft.execute
+}
+
+// execute the feature test.
//
-// name should identify the tested feature, while version must be in the
-// form Major.Minor[.Patch].
+// The result is cached if the test is conclusive.
//
-// Returns an error wrapping ErrNotSupported if the feature is not supported.
-func FeatureTest(name, version string, fn FeatureTestFn) func() error {
- ft := new(featureTest)
- return func() error {
- ft.RLock()
- if ft.successful {
- defer ft.RUnlock()
- return ft.result
- }
- ft.RUnlock()
- ft.Lock()
- defer ft.Unlock()
- // check one more time on the off
- // chance that two go routines
- // were able to call into the write
- // lock
- if ft.successful {
- return ft.result
- }
- err := fn()
- switch {
- case errors.Is(err, ErrNotSupported):
- v, err := NewVersion(version)
- if err != nil {
- return err
- }
+// See [FeatureTestFn] for the meaning of the returned error.
+func (ft *FeatureTest) execute() error {
+ ft.mu.RLock()
+ result, done := ft.result, ft.done
+ ft.mu.RUnlock()
- ft.result = &UnsupportedFeatureError{
- MinimumVersion: v,
- Name: name,
- }
- fallthrough
+ if done {
+ return result
+ }
+
+ ft.mu.Lock()
+ defer ft.mu.Unlock()
+
+ // The test may have been executed by another caller while we were
+ // waiting to acquire ft.mu.
+ if ft.done {
+ return ft.result
+ }
+
+ err := ft.Fn()
+ if err == nil {
+ ft.done = true
+ return nil
+ }
- case err == nil:
- ft.successful = true
+ if errors.Is(err, ErrNotSupported) {
+ var v Version
+ if ft.Version != "" {
+ v, err = NewVersion(ft.Version)
+ if err != nil {
+ return fmt.Errorf("feature %s: %w", ft.Name, err)
+ }
+ }
- default:
- // We couldn't execute the feature test to a point
- // where it could make a determination.
- // Don't cache the result, just return it.
- return fmt.Errorf("detect support for %s: %w", name, err)
+ ft.done = true
+ ft.result = &UnsupportedFeatureError{
+ MinimumVersion: v,
+ Name: ft.Name,
}
return ft.result
}
+
+ // We couldn't execute the feature test to a point
+ // where it could make a determination.
+ // Don't cache the result, just return it.
+ return fmt.Errorf("detect support for %s: %w", ft.Name, err)
+}
+
+// FeatureMatrix groups multiple related feature tests into a map.
+//
+// Useful when there is a small number of discrete features which are known
+// at compile time.
+//
+// It must not be modified concurrently with calling [FeatureMatrix.Result].
+type FeatureMatrix[K comparable] map[K]*FeatureTest
+
+// Result returns the outcome of the feature test for the given key.
+//
+// It's safe to call this function concurrently.
+func (fm FeatureMatrix[K]) Result(key K) error {
+ ft, ok := fm[key]
+ if !ok {
+ return fmt.Errorf("no feature probe for %v", key)
+ }
+
+ return ft.execute()
+}
+
+// FeatureCache caches a potentially unlimited number of feature probes.
+//
+// Useful when there is a high cardinality for a feature test.
+type FeatureCache[K comparable] struct {
+ mu sync.RWMutex
+ newTest func(K) *FeatureTest
+ features map[K]*FeatureTest
+}
+
+func NewFeatureCache[K comparable](newTest func(K) *FeatureTest) *FeatureCache[K] {
+ return &FeatureCache[K]{
+ newTest: newTest,
+ features: make(map[K]*FeatureTest),
+ }
+}
+
+func (fc *FeatureCache[K]) Result(key K) error {
+ // NB: Executing the feature test happens without fc.mu taken.
+ return fc.retrieve(key).execute()
+}
+
+func (fc *FeatureCache[K]) retrieve(key K) *FeatureTest {
+ fc.mu.RLock()
+ ft := fc.features[key]
+ fc.mu.RUnlock()
+
+ if ft != nil {
+ return ft
+ }
+
+ fc.mu.Lock()
+ defer fc.mu.Unlock()
+
+ if ft := fc.features[key]; ft != nil {
+ return ft
+ }
+
+ ft = fc.newTest(key)
+ fc.features[key] = ft
+ return ft
}
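
A sketch of the reworked probe API (the feature name, version and probe body are assumptions):

	var haveFooHelper = internal.NewFeatureTest("foo helper", "5.8", func() error {
		// Probe the kernel here. internal.ErrNotSupported caches a negative
		// result, nil caches success; any other error is not cached.
		return internal.ErrNotSupported
	})

	func useFoo() {
		if err := haveFooHelper(); errors.Is(err, internal.ErrNotSupported) {
			// Fall back to an older mechanism.
		}
	}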
diff --git a/vendor/github.com/cilium/ebpf/internal/io.go b/vendor/github.com/cilium/ebpf/internal/io.go
index 30b6641f0761..1eaf4775ad7c 100644
--- a/vendor/github.com/cilium/ebpf/internal/io.go
+++ b/vendor/github.com/cilium/ebpf/internal/io.go
@@ -2,10 +2,14 @@ package internal
import (
"bufio"
+ "bytes"
"compress/gzip"
"errors"
+ "fmt"
"io"
"os"
+ "path/filepath"
+ "sync"
)
// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized
@@ -60,3 +64,65 @@ func ReadAllCompressed(file string) ([]byte, error) {
return io.ReadAll(gz)
}
+
+// ReadUint64FromFile reads a uint64 from a file.
+//
+// format specifies the contents of the file in fmt.Scanf syntax.
+func ReadUint64FromFile(format string, path ...string) (uint64, error) {
+ filename := filepath.Join(path...)
+ data, err := os.ReadFile(filename)
+ if err != nil {
+ return 0, fmt.Errorf("reading file %q: %w", filename, err)
+ }
+
+ var value uint64
+ n, err := fmt.Fscanf(bytes.NewReader(data), format, &value)
+ if err != nil {
+ return 0, fmt.Errorf("parsing file %q: %w", filename, err)
+ }
+ if n != 1 {
+ return 0, fmt.Errorf("parsing file %q: expected 1 item, got %d", filename, n)
+ }
+
+ return value, nil
+}
+
+type uint64FromFileKey struct {
+ format, path string
+}
+
+var uint64FromFileCache = struct {
+ sync.RWMutex
+ values map[uint64FromFileKey]uint64
+}{
+ values: map[uint64FromFileKey]uint64{},
+}
+
+// ReadUint64FromFileOnce is like ReadUint64FromFile but memoizes the result.
+func ReadUint64FromFileOnce(format string, path ...string) (uint64, error) {
+ filename := filepath.Join(path...)
+ key := uint64FromFileKey{format, filename}
+
+ uint64FromFileCache.RLock()
+ if value, ok := uint64FromFileCache.values[key]; ok {
+ uint64FromFileCache.RUnlock()
+ return value, nil
+ }
+ uint64FromFileCache.RUnlock()
+
+ value, err := ReadUint64FromFile(format, filename)
+ if err != nil {
+ return 0, err
+ }
+
+ uint64FromFileCache.Lock()
+ defer uint64FromFileCache.Unlock()
+
+ if value, ok := uint64FromFileCache.values[key]; ok {
+ // Someone else got here before us, use what is cached.
+ return value, nil
+ }
+
+ uint64FromFileCache.values[key] = value
+ return value, nil
+}
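
A usage sketch (the path is an assumption; format uses fmt.Fscanf syntax):

	func jitEnabled() (uint64, error) {
		// Read and cache a single integer; later calls with the same format
		// and path return the cached value.
		return internal.ReadUint64FromFileOnce("%d\n", "/proc/sys/net/core", "bpf_jit_enable")
	}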
diff --git a/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
new file mode 100644
index 000000000000..d95e7eb0e5d2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
@@ -0,0 +1,267 @@
+package kconfig
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+)
+
+// Find finds a kconfig file on the host.
+// It first reads /boot/config-&lt;release&gt; for the currently running kernel and
+// tries /proc/config.gz if nothing was found in /boot.
+// If neither file provides a kconfig, it returns an error.
+func Find() (*os.File, error) {
+ kernelRelease, err := internal.KernelRelease()
+ if err != nil {
+ return nil, fmt.Errorf("cannot get kernel release: %w", err)
+ }
+
+ path := "/boot/config-" + kernelRelease
+ f, err := os.Open(path)
+ if err == nil {
+ return f, nil
+ }
+
+ f, err = os.Open("/proc/config.gz")
+ if err == nil {
+ return f, nil
+ }
+
+ return nil, fmt.Errorf("neither %s nor /proc/config.gz provide a kconfig", path)
+}
+
+// Parse parses the kconfig file for which a reader is given.
+// All CONFIG_* entries that are both in filter and set are put in the
+// returned map, with the option name as key and its setting as value.
+// If filter is nil, no filtering occurs.
+// If the kconfig file is not valid, an error is returned.
+func Parse(source io.ReaderAt, filter map[string]struct{}) (map[string]string, error) {
+ var r io.Reader
+ zr, err := gzip.NewReader(io.NewSectionReader(source, 0, math.MaxInt64))
+ if err != nil {
+ r = io.NewSectionReader(source, 0, math.MaxInt64)
+ } else {
+ // Source is gzip compressed, transparently decompress.
+ r = zr
+ }
+
+ ret := make(map[string]string, len(filter))
+
+ s := bufio.NewScanner(r)
+
+ for s.Scan() {
+ line := s.Bytes()
+ err = processKconfigLine(line, ret, filter)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse line: %w", err)
+ }
+
+ if filter != nil && len(ret) == len(filter) {
+ break
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, fmt.Errorf("cannot parse: %w", err)
+ }
+
+ if zr != nil {
+ return ret, zr.Close()
+ }
+
+ return ret, nil
+}
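
Find and Parse compose; a sketch (the option name is an assumption):

	func bpfEnabled() (bool, error) {
		f, err := kconfig.Find()
		if err != nil {
			return false, err
		}
		defer f.Close()

		opts, err := kconfig.Parse(f, map[string]struct{}{"CONFIG_BPF": {}})
		if err != nil {
			return false, err
		}
		return opts["CONFIG_BPF"] == "y", nil
	}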
+
+// Golang translation of libbpf bpf_object__process_kconfig_line():
+// /~https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/libbpf.c#L1874
+// It does the same checks but does not put the data inside the BPF map.
+func processKconfigLine(line []byte, m map[string]string, filter map[string]struct{}) error {
+ // Ignore empty lines and "# CONFIG_* is not set".
+ if !bytes.HasPrefix(line, []byte("CONFIG_")) {
+ return nil
+ }
+
+ key, value, found := bytes.Cut(line, []byte{'='})
+ if !found {
+ return fmt.Errorf("line %q does not contain separator '='", line)
+ }
+
+ if len(value) == 0 {
+ return fmt.Errorf("line %q has no value", line)
+ }
+
+ if filter != nil {
+ // NB: map[string(key)] gets special optimisation help from the compiler
+ // and doesn't allocate. Don't turn this into a variable.
+ _, ok := filter[string(key)]
+ if !ok {
+ return nil
+ }
+ }
+
+ // This can seem odd, but libbpf only sets the value the first time the
+ // key is encountered:
+ // /~https://github.com/torvalds/linux/blob/0d85b27b0cc6/tools/lib/bpf/libbpf.c#L1906-L1908
+ _, ok := m[string(key)]
+ if !ok {
+ m[string(key)] = string(value)
+ }
+
+ return nil
+}
+
+// PutValue translates the value given as parameter depending on the BTF
+// type, the translated value is then written to the byte array.
+func PutValue(data []byte, typ btf.Type, value string) error {
+ typ = btf.UnderlyingType(typ)
+
+ switch value {
+ case "y", "n", "m":
+ return putValueTri(data, typ, value)
+ default:
+ if strings.HasPrefix(value, `"`) {
+ return putValueString(data, typ, value)
+ }
+ return putValueNumber(data, typ, value)
+ }
+}
+
+// Golang translation of libbpf_tristate enum:
+// /~https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/bpf_helpers.h#L169
+type triState int
+
+const (
+ TriNo triState = 0
+ TriYes triState = 1
+ TriModule triState = 2
+)
+
+func putValueTri(data []byte, typ btf.Type, value string) error {
+ switch v := typ.(type) {
+ case *btf.Int:
+ if v.Encoding != btf.Bool {
+ return fmt.Errorf("cannot add tri value, expected btf.Bool, got: %v", v.Encoding)
+ }
+
+ if v.Size != 1 {
+ return fmt.Errorf("cannot add tri value, expected size of 1 byte, got: %d", v.Size)
+ }
+
+ switch value {
+ case "y":
+ data[0] = 1
+ case "n":
+ data[0] = 0
+ default:
+ return fmt.Errorf("cannot use %q for btf.Bool", value)
+ }
+ case *btf.Enum:
+ if v.Name != "libbpf_tristate" {
+ return fmt.Errorf("cannot use enum %q, only libbpf_tristate is supported", v.Name)
+ }
+
+ var tri triState
+ switch value {
+ case "y":
+ tri = TriYes
+ case "m":
+ tri = TriModule
+ case "n":
+ tri = TriNo
+ default:
+ return fmt.Errorf("value %q is not support for libbpf_tristate", value)
+ }
+
+ internal.NativeEndian.PutUint64(data, uint64(tri))
+ default:
+ return fmt.Errorf("cannot add number value, expected btf.Int or btf.Enum, got: %T", v)
+ }
+
+ return nil
+}
+
+func putValueString(data []byte, typ btf.Type, value string) error {
+ array, ok := typ.(*btf.Array)
+ if !ok {
+ return fmt.Errorf("cannot add string value, expected btf.Array, got %T", array)
+ }
+
+ contentType, ok := btf.UnderlyingType(array.Type).(*btf.Int)
+ if !ok {
+ return fmt.Errorf("cannot add string value, expected array of btf.Int, got %T", contentType)
+ }
+
+ // Any one-byte Int that is not a bool can be used to store a char:
+ // /~https://github.com/torvalds/linux/blob/1a5304fecee5/tools/lib/bpf/libbpf.c#L3637-L3638
+ if contentType.Size != 1 && contentType.Encoding != btf.Bool {
+ return fmt.Errorf("cannot add string value, expected array of btf.Int of size 1, got array of btf.Int of size: %v", contentType.Size)
+ }
+
+ if !strings.HasPrefix(value, `"`) || !strings.HasSuffix(value, `"`) {
+ return fmt.Errorf(`value %q must start and finish with '"'`, value)
+ }
+
+ str := strings.Trim(value, `"`)
+
+ // We need to trim the string if the BPF array is smaller.
+ if uint32(len(str)) >= array.Nelems {
+ str = str[:array.Nelems]
+ }
+
+ // Write the string content to .kconfig.
+ copy(data, str)
+
+ return nil
+}
+
+func putValueNumber(data []byte, typ btf.Type, value string) error {
+ integer, ok := typ.(*btf.Int)
+ if !ok {
+ return fmt.Errorf("cannot add number value, expected *btf.Int, got: %T", integer)
+ }
+
+ size := integer.Size
+ sizeInBits := size * 8
+
+ var n uint64
+ var err error
+ if integer.Encoding == btf.Signed {
+ parsed, e := strconv.ParseInt(value, 0, int(sizeInBits))
+
+ n = uint64(parsed)
+ err = e
+ } else {
+ parsed, e := strconv.ParseUint(value, 0, int(sizeInBits))
+
+ n = uint64(parsed)
+ err = e
+ }
+
+ if err != nil {
+ return fmt.Errorf("cannot parse value: %w", err)
+ }
+
+ switch size {
+ case 1:
+ data[0] = byte(n)
+ case 2:
+ internal.NativeEndian.PutUint16(data, uint16(n))
+ case 4:
+ internal.NativeEndian.PutUint32(data, uint32(n))
+ case 8:
+ internal.NativeEndian.PutUint64(data, uint64(n))
+ default:
+ return fmt.Errorf("size (%d) is not valid, expected: 1, 2, 4 or 8", size)
+ }
+
+ return nil
+}
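
A sketch of marshalling a numeric option (the types and value are illustrative):

	func marshalOption() ([]byte, error) {
		data := make([]byte, 4)
		// A 4-byte unsigned Int accepts values in any base strconv understands.
		if err := kconfig.PutValue(data, &btf.Int{Size: 4}, "0x10"); err != nil {
			return nil, err
		}
		return data, nil // holds 16 in native endianness
	}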
diff --git a/vendor/github.com/cilium/ebpf/internal/memoize.go b/vendor/github.com/cilium/ebpf/internal/memoize.go
new file mode 100644
index 000000000000..3de0a3fb95a5
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/memoize.go
@@ -0,0 +1,26 @@
+package internal
+
+import (
+ "sync"
+)
+
+type memoizedFunc[T any] struct {
+ once sync.Once
+ fn func() (T, error)
+ result T
+ err error
+}
+
+func (mf *memoizedFunc[T]) do() (T, error) {
+ mf.once.Do(func() {
+ mf.result, mf.err = mf.fn()
+ })
+ return mf.result, mf.err
+}
+
+// Memoize the result of a function call.
+//
+// fn is only ever called once, even if it returns an error.
+func Memoize[T any](fn func() (T, error)) func() (T, error) {
+ return (&memoizedFunc[T]{fn: fn}).do
+}
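
Usage sketch, mirroring how PossibleCPUs is defined earlier in this patch (the file read is illustrative):

	var bootID = internal.Memoize(func() (string, error) {
		// Runs at most once; both result and error are cached.
		b, err := os.ReadFile("/proc/sys/kernel/random/boot_id")
		return strings.TrimSpace(string(b)), err
	})

	id, err := bootID() // later calls return the cached pair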
diff --git a/vendor/github.com/cilium/ebpf/internal/output.go b/vendor/github.com/cilium/ebpf/internal/output.go
index aeab37fcfafe..dd6e6cbafe0e 100644
--- a/vendor/github.com/cilium/ebpf/internal/output.go
+++ b/vendor/github.com/cilium/ebpf/internal/output.go
@@ -6,6 +6,7 @@ import (
"go/format"
"go/scanner"
"io"
+ "reflect"
"strings"
"unicode"
)
@@ -82,3 +83,15 @@ func WriteFormatted(src []byte, out io.Writer) error {
return nel
}
+
+// GoTypeName is like %T, but elides the package name.
+//
+// Pointers to a type are peeled off.
+func GoTypeName(t any) string {
+ rT := reflect.TypeOf(t)
+ for rT.Kind() == reflect.Pointer {
+ rT = rT.Elem()
+ }
+ // Doesn't return the correct Name for generic types due to /~https://github.com/golang/go/issues/55924
+ return rT.Name()
+}
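
For example (illustrative inputs):

	fmt.Println(internal.GoTypeName(&btf.Datasec{})) // "Datasec": pointer peeled, package elided
	fmt.Println(internal.GoTypeName(btf.Void{}))     // "Void"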
diff --git a/vendor/github.com/cilium/ebpf/internal/pinning.go b/vendor/github.com/cilium/ebpf/internal/pinning.go
index c711353c3ea5..01d892f93444 100644
--- a/vendor/github.com/cilium/ebpf/internal/pinning.go
+++ b/vendor/github.com/cilium/ebpf/internal/pinning.go
@@ -6,15 +6,12 @@ import (
"os"
"path/filepath"
"runtime"
- "unsafe"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
func Pin(currentPath, newPath string, fd *sys.FD) error {
- const bpfFSType = 0xcafe4a11
-
if newPath == "" {
return errors.New("given pinning path cannot be empty")
}
@@ -22,20 +19,11 @@ func Pin(currentPath, newPath string, fd *sys.FD) error {
return nil
}
- var statfs unix.Statfs_t
- if err := unix.Statfs(filepath.Dir(newPath), &statfs); err != nil {
+ fsType, err := FSType(filepath.Dir(newPath))
+ if err != nil {
return err
}
-
- fsType := int64(statfs.Type)
- if unsafe.Sizeof(statfs.Type) == 4 {
- // We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a
- // negative number when interpreted as int32 so we need to cast via
- // uint32 to avoid sign extension.
- fsType = int64(uint32(statfs.Type))
- }
-
- if fsType != bpfFSType {
+ if fsType != unix.BPF_FS_MAGIC {
return fmt.Errorf("%s is not on a bpf filesystem", newPath)
}
@@ -50,7 +38,7 @@ func Pin(currentPath, newPath string, fd *sys.FD) error {
// Renameat2 is used instead of os.Rename to disallow the new path replacing
// an existing path.
- err := unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE)
+ err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE)
if err == nil {
// Object is now moved to the new pinning path.
return nil
diff --git a/vendor/github.com/cilium/ebpf/internal/platform.go b/vendor/github.com/cilium/ebpf/internal/platform.go
new file mode 100644
index 000000000000..6e90f2ef7148
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/platform.go
@@ -0,0 +1,43 @@
+package internal
+
+import (
+ "runtime"
+)
+
+// PlatformPrefix returns the platform-dependent syscall wrapper prefix used by
+// the linux kernel.
+//
+// Based on /~https://github.com/golang/go/blob/master/src/go/build/syslist.go
+// and /~https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L10047
+func PlatformPrefix() string {
+ switch runtime.GOARCH {
+ case "386":
+ return "__ia32_"
+ case "amd64", "amd64p32":
+ return "__x64_"
+
+ case "arm", "armbe":
+ return "__arm_"
+ case "arm64", "arm64be":
+ return "__arm64_"
+
+ case "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le":
+ return "__mips_"
+
+ case "s390":
+ return "__s390_"
+ case "s390x":
+ return "__s390x_"
+
+ case "riscv", "riscv64":
+ return "__riscv_"
+
+ case "ppc":
+ return "__powerpc_"
+ case "ppc64", "ppc64le":
+ return "__powerpc64_"
+
+ default:
+ return ""
+ }
+}
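
A sketch of deriving an architecture-qualified syscall symbol, the typical use of this prefix:

	// On amd64 this yields "__x64_sys_bpf"; the prefix is empty on
	// architectures the switch doesn't cover.
	symbol := internal.PlatformPrefix() + "sys_bpf"
	_ = symbol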
diff --git a/vendor/github.com/cilium/ebpf/internal/prog.go b/vendor/github.com/cilium/ebpf/internal/prog.go
new file mode 100644
index 000000000000..d629145b6293
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/prog.go
@@ -0,0 +1,11 @@
+package internal
+
+// EmptyBPFContext is the smallest-possible BPF input context to be used for
+// invoking `Program.{Run,Benchmark,Test}`.
+//
+// Programs require a context input buffer of at least 15 bytes. Looking in
+// net/bpf/test_run.c, bpf_test_init() requires that the input is at least
+// ETH_HLEN (14) bytes. As of Linux commit fd18942 ("bpf: Don't redirect packets
+// with invalid pkt_len"), it also requires the skb to be non-empty after
+// removing the Layer 2 header.
+var EmptyBPFContext = make([]byte, 15)
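As context for reviewers, a minimal sketch of how a caller might use such a buffer through the public API; it assumes an already-loaded socket-filter `*ebpf.Program` and uses `Program.Test`, the single-shot run helper:

```go
package example

import (
	"fmt"

	"github.com/cilium/ebpf"
)

// RunOnce exercises prog with the smallest input the kernel accepts:
// ETH_HLEN (14) bytes for bpf_test_init, plus one extra byte so the skb
// stays non-empty after the Layer 2 header is removed.
func RunOnce(prog *ebpf.Program) error {
	in := make([]byte, 15) // mirrors internal.EmptyBPFContext

	ret, _, err := prog.Test(in)
	if err != nil {
		return fmt.Errorf("running program: %w", err)
	}

	fmt.Println("program returned", ret)
	return nil
}
```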
diff --git a/vendor/github.com/cilium/ebpf/internal/statfs.go b/vendor/github.com/cilium/ebpf/internal/statfs.go
new file mode 100644
index 000000000000..44c02d676e6d
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/statfs.go
@@ -0,0 +1,23 @@
+package internal
+
+import (
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+func FSType(path string) (int64, error) {
+ var statfs unix.Statfs_t
+ if err := unix.Statfs(path, &statfs); err != nil {
+ return 0, err
+ }
+
+ fsType := int64(statfs.Type)
+ if unsafe.Sizeof(statfs.Type) == 4 {
+ // We're on a 32-bit arch, where statfs.Type is int32. Filesystem magic
+ // numbers such as the bpffs magic are negative when interpreted as int32,
+ // so we need to cast via uint32 to avoid sign extension.
+ fsType = int64(uint32(statfs.Type))
+ }
+ return fsType, nil
+}
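The sign-extension hazard is easy to demonstrate in isolation: the bpffs magic 0xcafe4a11 is negative when stored in an int32, so widening it directly corrupts the comparison. A standalone sketch:

```go
package main

import "fmt"

func main() {
	// What a 32-bit statfs.Type would hold for a bpffs mount.
	magic := uint32(0xcafe4a11)
	asInt32 := int32(magic)

	fmt.Println(int64(asInt32))         // -889304559: sign-extended, comparison fails
	fmt.Println(int64(uint32(asInt32))) // 3405662737: bit pattern preserved, equals 0xcafe4a11
}
```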
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/vendor/github.com/cilium/ebpf/internal/sys/fd.go
index 65517d45e26a..941a56fb91b1 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/fd.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/fd.go
@@ -17,11 +17,39 @@ type FD struct {
}
func newFD(value int) *FD {
+ if onLeakFD != nil {
+ // Attempt to store the caller's stack for the given fd value.
+ // Panic if fds contains an existing stack for the fd.
+ old, exist := fds.LoadOrStore(value, callersFrames())
+ if exist {
+ f := old.(*runtime.Frames)
+ panic(fmt.Sprintf("found existing stack for fd %d:\n%s", value, FormatFrames(f)))
+ }
+ }
+
fd := &FD{value}
- runtime.SetFinalizer(fd, (*FD).Close)
+ runtime.SetFinalizer(fd, (*FD).finalize)
return fd
}
+// finalize is set as the FD's runtime finalizer and
+// sends a leak trace before calling FD.Close().
+func (fd *FD) finalize() {
+ if fd.raw < 0 {
+ return
+ }
+
+ // Invoke the fd leak callback. Calls LoadAndDelete to guarantee the callback
+ // is invoked at most once per sys.FD allocation, since a runtime.Frames can
+ // only be unwound once.
+ f, ok := fds.LoadAndDelete(fd.Int())
+ if ok && onLeakFD != nil {
+ onLeakFD(f.(*runtime.Frames))
+ }
+
+ _ = fd.Close()
+}
+
// NewFD wraps a raw fd with a finalizer.
//
// You must not use the raw fd after calling this function, since the underlying
@@ -64,15 +92,16 @@ func (fd *FD) Close() error {
return nil
}
+ return unix.Close(fd.disown())
+}
+
+func (fd *FD) disown() int {
value := int(fd.raw)
+ fds.Delete(int(value))
fd.raw = -1
- fd.Forget()
- return unix.Close(value)
-}
-
-func (fd *FD) Forget() {
runtime.SetFinalizer(fd, nil)
+ return value
}
func (fd *FD) Dup() (*FD, error) {
@@ -90,7 +119,15 @@ func (fd *FD) Dup() (*FD, error) {
return newFD(dup), nil
}
+// File takes ownership of FD and turns it into an [*os.File].
+//
+// You must not use the FD after the call returns.
+//
+// Returns nil if the FD is not valid.
func (fd *FD) File(name string) *os.File {
- fd.Forget()
- return os.NewFile(uintptr(fd.raw), name)
+ if fd.raw < 0 {
+ return nil
+ }
+
+ return os.NewFile(uintptr(fd.disown()), name)
}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go b/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go
new file mode 100644
index 000000000000..cd50dd1f6428
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go
@@ -0,0 +1,93 @@
+package sys
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sync"
+)
+
+// OnLeakFD controls tracing of [FD] lifetimes to detect resources that are not
+// closed by Close().
+//
+// If fn is not nil, tracing is enabled for all FDs created going forward. fn is
+// invoked for all FDs that are closed by the garbage collector instead of an
+// explicit Close() by a caller. Calling OnLeakFD twice with a non-nil fn
+// (without disabling tracing in the meantime) will cause a panic.
+//
+// If fn is nil, tracing will be disabled. Any FDs that have not been closed are
+// considered to be leaked; the previously registered fn will be invoked for
+// them before tracing is disabled.
+//
+// fn will be invoked at most once for every unique sys.FD allocation since a
+// runtime.Frames can only be unwound once.
+func OnLeakFD(fn func(*runtime.Frames)) {
+ // Enable leak tracing if new fn is provided.
+ if fn != nil {
+ if onLeakFD != nil {
+ panic("OnLeakFD called twice with non-nil fn")
+ }
+
+ onLeakFD = fn
+ return
+ }
+
+ // fn is nil past this point.
+
+ if onLeakFD == nil {
+ return
+ }
+
+ // Call onLeakFD for all open fds.
+ if fs := flushFrames(); len(fs) != 0 {
+ for _, f := range fs {
+ onLeakFD(f)
+ }
+ }
+
+ onLeakFD = nil
+}
+
+var onLeakFD func(*runtime.Frames)
+
+// fds is a registry of all file descriptors wrapped into sys.FD that were
+// created while an fd tracer was active.
+var fds sync.Map // map[int]*runtime.Frames
+
+// flushFrames removes all elements from fds and returns them as a slice. This
+// deals with the fact that a runtime.Frames can only be unwound once using
+// Next().
+func flushFrames() []*runtime.Frames {
+ var frames []*runtime.Frames
+ fds.Range(func(key, value any) bool {
+ frames = append(frames, value.(*runtime.Frames))
+ fds.Delete(key)
+ return true
+ })
+ return frames
+}
+
+func callersFrames() *runtime.Frames {
+ c := make([]uintptr, 32)
+
+ // Skip runtime.Callers and this function.
+ i := runtime.Callers(2, c)
+ if i == 0 {
+ return nil
+ }
+
+ return runtime.CallersFrames(c)
+}
+
+// FormatFrames formats a runtime.Frames as a human-readable string.
+func FormatFrames(fs *runtime.Frames) string {
+ var b bytes.Buffer
+ for {
+ f, more := fs.Next()
+ b.WriteString(fmt.Sprintf("\t%s+%#x\n\t\t%s:%d\n", f.Function, f.PC-f.Entry, f.File, f.Line))
+ if !more {
+ break
+ }
+ }
+ return b.String()
+}
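A sketch of how a test inside this module might wire up the tracer; passing nil afterwards flushes any FDs that were never closed through the callback and disables tracing again (the test body is illustrative):

```go
package sys

import (
	"runtime"
	"testing"
)

func TestNoFDLeaks(t *testing.T) {
	OnLeakFD(func(fs *runtime.Frames) {
		t.Errorf("leaked file descriptor:\n%s", FormatFrames(fs))
	})
	// nil flushes all still-open FDs through the callback above and
	// disables tracing again.
	defer OnLeakFD(nil)

	// ... exercise code that creates and closes sys.FDs ...
}
```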
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go
new file mode 100644
index 000000000000..c80744ae0e07
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go
@@ -0,0 +1,49 @@
+// Code generated by "stringer -type MapFlags"; DO NOT EDIT.
+
+package sys
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[BPF_F_NO_PREALLOC-1]
+ _ = x[BPF_F_NO_COMMON_LRU-2]
+ _ = x[BPF_F_NUMA_NODE-4]
+ _ = x[BPF_F_RDONLY-8]
+ _ = x[BPF_F_WRONLY-16]
+ _ = x[BPF_F_STACK_BUILD_ID-32]
+ _ = x[BPF_F_ZERO_SEED-64]
+ _ = x[BPF_F_RDONLY_PROG-128]
+ _ = x[BPF_F_WRONLY_PROG-256]
+ _ = x[BPF_F_CLONE-512]
+ _ = x[BPF_F_MMAPABLE-1024]
+ _ = x[BPF_F_PRESERVE_ELEMS-2048]
+ _ = x[BPF_F_INNER_MAP-4096]
+}
+
+const _MapFlags_name = "BPF_F_NO_PREALLOCBPF_F_NO_COMMON_LRUBPF_F_NUMA_NODEBPF_F_RDONLYBPF_F_WRONLYBPF_F_STACK_BUILD_IDBPF_F_ZERO_SEEDBPF_F_RDONLY_PROGBPF_F_WRONLY_PROGBPF_F_CLONEBPF_F_MMAPABLEBPF_F_PRESERVE_ELEMSBPF_F_INNER_MAP"
+
+var _MapFlags_map = map[MapFlags]string{
+ 1: _MapFlags_name[0:17],
+ 2: _MapFlags_name[17:36],
+ 4: _MapFlags_name[36:51],
+ 8: _MapFlags_name[51:63],
+ 16: _MapFlags_name[63:75],
+ 32: _MapFlags_name[75:95],
+ 64: _MapFlags_name[95:110],
+ 128: _MapFlags_name[110:127],
+ 256: _MapFlags_name[127:144],
+ 512: _MapFlags_name[144:155],
+ 1024: _MapFlags_name[155:169],
+ 2048: _MapFlags_name[169:189],
+ 4096: _MapFlags_name[189:204],
+}
+
+func (i MapFlags) String() string {
+ if str, ok := _MapFlags_map[i]; ok {
+ return str
+ }
+ return "MapFlags(" + strconv.FormatInt(int64(i), 10) + ")"
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
index a221006888d0..e9bb59059730 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
@@ -20,7 +20,7 @@ func NewSlicePointer(buf []byte) Pointer {
return Pointer{ptr: unsafe.Pointer(&buf[0])}
}
-// NewSlicePointer creates a 64-bit pointer from a byte slice.
+// NewSlicePointerLen creates a 64-bit pointer from a byte slice.
//
// Useful to assign both the pointer and the length in one go.
func NewSlicePointerLen(buf []byte) (Pointer, uint32) {
@@ -36,3 +36,17 @@ func NewStringPointer(str string) Pointer {
return Pointer{ptr: unsafe.Pointer(p)}
}
+
+// NewStringSlicePointer allocates an array of Pointers to each string in the
+// given slice of strings and returns a 64-bit pointer to the start of the
+// resulting array.
+//
+// Use this function to pass arrays of strings as syscall arguments.
+func NewStringSlicePointer(strings []string) Pointer {
+ sp := make([]Pointer, 0, len(strings))
+ for _, s := range strings {
+ sp = append(sp, NewStringPointer(s))
+ }
+
+ return Pointer{ptr: unsafe.Pointer(&sp[0])}
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
index df903d780b15..6278c79c9ef1 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
@@ -1,5 +1,4 @@
//go:build armbe || mips || mips64p32
-// +build armbe mips mips64p32
package sys
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
index a6a51edb6e10..c27b537e8e08 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
@@ -1,5 +1,4 @@
//go:build 386 || amd64p32 || arm || mipsle || mips64p32le
-// +build 386 amd64p32 arm mipsle mips64p32le
package sys
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
index 7c0279e487c7..2d7828230ae7 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
@@ -1,5 +1,4 @@
//go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32
-// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le,!armbe,!mips,!mips64p32
package sys
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/signals.go b/vendor/github.com/cilium/ebpf/internal/sys/signals.go
new file mode 100644
index 000000000000..7494c030c01c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/signals.go
@@ -0,0 +1,83 @@
+package sys
+
+import (
+ "fmt"
+ "runtime"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// A sigset containing only SIGPROF.
+var profSet unix.Sigset_t
+
+func init() {
+ // See sigsetAdd for details on the implementation. Open coded here so
+ // that the compiler will check the constant calculations for us.
+ profSet.Val[sigprofBit/wordBits] |= 1 << (sigprofBit % wordBits)
+}
+
+// maskProfilerSignal locks the calling goroutine to its underlying OS thread
+// and adds SIGPROF to the thread's signal mask. This prevents pprof from
+// interrupting expensive syscalls like e.g. BPF_PROG_LOAD.
+//
+// The caller must defer unmaskProfilerSignal() to reverse the operation.
+func maskProfilerSignal() {
+ runtime.LockOSThread()
+
+ if err := unix.PthreadSigmask(unix.SIG_BLOCK, &profSet, nil); err != nil {
+ runtime.UnlockOSThread()
+ panic(fmt.Errorf("masking profiler signal: %w", err))
+ }
+}
+
+// unmaskProfilerSignal removes SIGPROF from the underlying thread's signal
+// mask, allowing it to be interrupted for profiling once again.
+//
+// It also unlocks the current goroutine from its underlying OS thread.
+func unmaskProfilerSignal() {
+ defer runtime.UnlockOSThread()
+
+ if err := unix.PthreadSigmask(unix.SIG_UNBLOCK, &profSet, nil); err != nil {
+ panic(fmt.Errorf("unmasking profiler signal: %w", err))
+ }
+}
+
+const (
+ // Signal is the nth bit in the bitfield.
+ sigprofBit = int(unix.SIGPROF - 1)
+ // The number of bits in one Sigset_t word.
+ wordBits = int(unsafe.Sizeof(unix.Sigset_t{}.Val[0])) * 8
+)
+
+// sigsetAdd adds signal to set.
+//
+// Note: Sigset_t.Val's value type is uint32 or uint64 depending on the arch.
+// This function must be able to deal with both and so must avoid any direct
+// references to u32 or u64 types.
+func sigsetAdd(set *unix.Sigset_t, signal unix.Signal) error {
+ if signal < 1 {
+ return fmt.Errorf("signal %d must be larger than 0", signal)
+ }
+
+ // For amd64, runtime.sigaddset() performs the following operation:
+ // set[(signal-1)/32] |= 1 << ((uint32(signal) - 1) & 31)
+ //
+ // This trick depends on sigset being two u32's, causing a signal in the
+ // bottom 31 bits to be written to the low word if bit 32 is low, or the high
+ // word if bit 32 is high.
+
+ // Signal is the nth bit in the bitfield.
+ bit := int(signal - 1)
+ // Word within the sigset the bit needs to be written to.
+ word := bit / wordBits
+
+ if word >= len(set.Val) {
+ return fmt.Errorf("signal %d does not fit within unix.Sigset_t", signal)
+ }
+
+ // Write the signal bit into its corresponding word at the corrected offset.
+ set.Val[word] |= 1 << (bit % wordBits)
+
+ return nil
+}
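The word/bit arithmetic can be checked by hand. For SIGPROF (signal 27 on Linux) with 64-bit sigset words:

```go
package main

import "fmt"

func main() {
	const (
		sigprof  = 27 // unix.SIGPROF on Linux
		wordBits = 64 // one uint64 Sigset_t word
	)

	bit := sigprof - 1                    // signals are 1-based, bits are 0-based
	word := bit / wordBits                // index into Sigset_t.Val
	mask := uint64(1) << (bit % wordBits) // bit to set within that word

	fmt.Printf("word %d, mask %#x\n", word, mask) // word 0, mask 0x4000000
}
```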
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
index 2a5935dc9123..4fae04db5d8c 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
@@ -8,10 +8,22 @@ import (
"github.com/cilium/ebpf/internal/unix"
)
+// ENOTSUPP is a Linux internal error code that has leaked into UAPI.
+//
+// It is not the same as ENOTSUP or EOPNOTSUPP.
+var ENOTSUPP = syscall.Errno(524)
+
// BPF wraps SYS_BPF.
//
// Any pointers contained in attr must use the Pointer type from this package.
func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
+ // Prevent the Go profiler from repeatedly interrupting the verifier,
+ // which could otherwise lead to a livelock due to receiving EAGAIN.
+ if cmd == BPF_PROG_LOAD || cmd == BPF_PROG_RUN {
+ maskProfilerSignal()
+ defer unmaskProfilerSignal()
+ }
+
for {
r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
runtime.KeepAlive(attr)
@@ -33,10 +45,10 @@ func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
// Info is implemented by all structs that can be passed to the ObjInfo syscall.
//
-// MapInfo
-// ProgInfo
-// LinkInfo
-// BtfInfo
+// MapInfo
+// ProgInfo
+// LinkInfo
+// BtfInfo
type Info interface {
info() (unsafe.Pointer, uint32)
}
@@ -90,12 +102,45 @@ func NewObjName(name string) ObjName {
return result
}
+// LogLevel controls the verbosity of the kernel's eBPF program verifier.
+type LogLevel uint32
+
+const (
+ BPF_LOG_LEVEL1 LogLevel = 1 << iota
+ BPF_LOG_LEVEL2
+ BPF_LOG_STATS
+)
+
// LinkID uniquely identifies a bpf_link.
type LinkID uint32
// BTFID uniquely identifies a BTF blob loaded into the kernel.
type BTFID uint32
+// TypeID identifies a type in a BTF blob.
+type TypeID uint32
+
+// MapFlags control map behaviour.
+type MapFlags uint32
+
+//go:generate stringer -type MapFlags
+
+const (
+ BPF_F_NO_PREALLOC MapFlags = 1 << iota
+ BPF_F_NO_COMMON_LRU
+ BPF_F_NUMA_NODE
+ BPF_F_RDONLY
+ BPF_F_WRONLY
+ BPF_F_STACK_BUILD_ID
+ BPF_F_ZERO_SEED
+ BPF_F_RDONLY_PROG
+ BPF_F_WRONLY_PROG
+ BPF_F_CLONE
+ BPF_F_MMAPABLE
+ BPF_F_PRESERVE_ELEMS
+ BPF_F_INNER_MAP
+)
+
// wrappedErrno wraps syscall.Errno to prevent direct comparisons with
// syscall.E* or unix.E* constants.
//
@@ -108,6 +153,13 @@ func (we wrappedErrno) Unwrap() error {
return we.Errno
}
+func (we wrappedErrno) Error() string {
+ if we.Errno == ENOTSUPP {
+ return "operation not supported"
+ }
+ return we.Errno.Error()
+}
+
type syscallError struct {
error
errno syscall.Errno
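Since wrappedErrno unwraps to the underlying errno, callers can distinguish the kernel-internal code from the UAPI one with errors.Is. A hedged sketch of a feature-probe helper built on that:

```go
package example

import (
	"errors"
	"syscall"
)

// enotsupp mirrors sys.ENOTSUPP above: Linux-internal code 524, distinct
// from EOPNOTSUPP (95 on Linux).
var enotsupp = syscall.Errno(524)

// isNotSupported reports whether err is either flavour of "not supported",
// which feature probes typically treat identically.
func isNotSupported(err error) bool {
	return errors.Is(err, enotsupp) || errors.Is(err, syscall.EOPNOTSUPP)
}
```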
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go
index 291e3a6196cb..2af7759e5a30 100644
--- a/vendor/github.com/cilium/ebpf/internal/sys/types.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go
@@ -6,14 +6,14 @@ import (
"unsafe"
)
-type AdjRoomMode int32
+type AdjRoomMode uint32
const (
BPF_ADJ_ROOM_NET AdjRoomMode = 0
BPF_ADJ_ROOM_MAC AdjRoomMode = 1
)
-type AttachType int32
+type AttachType uint32
const (
BPF_CGROUP_INET_INGRESS AttachType = 0
@@ -62,7 +62,7 @@ const (
__MAX_BPF_ATTACH_TYPE AttachType = 43
)
-type Cmd int32
+type Cmd uint32
const (
BPF_MAP_CREATE Cmd = 0
@@ -104,7 +104,7 @@ const (
BPF_PROG_BIND_MAP Cmd = 35
)
-type FunctionId int32
+type FunctionId uint32
const (
BPF_FUNC_unspec FunctionId = 0
@@ -301,17 +301,27 @@ const (
BPF_FUNC_copy_from_user_task FunctionId = 191
BPF_FUNC_skb_set_tstamp FunctionId = 192
BPF_FUNC_ima_file_hash FunctionId = 193
- __BPF_FUNC_MAX_ID FunctionId = 194
+ BPF_FUNC_kptr_xchg FunctionId = 194
+ BPF_FUNC_map_lookup_percpu_elem FunctionId = 195
+ BPF_FUNC_skc_to_mptcp_sock FunctionId = 196
+ BPF_FUNC_dynptr_from_mem FunctionId = 197
+ BPF_FUNC_ringbuf_reserve_dynptr FunctionId = 198
+ BPF_FUNC_ringbuf_submit_dynptr FunctionId = 199
+ BPF_FUNC_ringbuf_discard_dynptr FunctionId = 200
+ BPF_FUNC_dynptr_read FunctionId = 201
+ BPF_FUNC_dynptr_write FunctionId = 202
+ BPF_FUNC_dynptr_data FunctionId = 203
+ __BPF_FUNC_MAX_ID FunctionId = 204
)
-type HdrStartOff int32
+type HdrStartOff uint32
const (
BPF_HDR_START_MAC HdrStartOff = 0
BPF_HDR_START_NET HdrStartOff = 1
)
-type LinkType int32
+type LinkType uint32
const (
BPF_LINK_TYPE_UNSPEC LinkType = 0
@@ -323,10 +333,11 @@ const (
BPF_LINK_TYPE_XDP LinkType = 6
BPF_LINK_TYPE_PERF_EVENT LinkType = 7
BPF_LINK_TYPE_KPROBE_MULTI LinkType = 8
- MAX_BPF_LINK_TYPE LinkType = 9
+ BPF_LINK_TYPE_STRUCT_OPS LinkType = 9
+ MAX_BPF_LINK_TYPE LinkType = 10
)
-type MapType int32
+type MapType uint32
const (
BPF_MAP_TYPE_UNSPEC MapType = 0
@@ -362,7 +373,7 @@ const (
BPF_MAP_TYPE_BLOOM_FILTER MapType = 30
)
-type ProgType int32
+type ProgType uint32
const (
BPF_PROG_TYPE_UNSPEC ProgType = 0
@@ -399,7 +410,7 @@ const (
BPF_PROG_TYPE_SYSCALL ProgType = 31
)
-type RetCode int32
+type RetCode uint32
const (
BPF_OK RetCode = 0
@@ -408,14 +419,14 @@ const (
BPF_LWT_REROUTE RetCode = 128
)
-type SkAction int32
+type SkAction uint32
const (
SK_DROP SkAction = 0
SK_PASS SkAction = 1
)
-type StackBuildIdStatus int32
+type StackBuildIdStatus uint32
const (
BPF_STACK_BUILD_ID_EMPTY StackBuildIdStatus = 0
@@ -423,13 +434,13 @@ const (
BPF_STACK_BUILD_ID_IP StackBuildIdStatus = 2
)
-type StatsType int32
+type StatsType uint32
const (
BPF_STATS_RUN_TIME StatsType = 0
)
-type XdpAction int32
+type XdpAction uint32
const (
XDP_ABORTED XdpAction = 0
@@ -474,15 +485,15 @@ type MapInfo struct {
KeySize uint32
ValueSize uint32
MaxEntries uint32
- MapFlags uint32
+ MapFlags MapFlags
Name ObjName
Ifindex uint32
- BtfVmlinuxValueTypeId uint32
+ BtfVmlinuxValueTypeId TypeID
NetnsDev uint64
NetnsIno uint64
BtfId uint32
- BtfKeyTypeId uint32
- BtfValueTypeId uint32
+ BtfKeyTypeId TypeID
+ BtfValueTypeId TypeID
_ [4]byte
MapExtra uint64
}
@@ -508,7 +519,7 @@ type ProgInfo struct {
NrJitedFuncLens uint32
JitedKsyms uint64
JitedFuncLens uint64
- BtfId uint32
+ BtfId BTFID
FuncInfoRecSize uint32
FuncInfo uint64
NrFuncInfo uint32
@@ -616,7 +627,7 @@ type LinkCreateAttr struct {
TargetFd uint32
AttachType AttachType
Flags uint32
- TargetBtfId uint32
+ TargetBtfId TypeID
_ [28]byte
}
@@ -646,6 +657,26 @@ func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) {
return NewFD(int(fd))
}
+type LinkCreateKprobeMultiAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ KprobeMultiFlags uint32
+ Count uint32
+ Syms Pointer
+ Addrs Pointer
+ Cookies Pointer
+}
+
+func LinkCreateKprobeMulti(attr *LinkCreateKprobeMultiAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
type LinkCreatePerfEventAttr struct {
ProgFd uint32
TargetFd uint32
@@ -663,6 +694,25 @@ func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) {
return NewFD(int(fd))
}
+type LinkCreateTracingAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ TargetBtfId BTFID
+ _ [4]byte
+ Cookie uint64
+ _ [16]byte
+}
+
+func LinkCreateTracing(attr *LinkCreateTracingAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
type LinkUpdateAttr struct {
LinkFd uint32
NewProgFd uint32
@@ -680,15 +730,15 @@ type MapCreateAttr struct {
KeySize uint32
ValueSize uint32
MaxEntries uint32
- MapFlags uint32
+ MapFlags MapFlags
InnerMapFd uint32
NumaNode uint32
MapName ObjName
MapIfindex uint32
BtfFd uint32
- BtfKeyTypeId uint32
- BtfValueTypeId uint32
- BtfVmlinuxValueTypeId uint32
+ BtfKeyTypeId TypeID
+ BtfValueTypeId TypeID
+ BtfVmlinuxValueTypeId TypeID
MapExtra uint64
}
@@ -951,7 +1001,7 @@ type ProgLoadAttr struct {
InsnCnt uint32
Insns Pointer
License Pointer
- LogLevel uint32
+ LogLevel LogLevel
LogSize uint32
LogBuf Pointer
KernVersion uint32
@@ -966,8 +1016,8 @@ type ProgLoadAttr struct {
LineInfoRecSize uint32
LineInfo Pointer
LineInfoCnt uint32
- AttachBtfId uint32
- AttachProgFd uint32
+ AttachBtfId TypeID
+ AttachBtfObjFd uint32
CoreReloCnt uint32
FdArray Pointer
CoreRelos Pointer
@@ -983,6 +1033,21 @@ func ProgLoad(attr *ProgLoadAttr) (*FD, error) {
return NewFD(int(fd))
}
+type ProgQueryAttr struct {
+ TargetFd uint32
+ AttachType AttachType
+ QueryFlags uint32
+ AttachFlags uint32
+ ProgIds Pointer
+ ProgCount uint32
+ _ [4]byte
+}
+
+func ProgQuery(attr *ProgQueryAttr) error {
+ _, err := BPF(BPF_PROG_QUERY, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
type ProgRunAttr struct {
ProgFd uint32
Retval uint32
@@ -1046,7 +1111,7 @@ type RawTracepointLinkInfo struct {
type TracingLinkInfo struct {
AttachType AttachType
TargetObjId uint32
- TargetBtfId uint32
+ TargetBtfId TypeID
}
type XDPLinkInfo struct{ Ifindex uint32 }
diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go
new file mode 100644
index 000000000000..4059a099b087
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go
@@ -0,0 +1,359 @@
+package tracefs
+
+import (
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+ ErrInvalidInput = errors.New("invalid input")
+
+ ErrInvalidMaxActive = errors.New("can only set maxactive on kretprobes")
+)
+
+//go:generate stringer -type=ProbeType -linecomment
+
+type ProbeType uint8
+
+const (
+ Kprobe ProbeType = iota // kprobe
+ Uprobe // uprobe
+)
+
+func (pt ProbeType) eventsFile() (*os.File, error) {
+ path, err := sanitizeTracefsPath(fmt.Sprintf("%s_events", pt.String()))
+ if err != nil {
+ return nil, err
+ }
+
+ return os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0666)
+}
+
+type ProbeArgs struct {
+ Type ProbeType
+ Symbol, Group, Path string
+ Offset, RefCtrOffset, Cookie uint64
+ Pid, RetprobeMaxActive int
+ Ret bool
+}
+
+// RandomGroup generates a pseudorandom string for use as a tracefs group name.
+// Returns an error when the output string would exceed 63 characters (kernel
+// limitation), when rand.Read() fails, or when prefix contains characters not
+// allowed by validIdentifier (alphanumeric or underscore).
+func RandomGroup(prefix string) (string, error) {
+ if !validIdentifier(prefix) {
+ return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, ErrInvalidInput)
+ }
+
+ b := make([]byte, 8)
+ if _, err := rand.Read(b); err != nil {
+ return "", fmt.Errorf("reading random bytes: %w", err)
+ }
+
+ group := fmt.Sprintf("%s_%x", prefix, b)
+ if len(group) > 63 {
+ return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, ErrInvalidInput)
+ }
+
+ return group, nil
+}
+
+// validIdentifier implements the equivalent of a regex match
+// against "^[a-zA-Z_][0-9a-zA-Z_]*$".
+//
+// Trace event groups, names and kernel symbols must adhere to this set
+// of characters. Non-empty, first character must not be a number, all
+// characters must be alphanumeric or underscore.
+func validIdentifier(s string) bool {
+ if len(s) < 1 {
+ return false
+ }
+ for i, c := range []byte(s) {
+ switch {
+ case c >= 'a' && c <= 'z':
+ case c >= 'A' && c <= 'Z':
+ case c == '_':
+ case i > 0 && c >= '0' && c <= '9':
+
+ default:
+ return false
+ }
+ }
+
+ return true
+}
+
+func sanitizeTracefsPath(path ...string) (string, error) {
+ base, err := getTracefsPath()
+ if err != nil {
+ return "", err
+ }
+ l := filepath.Join(path...)
+ p := filepath.Join(base, l)
+ if !strings.HasPrefix(p, base) {
+ return "", fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, ErrInvalidInput)
+ }
+ return p, nil
+}
+
+// getTracefsPath will return a correct path to the tracefs mount point.
+// Since kernel 4.1 tracefs should be mounted by default at /sys/kernel/tracing,
+// but may also be available at /sys/kernel/debug/tracing if debugfs is mounted.
+// The available tracefs paths depend on distribution choices.
+var getTracefsPath = internal.Memoize(func() (string, error) {
+ for _, p := range []struct {
+ path string
+ fsType int64
+ }{
+ {"/sys/kernel/tracing", unix.TRACEFS_MAGIC},
+ {"/sys/kernel/debug/tracing", unix.TRACEFS_MAGIC},
+ // RHEL/CentOS
+ {"/sys/kernel/debug/tracing", unix.DEBUGFS_MAGIC},
+ } {
+ if fsType, err := internal.FSType(p.path); err == nil && fsType == p.fsType {
+ return p.path, nil
+ }
+ }
+
+ return "", errors.New("neither debugfs nor tracefs are mounted")
+})
+
+// sanitizeIdentifier replaces every invalid character for the tracefs api with an underscore.
+//
+// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString(s, "_").
+func sanitizeIdentifier(s string) string {
+ var skip bool
+ return strings.Map(func(c rune) rune {
+ switch {
+ case c >= 'a' && c <= 'z',
+ c >= 'A' && c <= 'Z',
+ c >= '0' && c <= '9':
+ skip = false
+ return c
+
+ case skip:
+ return -1
+
+ default:
+ skip = true
+ return '_'
+ }
+ }, s)
+}
+
+// EventID reads a trace event's ID from tracefs given its group and name.
+// The kernel requires group and name to be alphanumeric or underscore.
+func EventID(group, name string) (uint64, error) {
+ if !validIdentifier(group) {
+ return 0, fmt.Errorf("invalid tracefs group: %q", group)
+ }
+
+ if !validIdentifier(name) {
+ return 0, fmt.Errorf("invalid tracefs name: %q", name)
+ }
+
+ path, err := sanitizeTracefsPath("events", group, name, "id")
+ if err != nil {
+ return 0, err
+ }
+ tid, err := internal.ReadUint64FromFile("%d\n", path)
+ if errors.Is(err, os.ErrNotExist) {
+ return 0, err
+ }
+ if err != nil {
+ return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err)
+ }
+
+ return tid, nil
+}
+
+func probePrefix(ret bool, maxActive int) string {
+ if ret {
+ if maxActive > 0 {
+ return fmt.Sprintf("r%d", maxActive)
+ }
+ return "r"
+ }
+ return "p"
+}
+
+// Event represents an entry in a tracefs probe events file.
+type Event struct {
+ typ ProbeType
+ group, name string
+ // event id allocated by the kernel. 0 if the event has already been removed.
+ id uint64
+}
+
+// NewEvent creates a new ephemeral trace event.
+//
+// Returns os.ErrNotExist if symbol is not a valid
+// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist
+// if a probe with the same group and symbol already exists. Returns an error if
+// args.RetprobeMaxActive is used on non-kprobe types. Returns ErrNotSupported if
+// the kernel is too old to support kretprobe maxactive.
+func NewEvent(args ProbeArgs) (*Event, error) {
+ // Before attempting to create a trace event through tracefs,
+ // check if an event with the same group and name already exists.
+ // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate
+ // entry, so we need to rely on reads for detecting uniqueness.
+ eventName := sanitizeIdentifier(args.Symbol)
+ _, err := EventID(args.Group, eventName)
+ if err == nil {
+ return nil, fmt.Errorf("trace event %s/%s: %w", args.Group, eventName, os.ErrExist)
+ }
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("checking trace event %s/%s: %w", args.Group, eventName, err)
+ }
+
+ // Open the kprobe_events file in tracefs.
+ f, err := args.Type.eventsFile()
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var pe, token string
+ switch args.Type {
+ case Kprobe:
+ // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
+ // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
+ // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
+ // -:[GRP/]EVENT : Clear a probe
+ //
+ // Some examples:
+ // r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
+ // p:ebpf_5678/p_my_kprobe __x64_sys_execve
+ //
+ // Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
+ // kernel default to NR_CPUS. This is desired in most eBPF cases since
+ // subsampling or rate limiting logic can be more accurately implemented in
+ // the eBPF program itself.
+ // See Documentation/kprobes.txt for more details.
+ if args.RetprobeMaxActive != 0 && !args.Ret {
+ return nil, ErrInvalidMaxActive
+ }
+ token = KprobeToken(args)
+ pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, args.RetprobeMaxActive), args.Group, eventName, token)
+ case Uprobe:
+ // The uprobe_events syntax is as follows:
+ // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
+ // r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe
+ // -:[GRP/]EVENT : Clear a probe
+ //
+ // Some examples:
+ // r:ebpf_1234/readline /bin/bash:0x12345
+ // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123)
+ //
+ // See Documentation/trace/uprobetracer.txt for more details.
+ if args.RetprobeMaxActive != 0 {
+ return nil, ErrInvalidMaxActive
+ }
+ token = UprobeToken(args)
+ pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, 0), args.Group, eventName, token)
+ }
+ _, err = f.WriteString(pe)
+
+ // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
+ // when trying to create a retprobe for a missing symbol.
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("token %s: not found: %w", token, err)
+ }
+ // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved
+ // to an invalid insn boundary. The exact conditions that trigger this error are
+ // arch specific however.
+ if errors.Is(err, syscall.EILSEQ) {
+ return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
+ }
+ // ERANGE is returned when the `SYM[+offs]` token is too big and cannot
+ // be resolved.
+ if errors.Is(err, syscall.ERANGE) {
+ return nil, fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("token %s: writing '%s': %w", token, pe, err)
+ }
+
+ // Get the newly-created trace event's id.
+ tid, err := EventID(args.Group, eventName)
+ if args.RetprobeMaxActive != 0 && errors.Is(err, os.ErrNotExist) {
+ // Kernels < 4.12 don't support maxactive and therefore auto generate
+ // group and event names from the symbol and offset. The symbol is used
+ // without any sanitization.
+ // See https://elixir.bootlin.com/linux/v4.10/source/kernel/trace/trace_kprobe.c#L712
+ event := fmt.Sprintf("kprobes/r_%s_%d", args.Symbol, args.Offset)
+ if err := removeEvent(args.Type, event); err != nil {
+ return nil, fmt.Errorf("failed to remove spurious maxactive event: %s", err)
+ }
+ return nil, fmt.Errorf("create trace event with non-default maxactive: %w", internal.ErrNotSupported)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("get trace event id: %w", err)
+ }
+
+ evt := &Event{args.Type, args.Group, eventName, tid}
+ runtime.SetFinalizer(evt, (*Event).Close)
+ return evt, nil
+}
+
+// Close removes the event from tracefs.
+//
+// Returns os.ErrClosed if the event has already been closed before.
+func (evt *Event) Close() error {
+ if evt.id == 0 {
+ return os.ErrClosed
+ }
+
+ evt.id = 0
+ runtime.SetFinalizer(evt, nil)
+ pe := fmt.Sprintf("%s/%s", evt.group, evt.name)
+ return removeEvent(evt.typ, pe)
+}
+
+func removeEvent(typ ProbeType, pe string) error {
+ f, err := typ.eventsFile()
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // See [k,u]probe_events syntax above. The probe type does not need to be specified
+ // for removals.
+ if _, err = f.WriteString("-:" + pe); err != nil {
+ return fmt.Errorf("remove event %q from %s: %w", pe, f.Name(), err)
+ }
+
+ return nil
+}
+
+// ID returns the tracefs ID associated with the event.
+func (evt *Event) ID() uint64 {
+ return evt.id
+}
+
+// Group returns the tracefs group used by the event.
+func (evt *Event) Group() string {
+ return evt.group
+}
+
+// KprobeToken creates the SYM[+offs] token for the tracefs api.
+func KprobeToken(args ProbeArgs) string {
+ po := args.Symbol
+
+ if args.Offset != 0 {
+ po += fmt.Sprintf("+%#x", args.Offset)
+ }
+
+ return po
+}
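A usage sketch tying the API together, as it might appear in a test inside this package; it creates an ephemeral kretprobe event on vfs_read and removes it again (symbol and group prefix are illustrative):

```go
package tracefs

import "fmt"

func ExampleNewEvent() {
	group, err := RandomGroup("ebpf")
	if err != nil {
		panic(err)
	}

	evt, err := NewEvent(ProbeArgs{
		Type:   Kprobe,
		Group:  group,
		Symbol: "vfs_read",
		Ret:    true, // kretprobe; MAXACTIVE defaults to NR_CPUS
	})
	if err != nil {
		panic(err)
	}
	defer evt.Close() // writes "-:<group>/<event>" to kprobe_events

	fmt.Println("trace event id:", evt.ID())
}
```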
diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go
new file mode 100644
index 000000000000..87cb0a059b4e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go
@@ -0,0 +1,24 @@
+// Code generated by "stringer -type=ProbeType -linecomment"; DO NOT EDIT.
+
+package tracefs
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Kprobe-0]
+ _ = x[Uprobe-1]
+}
+
+const _ProbeType_name = "kprobeuprobe"
+
+var _ProbeType_index = [...]uint8{0, 6, 12}
+
+func (i ProbeType) String() string {
+ if i >= ProbeType(len(_ProbeType_index)-1) {
+ return "ProbeType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _ProbeType_name[_ProbeType_index[i]:_ProbeType_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go
new file mode 100644
index 000000000000..994f31260deb
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go
@@ -0,0 +1,16 @@
+package tracefs
+
+import "fmt"
+
+// UprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api.
+func UprobeToken(args ProbeArgs) string {
+ po := fmt.Sprintf("%s:%#x", args.Path, args.Offset)
+
+ if args.RefCtrOffset != 0 {
+ // This is not documented in Documentation/trace/uprobetracer.txt.
+ // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564
+ po += fmt.Sprintf("(%#x)", args.RefCtrOffset)
+ }
+
+ return po
+}
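The resulting tokens are easiest to see by example (paths and offsets are illustrative):

```go
package tracefs

import "fmt"

func ExampleUprobeToken() {
	fmt.Println(UprobeToken(ProbeArgs{Path: "/bin/bash", Offset: 0x12345}))
	fmt.Println(UprobeToken(ProbeArgs{Path: "/bin/bash", Offset: 0x12345, RefCtrOffset: 0x123}))
	// Output:
	// /bin/bash:0x12345
	// /bin/bash:0x12345(0x123)
}
```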
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/doc.go b/vendor/github.com/cilium/ebpf/internal/unix/doc.go
new file mode 100644
index 000000000000..d168d36f1802
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/unix/doc.go
@@ -0,0 +1,11 @@
+// Package unix re-exports Linux specific parts of golang.org/x/sys/unix.
+//
+// It avoids breaking compilation on other OSes by providing stubs as follows:
+// - Invoking a function always returns an error.
+// - Errnos have distinct, non-zero values.
+// - Constants have distinct but meaningless values.
+// - Types use the same names for members, but may or may not follow the
+// Linux layout.
+package unix
+
+// Note: please don't add any custom API to this package. Use internal/sys instead.
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
index db4a1f5bf9e7..7c9705919a36 100644
--- a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
@@ -1,5 +1,4 @@
//go:build linux
-// +build linux
package unix
@@ -10,189 +9,178 @@ import (
)
const (
- ENOENT = linux.ENOENT
- EEXIST = linux.EEXIST
- EAGAIN = linux.EAGAIN
- ENOSPC = linux.ENOSPC
- EINVAL = linux.EINVAL
- EPOLLIN = linux.EPOLLIN
- EINTR = linux.EINTR
- EPERM = linux.EPERM
- ESRCH = linux.ESRCH
- ENODEV = linux.ENODEV
- EBADF = linux.EBADF
- E2BIG = linux.E2BIG
- EFAULT = linux.EFAULT
- EACCES = linux.EACCES
- // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP
- ENOTSUPP = syscall.Errno(0x20c)
-
- BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC
- BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE
- BPF_F_RDONLY = linux.BPF_F_RDONLY
- BPF_F_WRONLY = linux.BPF_F_WRONLY
- BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG
- BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG
- BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE
- BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE
- BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP
- BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN
- BPF_TAG_SIZE = linux.BPF_TAG_SIZE
- BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT
- BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT
- BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ
- SYS_BPF = linux.SYS_BPF
- F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC
- EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD
- EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC
- O_CLOEXEC = linux.O_CLOEXEC
- O_NONBLOCK = linux.O_NONBLOCK
- PROT_READ = linux.PROT_READ
- PROT_WRITE = linux.PROT_WRITE
- MAP_SHARED = linux.MAP_SHARED
- PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1
- PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE
- PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT
- PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT
- PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE
- PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE
- PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF
- PerfBitWatermark = linux.PerfBitWatermark
- PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW
- PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC
- RLIM_INFINITY = linux.RLIM_INFINITY
- RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK
- BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME
- PERF_RECORD_LOST = linux.PERF_RECORD_LOST
- PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE
- AT_FDCWD = linux.AT_FDCWD
- RENAME_NOREPLACE = linux.RENAME_NOREPLACE
- SO_ATTACH_BPF = linux.SO_ATTACH_BPF
- SO_DETACH_BPF = linux.SO_DETACH_BPF
- SOL_SOCKET = linux.SOL_SOCKET
+ ENOENT = linux.ENOENT
+ EEXIST = linux.EEXIST
+ EAGAIN = linux.EAGAIN
+ ENOSPC = linux.ENOSPC
+ EINVAL = linux.EINVAL
+ EPOLLIN = linux.EPOLLIN
+ EINTR = linux.EINTR
+ EPERM = linux.EPERM
+ ESRCH = linux.ESRCH
+ ENODEV = linux.ENODEV
+ EBADF = linux.EBADF
+ E2BIG = linux.E2BIG
+ EFAULT = linux.EFAULT
+ EACCES = linux.EACCES
+ EILSEQ = linux.EILSEQ
+ EOPNOTSUPP = linux.EOPNOTSUPP
)
-// Statfs_t is a wrapper
-type Statfs_t = linux.Statfs_t
+const (
+ BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC
+ BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE
+ BPF_F_RDONLY = linux.BPF_F_RDONLY
+ BPF_F_WRONLY = linux.BPF_F_WRONLY
+ BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG
+ BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG
+ BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE
+ BPF_F_XDP_HAS_FRAGS = linux.BPF_F_XDP_HAS_FRAGS
+ BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE
+ BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP
+ BPF_F_KPROBE_MULTI_RETURN = linux.BPF_F_KPROBE_MULTI_RETURN
+ BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN
+ BPF_TAG_SIZE = linux.BPF_TAG_SIZE
+ BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT
+ BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT
+ BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ
+ SYS_BPF = linux.SYS_BPF
+ F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC
+ EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD
+ EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC
+ O_CLOEXEC = linux.O_CLOEXEC
+ O_NONBLOCK = linux.O_NONBLOCK
+ PROT_NONE = linux.PROT_NONE
+ PROT_READ = linux.PROT_READ
+ PROT_WRITE = linux.PROT_WRITE
+ MAP_ANON = linux.MAP_ANON
+ MAP_SHARED = linux.MAP_SHARED
+ MAP_PRIVATE = linux.MAP_PRIVATE
+ PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1
+ PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE
+ PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT
+ PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT
+ PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE
+ PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE
+ PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF
+ PerfBitWatermark = linux.PerfBitWatermark
+ PerfBitWriteBackward = linux.PerfBitWriteBackward
+ PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW
+ PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC
+ RLIM_INFINITY = linux.RLIM_INFINITY
+ RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK
+ BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME
+ PERF_RECORD_LOST = linux.PERF_RECORD_LOST
+ PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE
+ AT_FDCWD = linux.AT_FDCWD
+ RENAME_NOREPLACE = linux.RENAME_NOREPLACE
+ SO_ATTACH_BPF = linux.SO_ATTACH_BPF
+ SO_DETACH_BPF = linux.SO_DETACH_BPF
+ SOL_SOCKET = linux.SOL_SOCKET
+ SIGPROF = linux.SIGPROF
+ SIG_BLOCK = linux.SIG_BLOCK
+ SIG_UNBLOCK = linux.SIG_UNBLOCK
+ EM_NONE = linux.EM_NONE
+ EM_BPF = linux.EM_BPF
+ BPF_FS_MAGIC = linux.BPF_FS_MAGIC
+ TRACEFS_MAGIC = linux.TRACEFS_MAGIC
+ DEBUGFS_MAGIC = linux.DEBUGFS_MAGIC
+)
+type Statfs_t = linux.Statfs_t
type Stat_t = linux.Stat_t
-
-// Rlimit is a wrapper
type Rlimit = linux.Rlimit
+type Signal = linux.Signal
+type Sigset_t = linux.Sigset_t
+type PerfEventMmapPage = linux.PerfEventMmapPage
+type EpollEvent = linux.EpollEvent
+type PerfEventAttr = linux.PerfEventAttr
+type Utsname = linux.Utsname
-// Syscall is a wrapper
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
return linux.Syscall(trap, a1, a2, a3)
}
-// FcntlInt is a wrapper
+func PthreadSigmask(how int, set, oldset *Sigset_t) error {
+ return linux.PthreadSigmask(how, set, oldset)
+}
+
func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
return linux.FcntlInt(fd, cmd, arg)
}
-// IoctlSetInt is a wrapper
func IoctlSetInt(fd int, req uint, value int) error {
return linux.IoctlSetInt(fd, req, value)
}
-// Statfs is a wrapper
func Statfs(path string, buf *Statfs_t) (err error) {
return linux.Statfs(path, buf)
}
-// Close is a wrapper
func Close(fd int) (err error) {
return linux.Close(fd)
}
-// EpollEvent is a wrapper
-type EpollEvent = linux.EpollEvent
-
-// EpollWait is a wrapper
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
return linux.EpollWait(epfd, events, msec)
}
-// EpollCtl is a wrapper
func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
return linux.EpollCtl(epfd, op, fd, event)
}
-// Eventfd is a wrapper
func Eventfd(initval uint, flags int) (fd int, err error) {
return linux.Eventfd(initval, flags)
}
-// Write is a wrapper
func Write(fd int, p []byte) (n int, err error) {
return linux.Write(fd, p)
}
-// EpollCreate1 is a wrapper
func EpollCreate1(flag int) (fd int, err error) {
return linux.EpollCreate1(flag)
}
-// PerfEventMmapPage is a wrapper
-type PerfEventMmapPage linux.PerfEventMmapPage
-
-// SetNonblock is a wrapper
func SetNonblock(fd int, nonblocking bool) (err error) {
return linux.SetNonblock(fd, nonblocking)
}
-// Mmap is a wrapper
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
return linux.Mmap(fd, offset, length, prot, flags)
}
-// Munmap is a wrapper
func Munmap(b []byte) (err error) {
return linux.Munmap(b)
}
-// PerfEventAttr is a wrapper
-type PerfEventAttr = linux.PerfEventAttr
-
-// PerfEventOpen is a wrapper
func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags)
}
-// Utsname is a wrapper
-type Utsname = linux.Utsname
-
-// Uname is a wrapper
func Uname(buf *Utsname) (err error) {
return linux.Uname(buf)
}
-// Getpid is a wrapper
func Getpid() int {
return linux.Getpid()
}
-// Gettid is a wrapper
func Gettid() int {
return linux.Gettid()
}
-// Tgkill is a wrapper
func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
return linux.Tgkill(tgid, tid, sig)
}
-// BytePtrFromString is a wrapper
func BytePtrFromString(s string) (*byte, error) {
return linux.BytePtrFromString(s)
}
-// ByteSliceToString is a wrapper
func ByteSliceToString(s []byte) string {
return linux.ByteSliceToString(s)
}
-// Renameat2 is a wrapper
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error {
return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags)
}
@@ -208,3 +196,7 @@ func Open(path string, mode int, perm uint32) (int, error) {
func Fstat(fd int, stat *Stat_t) error {
return linux.Fstat(fd, stat)
}
+
+func SetsockoptInt(fd, level, opt, value int) error {
+ return linux.SetsockoptInt(fd, level, opt, value)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
index 133c267dbc3d..5e86b5052a19 100644
--- a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
@@ -1,5 +1,4 @@
//go:build !linux
-// +build !linux
package unix
@@ -11,70 +10,87 @@ import (
var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
+// Errnos are distinct and non-zero.
const (
- ENOENT = syscall.ENOENT
- EEXIST = syscall.EEXIST
- EAGAIN = syscall.EAGAIN
- ENOSPC = syscall.ENOSPC
- EINVAL = syscall.EINVAL
- EINTR = syscall.EINTR
- EPERM = syscall.EPERM
- ESRCH = syscall.ESRCH
- ENODEV = syscall.ENODEV
- EBADF = syscall.Errno(0)
- E2BIG = syscall.Errno(0)
- EFAULT = syscall.EFAULT
- EACCES = syscall.Errno(0)
- // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP
- ENOTSUPP = syscall.Errno(0x20c)
-
- BPF_F_NO_PREALLOC = 0
- BPF_F_NUMA_NODE = 0
- BPF_F_RDONLY = 0
- BPF_F_WRONLY = 0
- BPF_F_RDONLY_PROG = 0
- BPF_F_WRONLY_PROG = 0
- BPF_F_SLEEPABLE = 0
- BPF_F_MMAPABLE = 0
- BPF_F_INNER_MAP = 0
- BPF_OBJ_NAME_LEN = 0x10
- BPF_TAG_SIZE = 0x8
- BPF_RINGBUF_BUSY_BIT = 0
- BPF_RINGBUF_DISCARD_BIT = 0
- BPF_RINGBUF_HDR_SZ = 0
- SYS_BPF = 321
- F_DUPFD_CLOEXEC = 0x406
- EPOLLIN = 0x1
- EPOLL_CTL_ADD = 0x1
- EPOLL_CLOEXEC = 0x80000
- O_CLOEXEC = 0x80000
- O_NONBLOCK = 0x800
- PROT_READ = 0x1
- PROT_WRITE = 0x2
- MAP_SHARED = 0x1
- PERF_ATTR_SIZE_VER1 = 0
- PERF_TYPE_SOFTWARE = 0x1
- PERF_TYPE_TRACEPOINT = 0
- PERF_COUNT_SW_BPF_OUTPUT = 0xa
- PERF_EVENT_IOC_DISABLE = 0
- PERF_EVENT_IOC_ENABLE = 0
- PERF_EVENT_IOC_SET_BPF = 0
- PerfBitWatermark = 0x4000
- PERF_SAMPLE_RAW = 0x400
- PERF_FLAG_FD_CLOEXEC = 0x8
- RLIM_INFINITY = 0x7fffffffffffffff
- RLIMIT_MEMLOCK = 8
- BPF_STATS_RUN_TIME = 0
- PERF_RECORD_LOST = 2
- PERF_RECORD_SAMPLE = 9
- AT_FDCWD = -0x2
- RENAME_NOREPLACE = 0x1
- SO_ATTACH_BPF = 0x32
- SO_DETACH_BPF = 0x1b
- SOL_SOCKET = 0x1
+ ENOENT syscall.Errno = iota + 1
+ EEXIST
+ EAGAIN
+ ENOSPC
+ EINVAL
+ EINTR
+ EPERM
+ ESRCH
+ ENODEV
+ EBADF
+ E2BIG
+ EFAULT
+ EACCES
+ EILSEQ
+ EOPNOTSUPP
+)
+
+// Constants are distinct to avoid breaking switch statements.
+const (
+ BPF_F_NO_PREALLOC = iota
+ BPF_F_NUMA_NODE
+ BPF_F_RDONLY
+ BPF_F_WRONLY
+ BPF_F_RDONLY_PROG
+ BPF_F_WRONLY_PROG
+ BPF_F_SLEEPABLE
+ BPF_F_MMAPABLE
+ BPF_F_INNER_MAP
+ BPF_F_KPROBE_MULTI_RETURN
+ BPF_F_XDP_HAS_FRAGS
+ BPF_OBJ_NAME_LEN
+ BPF_TAG_SIZE
+ BPF_RINGBUF_BUSY_BIT
+ BPF_RINGBUF_DISCARD_BIT
+ BPF_RINGBUF_HDR_SZ
+ SYS_BPF
+ F_DUPFD_CLOEXEC
+ EPOLLIN
+ EPOLL_CTL_ADD
+ EPOLL_CLOEXEC
+ O_CLOEXEC
+ O_NONBLOCK
+ PROT_NONE
+ PROT_READ
+ PROT_WRITE
+ MAP_ANON
+ MAP_SHARED
+ MAP_PRIVATE
+ PERF_ATTR_SIZE_VER1
+ PERF_TYPE_SOFTWARE
+ PERF_TYPE_TRACEPOINT
+ PERF_COUNT_SW_BPF_OUTPUT
+ PERF_EVENT_IOC_DISABLE
+ PERF_EVENT_IOC_ENABLE
+ PERF_EVENT_IOC_SET_BPF
+ PerfBitWatermark
+ PerfBitWriteBackward
+ PERF_SAMPLE_RAW
+ PERF_FLAG_FD_CLOEXEC
+ RLIM_INFINITY
+ RLIMIT_MEMLOCK
+ BPF_STATS_RUN_TIME
+ PERF_RECORD_LOST
+ PERF_RECORD_SAMPLE
+ AT_FDCWD
+ RENAME_NOREPLACE
+ SO_ATTACH_BPF
+ SO_DETACH_BPF
+ SOL_SOCKET
+ SIGPROF
+ SIG_BLOCK
+ SIG_UNBLOCK
+ EM_NONE
+ EM_BPF
+ BPF_FS_MAGIC
+ TRACEFS_MAGIC
+ DEBUGFS_MAGIC
)
-// Statfs_t is a wrapper
type Statfs_t struct {
Type int64
Bsize int64
@@ -90,72 +106,81 @@ type Statfs_t struct {
Spare [4]int64
}
-type Stat_t struct{}
+type Stat_t struct {
+ Dev uint64
+ Ino uint64
+ Nlink uint64
+ Mode uint32
+ Uid uint32
+ Gid uint32
+ _ int32
+ Rdev uint64
+ Size int64
+ Blksize int64
+ Blocks int64
+}
-// Rlimit is a wrapper
type Rlimit struct {
Cur uint64
Max uint64
}
-// Syscall is a wrapper
+type Signal int
+
+type Sigset_t struct {
+ Val [4]uint64
+}
+
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
- return 0, 0, syscall.Errno(1)
+ return 0, 0, syscall.ENOTSUP
+}
+
+func PthreadSigmask(how int, set, oldset *Sigset_t) error {
+ return errNonLinux
}
-// FcntlInt is a wrapper
func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
return -1, errNonLinux
}
-// IoctlSetInt is a wrapper
func IoctlSetInt(fd int, req uint, value int) error {
return errNonLinux
}
-// Statfs is a wrapper
func Statfs(path string, buf *Statfs_t) error {
return errNonLinux
}
-// Close is a wrapper
func Close(fd int) (err error) {
return errNonLinux
}
-// EpollEvent is a wrapper
type EpollEvent struct {
Events uint32
Fd int32
Pad int32
}
-// EpollWait is a wrapper
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
return 0, errNonLinux
}
-// EpollCtl is a wrapper
func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
return errNonLinux
}
-// Eventfd is a wrapper
func Eventfd(initval uint, flags int) (fd int, err error) {
return 0, errNonLinux
}
-// Write is a wrapper
func Write(fd int, p []byte) (n int, err error) {
return 0, errNonLinux
}
-// EpollCreate1 is a wrapper
func EpollCreate1(flag int) (fd int, err error) {
return 0, errNonLinux
}
-// PerfEventMmapPage is a wrapper
type PerfEventMmapPage struct {
Version uint32
Compat_version uint32
@@ -182,22 +207,18 @@ type PerfEventMmapPage struct {
Aux_size uint64
}
-// SetNonblock is a wrapper
func SetNonblock(fd int, nonblocking bool) (err error) {
return errNonLinux
}
-// Mmap is a wrapper
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
return []byte{}, errNonLinux
}
-// Munmap is a wrapper
func Munmap(b []byte) (err error) {
return errNonLinux
}
-// PerfEventAttr is a wrapper
type PerfEventAttr struct {
Type uint32
Size uint32
@@ -219,48 +240,39 @@ type PerfEventAttr struct {
Sample_max_stack uint16
}
-// PerfEventOpen is a wrapper
func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
return 0, errNonLinux
}
-// Utsname is a wrapper
type Utsname struct {
Release [65]byte
Version [65]byte
}
-// Uname is a wrapper
func Uname(buf *Utsname) (err error) {
return errNonLinux
}
-// Getpid is a wrapper
func Getpid() int {
return -1
}
-// Gettid is a wrapper
func Gettid() int {
return -1
}
-// Tgkill is a wrapper
func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
return errNonLinux
}
-// BytePtrFromString is a wrapper
func BytePtrFromString(s string) (*byte, error) {
return nil, errNonLinux
}
-// ByteSliceToString is a wrapper
func ByteSliceToString(s []byte) string {
return ""
}
-// Renameat2 is a wrapper
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error {
return errNonLinux
}
@@ -276,3 +288,7 @@ func Open(path string, mode int, perm uint32) (int, error) {
func Fstat(fd int, stat *Stat_t) error {
return errNonLinux
}
+
+func SetsockoptInt(fd, level, opt, value int) error {
+ return errNonLinux
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/vdso.go b/vendor/github.com/cilium/ebpf/internal/vdso.go
index ae4821de20c4..10e639bf06e9 100644
--- a/vendor/github.com/cilium/ebpf/internal/vdso.go
+++ b/vendor/github.com/cilium/ebpf/internal/vdso.go
@@ -23,6 +23,9 @@ func vdsoVersion() (uint32, error) {
// to the process. Go does not expose that data, so we must read it from procfs.
// https://man7.org/linux/man-pages/man3/getauxval.3.html
av, err := os.Open("/proc/self/auxv")
+ if errors.Is(err, unix.EACCES) {
+ return 0, fmt.Errorf("opening auxv: %w (process may not be dumpable due to file capabilities)", err)
+ }
if err != nil {
return 0, fmt.Errorf("opening auxv: %w", err)
}
@@ -117,7 +120,7 @@ func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) {
var name string
if n.NameSize > 0 {
// Read the note name, aligned to 4 bytes.
- buf := make([]byte, Align(int(n.NameSize), 4))
+ buf := make([]byte, Align(n.NameSize, 4))
if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil {
return 0, fmt.Errorf("reading note name: %w", err)
}
@@ -139,7 +142,7 @@ func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) {
}
// Discard the note descriptor if it exists but we're not interested in it.
- if _, err := io.CopyN(io.Discard, sr, int64(Align(int(n.DescSize), 4))); err != nil {
+ if _, err := io.CopyN(io.Discard, sr, int64(Align(n.DescSize, 4))); err != nil {
return 0, err
}
}
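The dropped int() conversions imply internal.Align is now generic over integer types. A minimal sketch of such a helper, assuming the usual round-up-to-a-multiple semantics:

```go
package internal

// integer lists the types Align is assumed to accept.
type integer interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64 |
		~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}

// Align rounds n up to the next multiple of alignment.
func Align[I integer](n, alignment I) I {
	return (n + alignment - 1) / alignment * alignment
}
```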
diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go
index 370e01e4447c..9b17ffb44de5 100644
--- a/vendor/github.com/cilium/ebpf/internal/version.go
+++ b/vendor/github.com/cilium/ebpf/internal/version.go
@@ -2,7 +2,6 @@ package internal
import (
"fmt"
- "sync"
"github.com/cilium/ebpf/internal/unix"
)
@@ -15,14 +14,6 @@ const (
MagicKernelVersion = 0xFFFFFFFE
)
-var (
- kernelVersion = struct {
- once sync.Once
- version Version
- err error
- }{}
-)
-
// A Version in the form Major.Minor.Patch.
type Version [3]uint16
@@ -88,16 +79,9 @@ func (v Version) Kernel() uint32 {
}
// KernelVersion returns the version of the currently running kernel.
-func KernelVersion() (Version, error) {
- kernelVersion.once.Do(func() {
- kernelVersion.version, kernelVersion.err = detectKernelVersion()
- })
-
- if kernelVersion.err != nil {
- return Version{}, kernelVersion.err
- }
- return kernelVersion.version, nil
-}
+var KernelVersion = Memoize(func() (Version, error) {
+ return detectKernelVersion()
+})
// detectKernelVersion returns the version of the running kernel.
func detectKernelVersion() (Version, error) {
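KernelVersion now leans on an internal.Memoize helper instead of the hand-rolled sync.Once struct. A plausible sketch of that helper, assuming it caches both the first result and its error:

```go
package internal

import "sync"

// Memoize returns a function that invokes fn at most once and replays the
// cached result (including any error) on every subsequent call.
func Memoize[T any](fn func() (T, error)) func() (T, error) {
	var (
		once   sync.Once
		result T
		err    error
	)
	return func() (T, error) {
		once.Do(func() { result, err = fn() })
		return result, err
	}
}
```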
diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go
index 003b0638e89d..58e85fe9d473 100644
--- a/vendor/github.com/cilium/ebpf/link/cgroup.go
+++ b/vendor/github.com/cilium/ebpf/link/cgroup.go
@@ -10,10 +10,15 @@ import (
type cgroupAttachFlags uint32
-// cgroup attach flags
const (
+ // Allow programs attached to sub-cgroups to override the verdict of this
+ // program.
flagAllowOverride cgroupAttachFlags = 1 << iota
+ // Allow attaching multiple programs to the cgroup. Only works if the cgroup
+ // has no programs attached, or only programs attached using the Multi flag.
+ // Implies override.
flagAllowMulti
+ // Set automatically by progAttachCgroup.Update(). Used for updating a
+ // specific given program attached in multi-mode.
flagReplace
)
@@ -27,29 +32,39 @@ type CgroupOptions struct {
}
// AttachCgroup links a BPF program to a cgroup.
-func AttachCgroup(opts CgroupOptions) (Link, error) {
+//
+// If the running kernel doesn't support bpf_link, attempts to emulate its
+// semantics using the legacy PROG_ATTACH mechanism. If bpf_link is not
+// available, the returned [Link] will not support pinning to bpffs.
+//
+// If you need more control over attachment flags or the attachment mechanism
+// used, look at [RawAttachProgram] and [AttachRawLink] instead.
+func AttachCgroup(opts CgroupOptions) (cg Link, err error) {
cgroup, err := os.Open(opts.Path)
if err != nil {
return nil, fmt.Errorf("can't open cgroup: %s", err)
}
-
- clone, err := opts.Program.Clone()
- if err != nil {
+ defer func() {
+ if _, ok := cg.(*progAttachCgroup); ok {
+ // Skip closing the cgroup handle if we return a valid progAttachCgroup,
+ // where the handle is retained to implement Update().
+ return
+ }
cgroup.Close()
- return nil, err
+ }()
+
+ cg, err = newLinkCgroup(cgroup, opts.Attach, opts.Program)
+ if err == nil {
+ return cg, nil
}
- var cg Link
- cg, err = newLinkCgroup(cgroup, opts.Attach, clone)
if errors.Is(err, ErrNotSupported) {
- cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowMulti)
+ cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowMulti)
}
if errors.Is(err, ErrNotSupported) {
- cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowOverride)
+ cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowOverride)
}
if err != nil {
- cgroup.Close()
- clone.Close()
return nil, err
}
@@ -67,6 +82,8 @@ var _ Link = (*progAttachCgroup)(nil)
func (cg *progAttachCgroup) isLink() {}
+// newProgAttachCgroup attaches prog to cgroup using BPF_PROG_ATTACH.
+// cgroup and prog are retained by [progAttachCgroup].
func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) {
if flags&flagAllowMulti > 0 {
if err := haveProgAttachReplace(); err != nil {
@@ -74,17 +91,24 @@ func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Pro
}
}
- err := RawAttachProgram(RawAttachProgramOptions{
+ // Use a program handle that cannot be closed by the caller.
+ clone, err := prog.Clone()
+ if err != nil {
+ return nil, err
+ }
+
+ err = RawAttachProgram(RawAttachProgramOptions{
Target: int(cgroup.Fd()),
- Program: prog,
+ Program: clone,
Flags: uint32(flags),
Attach: attach,
})
if err != nil {
+ clone.Close()
return nil, fmt.Errorf("cgroup: %w", err)
}
- return &progAttachCgroup{cgroup, prog, attach, flags}, nil
+ return &progAttachCgroup{cgroup, clone, attach, flags}, nil
}
func (cg *progAttachCgroup) Close() error {
@@ -138,7 +162,7 @@ func (cg *progAttachCgroup) Pin(string) error {
}
func (cg *progAttachCgroup) Unpin() error {
- return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported)
+ return fmt.Errorf("can't unpin cgroup: %w", ErrNotSupported)
}
func (cg *progAttachCgroup) Info() (*Info, error) {
@@ -151,6 +175,7 @@ type linkCgroup struct {
var _ Link = (*linkCgroup)(nil)
+// newLinkCgroup attaches prog to cgroup using BPF_LINK_CREATE.
func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) {
link, err := AttachRawLink(RawLinkOptions{
Target: int(cgroup.Fd()),
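A hedged usage sketch of the reworked AttachCgroup entry point, showing the bpf_link-first behavior described in the new doc comment (the cgroup path is hypothetical; prog stands for a loaded CGroupSKB program):

package example

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func attachIngress(prog *ebpf.Program) (link.Link, error) {
	// Falls back to legacy PROG_ATTACH if bpf_link is unavailable; the
	// returned Link then cannot be pinned to bpffs.
	return link.AttachCgroup(link.CgroupOptions{
		Path:    "/sys/fs/cgroup/mygroup", // hypothetical path
		Attach:  ebpf.AttachCGroupInetIngress,
		Program: prog,
	})
}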
diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go
index fdf622a0c07f..b54ca9085335 100644
--- a/vendor/github.com/cilium/ebpf/link/kprobe.go
+++ b/vendor/github.com/cilium/ebpf/link/kprobe.go
@@ -1,42 +1,20 @@
package link
import (
- "bytes"
- "crypto/rand"
"errors"
"fmt"
"os"
- "path/filepath"
"runtime"
"strings"
- "sync"
- "syscall"
"unsafe"
"github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/tracefs"
"github.com/cilium/ebpf/internal/unix"
)
-var (
- kprobeEventsPath = filepath.Join(tracefsPath, "kprobe_events")
-
- kprobeRetprobeBit = struct {
- once sync.Once
- value uint64
- err error
- }{}
-)
-
-type probeType uint8
-
-type probeArgs struct {
- symbol, group, path string
- offset, refCtrOffset, cookie uint64
- pid int
- ret bool
-}
-
// KprobeOptions defines additional parameters that will be used
// when loading Kprobes.
type KprobeOptions struct {
@@ -49,45 +27,23 @@ type KprobeOptions struct {
// Can be used to insert kprobes at arbitrary offsets in kernel functions,
// e.g. in places where functions have been inlined.
Offset uint64
+ // Increase the maximum number of concurrent invocations of a kretprobe.
+ // Required when tracing some long-running functions in the kernel.
+ //
+ // Deprecated: this setting forces the use of an outdated kernel API and is not portable
+ // across kernel versions.
+ RetprobeMaxActive int
+ // Prefix used for the event name if the kprobe must be attached using tracefs.
+ // The group name will be formatted as `<prefix>_<randomstr>`.
+ // The default empty string is equivalent to "ebpf" as the prefix.
+ TraceFSPrefix string
}
-const (
- kprobeType probeType = iota
- uprobeType
-)
-
-func (pt probeType) String() string {
- if pt == kprobeType {
- return "kprobe"
- }
- return "uprobe"
-}
-
-func (pt probeType) EventsPath() string {
- if pt == kprobeType {
- return kprobeEventsPath
- }
- return uprobeEventsPath
-}
-
-func (pt probeType) PerfEventType(ret bool) perfEventType {
- if pt == kprobeType {
- if ret {
- return kretprobeEvent
- }
- return kprobeEvent
- }
- if ret {
- return uretprobeEvent
- }
- return uprobeEvent
-}
-
-func (pt probeType) RetprobeBit() (uint64, error) {
- if pt == kprobeType {
- return kretprobeBit()
+func (ko *KprobeOptions) cookie() uint64 {
+ if ko == nil {
+ return 0
}
- return uretprobeBit()
+ return ko.Cookie
}
// Kprobe attaches the given eBPF program to a perf event that fires when the
@@ -99,13 +55,17 @@ func (pt probeType) RetprobeBit() (uint64, error) {
// Losing the reference to the resulting Link (kp) will close the Kprobe
// and prevent further execution of prog. The Link must be Closed during
// program shutdown to avoid leaking system resources.
+//
+// If attaching to symbol fails, automatically retries with the running
+// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls
+// in a portable fashion.
func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
k, err := kprobe(symbol, prog, opts, false)
if err != nil {
return nil, err
}
- lnk, err := attachPerfEvent(k, prog)
+ lnk, err := attachPerfEvent(k, prog, opts.cookie())
if err != nil {
k.Close()
return nil, err
@@ -123,13 +83,20 @@ func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error
// Losing the reference to the resulting Link (kp) will close the Kretprobe
// and prevent further execution of prog. The Link must be Closed during
// program shutdown to avoid leaking system resources.
+//
+// If attaching to symbol fails, automatically retries with the running
+// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls
+// in a portable fashion.
+//
+// On kernels 5.10 and earlier, setting a kretprobe on a nonexistent symbol
+// incorrectly returns unix.EINVAL instead of os.ErrNotExist.
func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
k, err := kprobe(symbol, prog, opts, true)
if err != nil {
return nil, err
}
- lnk, err := attachPerfEvent(k, prog)
+ lnk, err := attachPerfEvent(k, prog, opts.cookie())
if err != nil {
k.Close()
return nil, err
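To illustrate the syscall-prefix retry described in the doc comments above, a minimal usage sketch (symbol and program are illustrative):

package example

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func traceExecve(prog *ebpf.Program) {
	// If "sys_execve" does not resolve, the library retries with the
	// platform prefix, e.g. "__x64_sys_execve" on amd64.
	kp, err := link.Kprobe("sys_execve", prog, nil)
	if err != nil {
		log.Fatalf("attaching kprobe: %v", err)
	}
	defer kp.Close()
}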
@@ -181,50 +148,51 @@ func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*
return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput)
}
- args := probeArgs{
- pid: perfAllThreads,
- symbol: symbol,
- ret: ret,
+ args := tracefs.ProbeArgs{
+ Type: tracefs.Kprobe,
+ Pid: perfAllThreads,
+ Symbol: symbol,
+ Ret: ret,
}
if opts != nil {
- args.cookie = opts.Cookie
- args.offset = opts.Offset
+ args.RetprobeMaxActive = opts.RetprobeMaxActive
+ args.Cookie = opts.Cookie
+ args.Offset = opts.Offset
+ args.Group = opts.TraceFSPrefix
}
// Use kprobe PMU if the kernel has it available.
- tp, err := pmuKprobe(args)
- if errors.Is(err, os.ErrNotExist) {
- args.symbol = platformPrefix(symbol)
- tp, err = pmuKprobe(args)
+ tp, err := pmuProbe(args)
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
+ if prefix := internal.PlatformPrefix(); prefix != "" {
+ args.Symbol = prefix + symbol
+ tp, err = pmuProbe(args)
+ }
}
if err == nil {
return tp, nil
}
if err != nil && !errors.Is(err, ErrNotSupported) {
- return nil, fmt.Errorf("creating perf_kprobe PMU: %w", err)
+ return nil, fmt.Errorf("creating perf_kprobe PMU (arch-specific fallback for %q): %w", symbol, err)
}
// Use tracefs if kprobe PMU is missing.
- args.symbol = symbol
- tp, err = tracefsKprobe(args)
- if errors.Is(err, os.ErrNotExist) {
- args.symbol = platformPrefix(symbol)
- tp, err = tracefsKprobe(args)
+ args.Symbol = symbol
+ tp, err = tracefsProbe(args)
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
+ if prefix := internal.PlatformPrefix(); prefix != "" {
+ args.Symbol = prefix + symbol
+ tp, err = tracefsProbe(args)
+ }
}
if err != nil {
- return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err)
+ return nil, fmt.Errorf("creating tracefs event (arch-specific fallback for %q): %w", symbol, err)
}
return tp, nil
}
-// pmuKprobe opens a perf event based on the kprobe PMU.
-// Returns os.ErrNotExist if the given symbol does not exist in the kernel.
-func pmuKprobe(args probeArgs) (*perfEvent, error) {
- return pmuProbe(kprobeType, args)
-}
-
// pmuProbe opens a perf event based on a Performance Monitoring Unit.
//
// Requires at least a 4.17 kernel.
@@ -232,17 +200,25 @@ func pmuKprobe(args probeArgs) (*perfEvent, error) {
// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU"
//
// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU
-func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
+func pmuProbe(args tracefs.ProbeArgs) (*perfEvent, error) {
// Getting the PMU type will fail if the kernel doesn't support
// the perf_[k,u]probe PMU.
- et, err := getPMUEventType(typ)
+ eventType, err := internal.ReadUint64FromFileOnce("%d\n", "/sys/bus/event_source/devices", args.Type.String(), "type")
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("%s: %w", args.Type, ErrNotSupported)
+ }
if err != nil {
return nil, err
}
+ // Use tracefs if we want to set kretprobe's retprobeMaxActive.
+ if args.RetprobeMaxActive != 0 {
+ return nil, fmt.Errorf("pmu probe: non-zero retprobeMaxActive: %w", ErrNotSupported)
+ }
+
var config uint64
- if args.ret {
- bit, err := typ.RetprobeBit()
+ if args.Ret {
+ bit, err := internal.ReadUint64FromFileOnce("config:%d\n", "/sys/bus/event_source/devices", args.Type.String(), "/format/retprobe")
if err != nil {
return nil, err
}
@@ -250,75 +226,81 @@ func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
}
var (
- attr unix.PerfEventAttr
- sp unsafe.Pointer
+ attr unix.PerfEventAttr
+ sp unsafe.Pointer
+ token string
)
- switch typ {
- case kprobeType:
+ switch args.Type {
+ case tracefs.Kprobe:
// Create a pointer to a NUL-terminated string for the kernel.
- sp, err = unsafeStringPtr(args.symbol)
+ sp, err = unsafeStringPtr(args.Symbol)
if err != nil {
return nil, err
}
+ token = tracefs.KprobeToken(args)
+
attr = unix.PerfEventAttr{
// The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1,
// since it added the config2 (Ext2) field. Use Ext2 as probe_offset.
Size: unix.PERF_ATTR_SIZE_VER1,
- Type: uint32(et), // PMU event type read from sysfs
+ Type: uint32(eventType), // PMU event type read from sysfs
Ext1: uint64(uintptr(sp)), // Kernel symbol to trace
- Ext2: args.offset, // Kernel symbol offset
+ Ext2: args.Offset, // Kernel symbol offset
Config: config, // Retprobe flag
}
- case uprobeType:
- sp, err = unsafeStringPtr(args.path)
+ case tracefs.Uprobe:
+ sp, err = unsafeStringPtr(args.Path)
if err != nil {
return nil, err
}
- if args.refCtrOffset != 0 {
- config |= args.refCtrOffset << uprobeRefCtrOffsetShift
+ if args.RefCtrOffset != 0 {
+ config |= args.RefCtrOffset << uprobeRefCtrOffsetShift
}
+ token = tracefs.UprobeToken(args)
+
attr = unix.PerfEventAttr{
// The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1,
// since it added the config2 (Ext2) field. The Size field controls the
// size of the internal buffer the kernel allocates for reading the
// perf_event_attr argument from userspace.
Size: unix.PERF_ATTR_SIZE_VER1,
- Type: uint32(et), // PMU event type read from sysfs
+ Type: uint32(eventType), // PMU event type read from sysfs
Ext1: uint64(uintptr(sp)), // Uprobe path
- Ext2: args.offset, // Uprobe offset
+ Ext2: args.Offset, // Uprobe offset
Config: config, // RefCtrOffset, Retprobe flag
}
}
- rawFd, err := unix.PerfEventOpen(&attr, args.pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
+ rawFd, err := unix.PerfEventOpen(&attr, args.Pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
// On some old kernels, kprobe PMU doesn't allow `.` in symbol names and
// return -EINVAL. Return ErrNotSupported to allow falling back to tracefs.
// /~https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343
- if errors.Is(err, unix.EINVAL) && strings.Contains(args.symbol, ".") {
- return nil, fmt.Errorf("symbol '%s+%#x': older kernels don't accept dots: %w", args.symbol, args.offset, ErrNotSupported)
+ if errors.Is(err, unix.EINVAL) && strings.Contains(args.Symbol, ".") {
+ return nil, fmt.Errorf("token %s: older kernels don't accept dots: %w", token, ErrNotSupported)
}
// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
- // when trying to create a kretprobe for a missing symbol. Make sure ENOENT
- // is returned to the caller.
- if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
- return nil, fmt.Errorf("symbol '%s+%#x' not found: %w", args.symbol, args.offset, os.ErrNotExist)
+ // when trying to create a retprobe for a missing symbol.
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("token %s: not found: %w", token, err)
}
- // Since commit ab105a4fb894, -EILSEQ is returned when a kprobe sym+offset is resolved
- // to an invalid insn boundary.
- if errors.Is(err, syscall.EILSEQ) {
- return nil, fmt.Errorf("symbol '%s+%#x' not found (bad insn boundary): %w", args.symbol, args.offset, os.ErrNotExist)
+ // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved
+ // to an invalid insn boundary. The exact conditions that trigger this error are
+ // arch specific however.
+ if errors.Is(err, unix.EILSEQ) {
+ return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
}
// Since at least commit cb9a19fe4aa51, ENOTSUPP is returned
// when attempting to set a uprobe on a trap instruction.
- if errors.Is(err, unix.ENOTSUPP) {
- return nil, fmt.Errorf("failed setting uprobe on offset %#x (possible trap insn): %w", args.offset, err)
+ if errors.Is(err, sys.ENOTSUPP) {
+ return nil, fmt.Errorf("token %s: failed setting uprobe on offset %#x (possible trap insn): %w", token, args.Offset, err)
}
+
if err != nil {
- return nil, fmt.Errorf("opening perf event: %w", err)
+ return nil, fmt.Errorf("token %s: opening perf event: %w", token, err)
}
// Ensure the string pointer is not collected before PerfEventOpen returns.
@@ -330,18 +312,7 @@ func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
}
// Kernel has perf_[k,u]probe PMU available, initialize perf event.
- return &perfEvent{
- typ: typ.PerfEventType(args.ret),
- name: args.symbol,
- pmuID: et,
- cookie: args.cookie,
- fd: fd,
- }, nil
-}
-
-// tracefsKprobe creates a Kprobe tracefs entry.
-func tracefsKprobe(args probeArgs) (*perfEvent, error) {
- return tracefsProbe(kprobeType, args)
+ return newPerfEvent(fd, nil), nil
}
// tracefsProbe creates a trace event by writing an entry to /[k,u]probe_events.
@@ -350,219 +321,37 @@ func tracefsKprobe(args probeArgs) (*perfEvent, error) {
// Path and offset are only set in the case of uprobe(s) and are used to set
// the executable/library path on the filesystem and the offset where the probe is inserted.
// A perf event is then opened on the newly-created trace event and returned to the caller.
-func tracefsProbe(typ probeType, args probeArgs) (_ *perfEvent, err error) {
+func tracefsProbe(args tracefs.ProbeArgs) (*perfEvent, error) {
+ groupPrefix := "ebpf"
+ if args.Group != "" {
+ groupPrefix = args.Group
+ }
+
// Generate a random string for each trace event we attempt to create.
// This value is used as the 'group' token in tracefs to allow creating
// multiple kprobe trace events with the same name.
- group, err := randomGroup("ebpf")
+ group, err := tracefs.RandomGroup(groupPrefix)
if err != nil {
return nil, fmt.Errorf("randomizing group name: %w", err)
}
- args.group = group
-
- // Before attempting to create a trace event through tracefs,
- // check if an event with the same group and name already exists.
- // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate
- // entry, so we need to rely on reads for detecting uniqueness.
- _, err = getTraceEventID(group, args.symbol)
- if err == nil {
- return nil, fmt.Errorf("trace event already exists: %s/%s", group, args.symbol)
- }
- if err != nil && !errors.Is(err, os.ErrNotExist) {
- return nil, fmt.Errorf("checking trace event %s/%s: %w", group, args.symbol, err)
- }
+ args.Group = group
// Create the [k,u]probe trace event using tracefs.
- if err := createTraceFSProbeEvent(typ, args); err != nil {
- return nil, fmt.Errorf("creating probe entry on tracefs: %w", err)
- }
- defer func() {
- if err != nil {
- // Make sure we clean up the created tracefs event when we return error.
- // If a livepatch handler is already active on the symbol, the write to
- // tracefs will succeed, a trace event will show up, but creating the
- // perf event will fail with EBUSY.
- _ = closeTraceFSProbeEvent(typ, args.group, args.symbol)
- }
- }()
-
- // Get the newly-created trace event's id.
- tid, err := getTraceEventID(group, args.symbol)
+ evt, err := tracefs.NewEvent(args)
if err != nil {
- return nil, fmt.Errorf("getting trace event id: %w", err)
+ return nil, fmt.Errorf("creating probe entry on tracefs: %w", err)
}
// Kprobes are ephemeral tracepoints and share the same perf event type.
- fd, err := openTracepointPerfEvent(tid, args.pid)
+ fd, err := openTracepointPerfEvent(evt.ID(), args.Pid)
if err != nil {
+ // Make sure we clean up the created tracefs event when we return error.
+ // If a livepatch handler is already active on the symbol, the write to
+ // tracefs will succeed, a trace event will show up, but creating the
+ // perf event will fail with EBUSY.
+ _ = evt.Close()
return nil, err
}
- return &perfEvent{
- typ: typ.PerfEventType(args.ret),
- group: group,
- name: args.symbol,
- tracefsID: tid,
- cookie: args.cookie,
- fd: fd,
- }, nil
-}
-
-// createTraceFSProbeEvent creates a new ephemeral trace event by writing to
-// /[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid
-// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist
-// if a probe with the same group and symbol already exists.
-func createTraceFSProbeEvent(typ probeType, args probeArgs) error {
- // Open the kprobe_events file in tracefs.
- f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
- if err != nil {
- return fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err)
- }
- defer f.Close()
-
- var pe, token string
- switch typ {
- case kprobeType:
- // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
- // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
- // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
- // -:[GRP/]EVENT : Clear a probe
- //
- // Some examples:
- // r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
- // p:ebpf_5678/p_my_kprobe __x64_sys_execve
- //
- // Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
- // kernel default to NR_CPUS. This is desired in most eBPF cases since
- // subsampling or rate limiting logic can be more accurately implemented in
- // the eBPF program itself.
- // See Documentation/kprobes.txt for more details.
- token = kprobeToken(args)
- pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, sanitizeSymbol(args.symbol), token)
- case uprobeType:
- // The uprobe_events syntax is as follows:
- // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
- // r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe
- // -:[GRP/]EVENT : Clear a probe
- //
- // Some examples:
- // r:ebpf_1234/readline /bin/bash:0x12345
- // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123)
- //
- // See Documentation/trace/uprobetracer.txt for more details.
- token = uprobeToken(args)
- pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, args.symbol, token)
- }
- _, err = f.WriteString(pe)
- // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
- // when trying to create a kretprobe for a missing symbol. Make sure ENOENT
- // is returned to the caller.
- // EINVAL is also returned on pre-5.2 kernels when the `SYM[+offs]` token
- // is resolved to an invalid insn boundary.
- if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
- return fmt.Errorf("token %s: %w", token, os.ErrNotExist)
- }
- // Since commit ab105a4fb894, -EILSEQ is returned when a kprobe sym+offset is resolved
- // to an invalid insn boundary.
- if errors.Is(err, syscall.EILSEQ) {
- return fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
- }
- // ERANGE is returned when the `SYM[+offs]` token is too big and cannot
- // be resolved.
- if errors.Is(err, syscall.ERANGE) {
- return fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist)
- }
- if err != nil {
- return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
- }
-
- return nil
-}
-
-// closeTraceFSProbeEvent removes the [k,u]probe with the given type, group and symbol
-// from /[k,u]probe_events.
-func closeTraceFSProbeEvent(typ probeType, group, symbol string) error {
- f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
- if err != nil {
- return fmt.Errorf("error opening %s: %w", typ.EventsPath(), err)
- }
- defer f.Close()
-
- // See [k,u]probe_events syntax above. The probe type does not need to be specified
- // for removals.
- pe := fmt.Sprintf("-:%s/%s", group, sanitizeSymbol(symbol))
- if _, err = f.WriteString(pe); err != nil {
- return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
- }
-
- return nil
-}
-
-// randomGroup generates a pseudorandom string for use as a tracefs group name.
-// Returns an error when the output string would exceed 63 characters (kernel
-// limitation), when rand.Read() fails or when prefix contains characters not
-// allowed by isValidTraceID.
-func randomGroup(prefix string) (string, error) {
- if !isValidTraceID(prefix) {
- return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, errInvalidInput)
- }
-
- b := make([]byte, 8)
- if _, err := rand.Read(b); err != nil {
- return "", fmt.Errorf("reading random bytes: %w", err)
- }
-
- group := fmt.Sprintf("%s_%x", prefix, b)
- if len(group) > 63 {
- return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, errInvalidInput)
- }
-
- return group, nil
-}
-
-func probePrefix(ret bool) string {
- if ret {
- return "r"
- }
- return "p"
-}
-
-// determineRetprobeBit reads a Performance Monitoring Unit's retprobe bit
-// from /sys/bus/event_source/devices/<pmu>/format/retprobe.
-func determineRetprobeBit(typ probeType) (uint64, error) {
- p := filepath.Join("/sys/bus/event_source/devices/", typ.String(), "/format/retprobe")
-
- data, err := os.ReadFile(p)
- if err != nil {
- return 0, err
- }
-
- var rp uint64
- n, err := fmt.Sscanf(string(bytes.TrimSpace(data)), "config:%d", &rp)
- if err != nil {
- return 0, fmt.Errorf("parse retprobe bit: %w", err)
- }
- if n != 1 {
- return 0, fmt.Errorf("parse retprobe bit: expected 1 item, got %d", n)
- }
-
- return rp, nil
-}
-
-func kretprobeBit() (uint64, error) {
- kprobeRetprobeBit.once.Do(func() {
- kprobeRetprobeBit.value, kprobeRetprobeBit.err = determineRetprobeBit(kprobeType)
- })
- return kprobeRetprobeBit.value, kprobeRetprobeBit.err
-}
-
-// kprobeToken creates the SYM[+offs] token for the tracefs api.
-func kprobeToken(args probeArgs) string {
- po := args.symbol
-
- if args.offset != 0 {
- po += fmt.Sprintf("+%#x", args.offset)
- }
-
- return po
+ return newPerfEvent(fd, evt), nil
}
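The kprobe_events syntax documented in the removed helper is unchanged, merely relocated to internal/tracefs. A hand-rolled equivalent of that single write, for illustration only (group and event names are hypothetical; the mount point may also be /sys/kernel/debug/tracing):

package example

import "os"

func addKprobeEvent() error {
	f, err := os.OpenFile("/sys/kernel/tracing/kprobe_events", os.O_APPEND|os.O_WRONLY, 0666)
	if err != nil {
		return err
	}
	defer f.Close()

	// "p:GRP/EVENT SYM" sets an entry probe, prefix "r" a return probe,
	// and "-:GRP/EVENT" clears it again.
	_, err = f.WriteString("p:ebpf_1234/p_do_sys_open do_sys_open")
	return err
}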
diff --git a/vendor/github.com/cilium/ebpf/link/kprobe_multi.go b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go
new file mode 100644
index 000000000000..697c6d7362a2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go
@@ -0,0 +1,180 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// KprobeMultiOptions defines additional parameters that will be used
+// when opening a KprobeMulti Link.
+type KprobeMultiOptions struct {
+ // Symbols takes a list of kernel symbol names to attach an ebpf program to.
+ //
+ // Mutually exclusive with Addresses.
+ Symbols []string
+
+ // Addresses takes a list of kernel symbol addresses in case they cannot
+ // be referred to by name.
+ //
+ // Note that only start addresses can be specified, since the fprobe API
+ // limits the attach point to the function entry or return.
+ //
+ // Mutually exclusive with Symbols.
+ Addresses []uintptr
+
+ // Cookies specifies arbitrary values that can be fetched from an eBPF
+ // program via `bpf_get_attach_cookie()`.
+ //
+ // If set, its length should be equal to the length of Symbols or Addresses.
+ // Each Cookie is assigned to the Symbol or Address specified at the
+ // corresponding slice index.
+ Cookies []uint64
+}
+
+// KprobeMulti attaches the given eBPF program to the entry point of a given set
+// of kernel symbols.
+//
+// The difference with Kprobe() is that multi-kprobe accomplishes this in a
+// single system call, making it significantly faster than attaching many
+// probes one at a time.
+//
+// Requires at least Linux 5.18.
+func KprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) {
+ return kprobeMulti(prog, opts, 0)
+}
+
+// KretprobeMulti attaches the given eBPF program to the return point of a given
+// set of kernel symbols.
+//
+// The difference with Kretprobe() is that multi-kprobe accomplishes this in a
+// single system call, making it significantly faster than attaching many
+// probes one at a time.
+//
+// Requires at least Linux 5.18.
+func KretprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) {
+ return kprobeMulti(prog, opts, unix.BPF_F_KPROBE_MULTI_RETURN)
+}
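A hedged usage sketch of the new multi-kprobe API (symbols and cookies are illustrative):

package example

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func attachMulti(prog *ebpf.Program) {
	// A single bpf(2) call attaches prog to every listed symbol; each
	// cookie pairs with the symbol at the same index.
	km, err := link.KprobeMulti(prog, link.KprobeMultiOptions{
		Symbols: []string{"vprintk", "do_sys_open"},
		Cookies: []uint64{1, 2},
	})
	if err != nil {
		log.Fatalf("kprobe multi: %v", err)
	}
	defer km.Close()
}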
+
+func kprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions, flags uint32) (Link, error) {
+ if prog == nil {
+ return nil, errors.New("cannot attach a nil program")
+ }
+
+ syms := uint32(len(opts.Symbols))
+ addrs := uint32(len(opts.Addresses))
+ cookies := uint32(len(opts.Cookies))
+
+ if syms == 0 && addrs == 0 {
+ return nil, fmt.Errorf("one of Symbols or Addresses is required: %w", errInvalidInput)
+ }
+ if syms != 0 && addrs != 0 {
+ return nil, fmt.Errorf("Symbols and Addresses are mutually exclusive: %w", errInvalidInput)
+ }
+ if cookies > 0 && cookies != syms && cookies != addrs {
+ return nil, fmt.Errorf("Cookies must be exactly Symbols or Addresses in length: %w", errInvalidInput)
+ }
+
+ if err := haveBPFLinkKprobeMulti(); err != nil {
+ return nil, err
+ }
+
+ attr := &sys.LinkCreateKprobeMultiAttr{
+ ProgFd: uint32(prog.FD()),
+ AttachType: sys.BPF_TRACE_KPROBE_MULTI,
+ KprobeMultiFlags: flags,
+ }
+
+ switch {
+ case syms != 0:
+ attr.Count = syms
+ attr.Syms = sys.NewStringSlicePointer(opts.Symbols)
+
+ case addrs != 0:
+ attr.Count = addrs
+ attr.Addrs = sys.NewPointer(unsafe.Pointer(&opts.Addresses[0]))
+ }
+
+ if cookies != 0 {
+ attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0]))
+ }
+
+ fd, err := sys.LinkCreateKprobeMulti(attr)
+ if errors.Is(err, unix.ESRCH) {
+ return nil, fmt.Errorf("couldn't find one or more symbols: %w", os.ErrNotExist)
+ }
+ if errors.Is(err, unix.EINVAL) {
+ return nil, fmt.Errorf("%w (missing kernel symbol or prog's AttachType not AttachTraceKprobeMulti?)", err)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return &kprobeMultiLink{RawLink{fd, ""}}, nil
+}
+
+type kprobeMultiLink struct {
+ RawLink
+}
+
+var _ Link = (*kprobeMultiLink)(nil)
+
+func (kml *kprobeMultiLink) Update(prog *ebpf.Program) error {
+ return fmt.Errorf("update kprobe_multi: %w", ErrNotSupported)
+}
+
+func (kml *kprobeMultiLink) Pin(string) error {
+ return fmt.Errorf("pin kprobe_multi: %w", ErrNotSupported)
+}
+
+func (kml *kprobeMultiLink) Unpin() error {
+ return fmt.Errorf("unpin kprobe_multi: %w", ErrNotSupported)
+}
+
+var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", "5.18", func() error {
+ prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+ Name: "probe_kpm_link",
+ Type: ebpf.Kprobe,
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ },
+ AttachType: ebpf.AttachTraceKprobeMulti,
+ License: "MIT",
+ })
+ if errors.Is(err, unix.E2BIG) {
+ // Kernel doesn't support AttachType field.
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+ defer prog.Close()
+
+ fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{
+ ProgFd: uint32(prog.FD()),
+ AttachType: sys.BPF_TRACE_KPROBE_MULTI,
+ Count: 1,
+ Syms: sys.NewStringSlicePointer([]string{"vprintk"}),
+ })
+ switch {
+ case errors.Is(err, unix.EINVAL):
+ return internal.ErrNotSupported
+ // If CONFIG_FPROBE isn't set.
+ case errors.Is(err, unix.EOPNOTSUPP):
+ return internal.ErrNotSupported
+ case err != nil:
+ return err
+ }
+
+ fd.Close()
+
+ return nil
+})
diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go
index 067d0101aa9f..36acd6ee4b95 100644
--- a/vendor/github.com/cilium/ebpf/link/link.go
+++ b/vendor/github.com/cilium/ebpf/link/link.go
@@ -46,6 +46,18 @@ type Link interface {
isLink()
}
+// NewLinkFromFD creates a link from a raw fd.
+//
+// You should not use fd after calling this function.
+func NewLinkFromFD(fd int) (Link, error) {
+ sysFD, err := sys.NewFD(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ return wrapRawLink(&RawLink{fd: sysFD})
+}
+
// LoadPinnedLink loads a link that was persisted into a bpffs.
func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) {
raw, err := loadPinnedRawLink(fileName, opts)
@@ -59,10 +71,15 @@ func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) {
// wrap a RawLink in a more specific type if possible.
//
// The function takes ownership of raw and closes it on error.
-func wrapRawLink(raw *RawLink) (Link, error) {
+func wrapRawLink(raw *RawLink) (_ Link, err error) {
+ defer func() {
+ if err != nil {
+ raw.Close()
+ }
+ }()
+
info, err := raw.Info()
if err != nil {
- raw.Close()
return nil, err
}
@@ -77,6 +94,10 @@ func wrapRawLink(raw *RawLink) (Link, error) {
return &Iter{*raw}, nil
case NetNsType:
return &NetNsLink{*raw}, nil
+ case KprobeMultiType:
+ return &kprobeMultiLink{*raw}, nil
+ case PerfEventType:
+ return nil, fmt.Errorf("recovering perf event fd: %w", ErrNotSupported)
default:
return raw, nil
}
@@ -172,12 +193,12 @@ func AttachRawLink(opts RawLinkOptions) (*RawLink, error) {
TargetFd: uint32(opts.Target),
ProgFd: uint32(progFd),
AttachType: sys.AttachType(opts.Attach),
- TargetBtfId: uint32(opts.BTF),
+ TargetBtfId: opts.BTF,
Flags: opts.Flags,
}
fd, err := sys.LinkCreate(&attr)
if err != nil {
- return nil, fmt.Errorf("can't create link: %s", err)
+ return nil, fmt.Errorf("create link: %w", err)
}
return &RawLink{fd, ""}, nil
@@ -230,6 +251,11 @@ func (l *RawLink) Unpin() error {
return nil
}
+// IsPinned returns true if the Link has a non-empty pinned path.
+func (l *RawLink) IsPinned() bool {
+ return l.pinnedPath != ""
+}
+
// Update implements the Link interface.
func (l *RawLink) Update(new *ebpf.Program) error {
return l.UpdateArgs(RawLinkUpdateOptions{
@@ -280,27 +306,24 @@ func (l *RawLink) Info() (*Info, error) {
switch info.Type {
case CgroupType:
extra = &CgroupInfo{}
- case IterType:
- // not supported
case NetNsType:
extra = &NetNsInfo{}
- case RawTracepointType:
- // not supported
case TracingType:
extra = &TracingInfo{}
case XDPType:
extra = &XDPInfo{}
- case PerfEventType:
- // no extra
+ case RawTracepointType, IterType,
+ PerfEventType, KprobeMultiType:
+ // Extra metadata not supported.
default:
return nil, fmt.Errorf("unknown link info type: %d", info.Type)
}
- if info.Type != RawTracepointType && info.Type != IterType && info.Type != PerfEventType {
+ if extra != nil {
buf := bytes.NewReader(info.Extra[:])
err := binary.Read(buf, internal.NativeEndian, extra)
if err != nil {
- return nil, fmt.Errorf("can not read extra link info: %w", err)
+ return nil, fmt.Errorf("cannot read extra link info: %w", err)
}
}
diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go
index 0e5bd47911bb..5f7a628b3d73 100644
--- a/vendor/github.com/cilium/ebpf/link/perf_event.go
+++ b/vendor/github.com/cilium/ebpf/link/perf_event.go
@@ -1,20 +1,16 @@
package link
import (
- "bytes"
"errors"
"fmt"
- "os"
- "path/filepath"
"runtime"
- "strconv"
- "strings"
"unsafe"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/tracefs"
"github.com/cilium/ebpf/internal/unix"
)
@@ -42,67 +38,41 @@ import (
// stops any further invocations of the attached eBPF program.
var (
- tracefsPath = "/sys/kernel/debug/tracing"
-
- errInvalidInput = errors.New("invalid input")
+ errInvalidInput = tracefs.ErrInvalidInput
)
const (
perfAllThreads = -1
)
-type perfEventType uint8
-
-const (
- tracepointEvent perfEventType = iota
- kprobeEvent
- kretprobeEvent
- uprobeEvent
- uretprobeEvent
-)
-
// A perfEvent represents a perf event kernel object. Exactly one eBPF program
// can be attached to it. It is created based on a tracefs trace event or a
// Performance Monitoring Unit (PMU).
type perfEvent struct {
- // The event type determines the types of programs that can be attached.
- typ perfEventType
-
- // Group and name of the tracepoint/kprobe/uprobe.
- group string
- name string
-
- // PMU event ID read from sysfs. Valid IDs are non-zero.
- pmuID uint64
- // ID of the trace event read from tracefs. Valid IDs are non-zero.
- tracefsID uint64
-
- // User provided arbitrary value.
- cookie uint64
+ // Trace event backing this perfEvent. May be nil.
+ tracefsEvent *tracefs.Event
// This is the perf event FD.
fd *sys.FD
}
+func newPerfEvent(fd *sys.FD, event *tracefs.Event) *perfEvent {
+ pe := &perfEvent{event, fd}
+ // Both event and fd have their own finalizer, but we want to
+ // guarantee that they are closed in a certain order.
+ runtime.SetFinalizer(pe, (*perfEvent).Close)
+ return pe
+}
+
func (pe *perfEvent) Close() error {
+ runtime.SetFinalizer(pe, nil)
+
if err := pe.fd.Close(); err != nil {
return fmt.Errorf("closing perf event fd: %w", err)
}
- switch pe.typ {
- case kprobeEvent, kretprobeEvent:
- // Clean up kprobe tracefs entry.
- if pe.tracefsID != 0 {
- return closeTraceFSProbeEvent(kprobeType, pe.group, pe.name)
- }
- case uprobeEvent, uretprobeEvent:
- // Clean up uprobe tracefs entry.
- if pe.tracefsID != 0 {
- return closeTraceFSProbeEvent(uprobeType, pe.group, pe.name)
- }
- case tracepointEvent:
- // Tracepoint trace events don't hold any extra resources.
- return nil
+ if pe.tracefsEvent != nil {
+ return pe.tracefsEvent.Close()
}
return nil
@@ -136,10 +106,14 @@ func (pl *perfEventLink) Unpin() error {
}
func (pl *perfEventLink) Close() error {
+ if err := pl.fd.Close(); err != nil {
+ return fmt.Errorf("perf link close: %w", err)
+ }
+
if err := pl.pe.Close(); err != nil {
- return fmt.Errorf("perf event link close: %w", err)
+ return fmt.Errorf("perf event close: %w", err)
}
- return pl.fd.Close()
+ return nil
}
func (pl *perfEventLink) Update(prog *ebpf.Program) error {
@@ -183,7 +157,7 @@ func (pi *perfEventIoctl) Info() (*Info, error) {
// attach the given eBPF prog to the perf event stored in pe.
// pe must contain a valid perf event fd.
// prog's type must match the program type stored in pe.
-func attachPerfEvent(pe *perfEvent, prog *ebpf.Program) (Link, error) {
+func attachPerfEvent(pe *perfEvent, prog *ebpf.Program, cookie uint64) (Link, error) {
if prog == nil {
return nil, errors.New("cannot attach a nil program")
}
@@ -191,30 +165,18 @@ func attachPerfEvent(pe *perfEvent, prog *ebpf.Program) (Link, error) {
return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd)
}
- switch pe.typ {
- case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent:
- if t := prog.Type(); t != ebpf.Kprobe {
- return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t)
- }
- case tracepointEvent:
- if t := prog.Type(); t != ebpf.TracePoint {
- return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t)
- }
- default:
- return nil, fmt.Errorf("unknown perf event type: %d", pe.typ)
+ if err := haveBPFLinkPerfEvent(); err == nil {
+ return attachPerfEventLink(pe, prog, cookie)
}
- if err := haveBPFLinkPerfEvent(); err == nil {
- return attachPerfEventLink(pe, prog)
+ if cookie != 0 {
+ return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported)
}
+
return attachPerfEventIoctl(pe, prog)
}
func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) {
- if pe.cookie != 0 {
- return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported)
- }
-
// Assign the eBPF program to the perf event.
err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD())
if err != nil {
@@ -226,32 +188,24 @@ func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, e
return nil, fmt.Errorf("enable perf event: %s", err)
}
- pi := &perfEventIoctl{pe}
-
- // Close the perf event when its reference is lost to avoid leaking system resources.
- runtime.SetFinalizer(pi, (*perfEventIoctl).Close)
- return pi, nil
+ return &perfEventIoctl{pe}, nil
}
// Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+).
//
// /~https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
-func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program) (*perfEventLink, error) {
+func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program, cookie uint64) (*perfEventLink, error) {
fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{
ProgFd: uint32(prog.FD()),
TargetFd: pe.fd.Uint(),
AttachType: sys.BPF_PERF_EVENT,
- BpfCookie: pe.cookie,
+ BpfCookie: cookie,
})
if err != nil {
return nil, fmt.Errorf("cannot create bpf perf link: %v", err)
}
- pl := &perfEventLink{RawLink{fd: fd}, pe}
-
- // Close the perf event when its reference is lost to avoid leaking system resources.
- runtime.SetFinalizer(pl, (*perfEventLink).Close)
- return pl, nil
+ return &perfEventLink{RawLink{fd: fd}, pe}, nil
}
// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str.
@@ -263,40 +217,6 @@ func unsafeStringPtr(str string) (unsafe.Pointer, error) {
return unsafe.Pointer(p), nil
}
-// getTraceEventID reads a trace event's ID from tracefs given its group and name.
-// The kernel requires group and name to be alphanumeric or underscore.
-//
-// name automatically has its invalid symbols converted to underscores so the caller
-// can pass a raw symbol name, e.g. a kernel symbol containing dots.
-func getTraceEventID(group, name string) (uint64, error) {
- name = sanitizeSymbol(name)
- tid, err := uint64FromFile(tracefsPath, "events", group, name, "id")
- if errors.Is(err, os.ErrNotExist) {
- return 0, fmt.Errorf("trace event %s/%s: %w", group, name, os.ErrNotExist)
- }
- if err != nil {
- return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err)
- }
-
- return tid, nil
-}
-
-// getPMUEventType reads a Performance Monitoring Unit's type (numeric identifier)
-// from /sys/bus/event_source/devices/<pmu>/type.
-//
-// Returns ErrNotSupported if the pmu type is not supported.
-func getPMUEventType(typ probeType) (uint64, error) {
- et, err := uint64FromFile("/sys/bus/event_source/devices", typ.String(), "type")
- if errors.Is(err, os.ErrNotExist) {
- return 0, fmt.Errorf("pmu type %s: %w", typ, ErrNotSupported)
- }
- if err != nil {
- return 0, fmt.Errorf("reading pmu type %s: %w", typ, err)
- }
-
- return et, nil
-}
-
// openTracepointPerfEvent opens a tracepoint-type perf event. System-wide
// [k,u]probes created by writing to /[k,u]probe_events are tracepoints
// behind the scenes, and can be attached to using these perf events.
@@ -317,30 +237,11 @@ func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) {
return sys.NewFD(fd)
}
-// uint64FromFile reads a uint64 from a file. All elements of path are sanitized
-// and joined onto base. Returns error if base no longer prefixes the path after
-// joining all components.
-func uint64FromFile(base string, path ...string) (uint64, error) {
- l := filepath.Join(path...)
- p := filepath.Join(base, l)
- if !strings.HasPrefix(p, base) {
- return 0, fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput)
- }
-
- data, err := os.ReadFile(p)
- if err != nil {
- return 0, fmt.Errorf("reading file %s: %w", p, err)
- }
-
- et := bytes.TrimSpace(data)
- return strconv.ParseUint(string(et), 10, 64)
-}
-
// Probe BPF perf link.
//
// https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307
// /~https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
-var haveBPFLinkPerfEvent = internal.FeatureTest("bpf_link_perf_event", "5.15", func() error {
+var haveBPFLinkPerfEvent = internal.NewFeatureTest("bpf_link_perf_event", "5.15", func() error {
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
Name: "probe_bpf_perf_link",
Type: ebpf.Kprobe,
@@ -367,28 +268,3 @@ var haveBPFLinkPerfEvent = internal.FeatureTest("bpf_link_perf_event", "5.15", f
}
return err
})
-
-// isValidTraceID implements the equivalent of a regex match
-// against "^[a-zA-Z_][0-9a-zA-Z_]*$".
-//
-// Trace event groups, names and kernel symbols must adhere to this set
-// of characters. Non-empty, first character must not be a number, all
-// characters must be alphanumeric or underscore.
-func isValidTraceID(s string) bool {
- if len(s) < 1 {
- return false
- }
- for i, c := range []byte(s) {
- switch {
- case c >= 'a' && c <= 'z':
- case c >= 'A' && c <= 'Z':
- case c == '_':
- case i > 0 && c >= '0' && c <= '9':
-
- default:
- return false
- }
- }
-
- return true
-}
diff --git a/vendor/github.com/cilium/ebpf/link/platform.go b/vendor/github.com/cilium/ebpf/link/platform.go
deleted file mode 100644
index eb6f7b7a3769..000000000000
--- a/vendor/github.com/cilium/ebpf/link/platform.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package link
-
-import (
- "fmt"
- "runtime"
-)
-
-func platformPrefix(symbol string) string {
-
- prefix := runtime.GOARCH
-
- // per /~https://github.com/golang/go/blob/master/src/go/build/syslist.go
- switch prefix {
- case "386":
- prefix = "ia32"
- case "amd64", "amd64p32":
- prefix = "x64"
- case "arm64", "arm64be":
- prefix = "arm64"
- default:
- return symbol
- }
-
- return fmt.Sprintf("__%s_%s", prefix, symbol)
-}
diff --git a/vendor/github.com/cilium/ebpf/link/query.go b/vendor/github.com/cilium/ebpf/link/query.go
new file mode 100644
index 000000000000..c05656512d55
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/query.go
@@ -0,0 +1,63 @@
+package link
+
+import (
+ "fmt"
+ "os"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+// QueryOptions defines additional parameters when querying for programs.
+type QueryOptions struct {
+ // Path can be a path to a cgroup, netns or LIRC2 device
+ Path string
+ // Attach specifies the AttachType of the programs queried for
+ Attach ebpf.AttachType
+ // QueryFlags are flags for BPF_PROG_QUERY, e.g. BPF_F_QUERY_EFFECTIVE
+ QueryFlags uint32
+}
+
+// QueryPrograms retrieves ProgramIDs associated with the AttachType.
+//
+// Returns (nil, nil) if there are no programs attached to the queried kernel
+// resource. Calling QueryPrograms on a kernel missing PROG_QUERY will result in
+// ErrNotSupported.
+func QueryPrograms(opts QueryOptions) ([]ebpf.ProgramID, error) {
+ if haveProgQuery() != nil {
+ return nil, fmt.Errorf("can't query program IDs: %w", ErrNotSupported)
+ }
+
+ f, err := os.Open(opts.Path)
+ if err != nil {
+ return nil, fmt.Errorf("can't open file: %s", err)
+ }
+ defer f.Close()
+
+ // query the number of programs to allocate correct slice size
+ attr := sys.ProgQueryAttr{
+ TargetFd: uint32(f.Fd()),
+ AttachType: sys.AttachType(opts.Attach),
+ QueryFlags: opts.QueryFlags,
+ }
+ if err := sys.ProgQuery(&attr); err != nil {
+ return nil, fmt.Errorf("can't query program count: %w", err)
+ }
+
+ // return nil if no progs are attached
+ if attr.ProgCount == 0 {
+ return nil, nil
+ }
+
+ // we have at least one prog, so we query again
+ progIds := make([]ebpf.ProgramID, attr.ProgCount)
+ attr.ProgIds = sys.NewPointer(unsafe.Pointer(&progIds[0]))
+ attr.ProgCount = uint32(len(progIds))
+ if err := sys.ProgQuery(&attr); err != nil {
+ return nil, fmt.Errorf("can't query program IDs: %w", err)
+ }
+
+ return progIds, nil
+
+}
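A hedged sketch of the two-step query the new file implements (count first, then fill) from the caller's perspective; the cgroup path is hypothetical:

package example

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func listCgroupProgs() {
	ids, err := link.QueryPrograms(link.QueryOptions{
		Path:   "/sys/fs/cgroup/mygroup", // hypothetical path
		Attach: ebpf.AttachCGroupInetIngress,
	})
	if err != nil {
		log.Fatalf("query: %v", err)
	}
	fmt.Println("attached program IDs:", ids) // nil when nothing is attached
}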
diff --git a/vendor/github.com/cilium/ebpf/link/socket_filter.go b/vendor/github.com/cilium/ebpf/link/socket_filter.go
index 94f3958cc4d8..84f0b656f80a 100644
--- a/vendor/github.com/cilium/ebpf/link/socket_filter.go
+++ b/vendor/github.com/cilium/ebpf/link/socket_filter.go
@@ -15,7 +15,7 @@ func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error {
}
var ssoErr error
err = rawConn.Control(func(fd uintptr) {
- ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD())
+ ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD())
})
if ssoErr != nil {
return ssoErr
@@ -31,7 +31,7 @@ func DetachSocketFilter(conn syscall.Conn) error {
}
var ssoErr error
err = rawConn.Control(func(fd uintptr) {
- ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0)
+ ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0)
})
if ssoErr != nil {
return ssoErr
diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go
index a661395b360b..c9c998c2014c 100644
--- a/vendor/github.com/cilium/ebpf/link/syscalls.go
+++ b/vendor/github.com/cilium/ebpf/link/syscalls.go
@@ -23,9 +23,10 @@ const (
NetNsType = sys.BPF_LINK_TYPE_NETNS
XDPType = sys.BPF_LINK_TYPE_XDP
PerfEventType = sys.BPF_LINK_TYPE_PERF_EVENT
+ KprobeMultiType = sys.BPF_LINK_TYPE_KPROBE_MULTI
)
-var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
+var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
Type: ebpf.CGroupSKB,
License: "MIT",
@@ -45,7 +46,7 @@ var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() erro
return nil
})
-var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replacement", "5.5", func() error {
+var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic replacement of MULTI progs", "5.5", func() error {
if err := haveProgAttach(); err != nil {
return err
}
@@ -85,7 +86,7 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace
return err
})
-var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error {
+var haveBPFLink = internal.NewFeatureTest("bpf_link", "5.7", func() error {
attr := sys.LinkCreateAttr{
// This is a hopefully invalid file descriptor, which triggers EBADF.
TargetFd: ^uint32(0),
@@ -101,3 +102,22 @@ var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error {
}
return err
})
+
+var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() error {
+ attr := sys.ProgQueryAttr{
+ // We rely on this being checked during the syscall.
+ // With an otherwise correct payload we expect EBADF here
+ // as an indication that the feature is present.
+ TargetFd: ^uint32(0),
+ AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress),
+ }
+
+ err := sys.ProgQuery(&attr)
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+ return err
+})
diff --git a/vendor/github.com/cilium/ebpf/link/tracepoint.go b/vendor/github.com/cilium/ebpf/link/tracepoint.go
index a59ef9d1c527..95f5fae3b094 100644
--- a/vendor/github.com/cilium/ebpf/link/tracepoint.go
+++ b/vendor/github.com/cilium/ebpf/link/tracepoint.go
@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/tracefs"
)
// TracepointOptions defines additional parameters that will be used
@@ -17,7 +18,7 @@ type TracepointOptions struct {
}
// Tracepoint attaches the given eBPF program to the tracepoint with the given
-// group and name. See /sys/kernel/debug/tracing/events to find available
+// group and name. See /sys/kernel/tracing/events to find available
// tracepoints. The top-level directory is the group, the event's subdirectory
// is the name. Example:
//
@@ -36,14 +37,11 @@ func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions)
if prog == nil {
return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
}
- if !isValidTraceID(group) || !isValidTraceID(name) {
- return nil, fmt.Errorf("group and name '%s/%s' must be alphanumeric or underscore: %w", group, name, errInvalidInput)
- }
if prog.Type() != ebpf.TracePoint {
return nil, fmt.Errorf("eBPF program type %s is not a Tracepoint: %w", prog.Type(), errInvalidInput)
}
- tid, err := getTraceEventID(group, name)
+ tid, err := tracefs.EventID(group, name)
if err != nil {
return nil, err
}
@@ -58,16 +56,9 @@ func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions)
cookie = opts.Cookie
}
- pe := &perfEvent{
- typ: tracepointEvent,
- group: group,
- name: name,
- tracefsID: tid,
- cookie: cookie,
- fd: fd,
- }
+ pe := newPerfEvent(fd, nil)
- lnk, err := attachPerfEvent(pe, prog)
+ lnk, err := attachPerfEvent(pe, prog, cookie)
if err != nil {
pe.Close()
return nil, err
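A usage sketch matching the updated doc comment; group and name map onto the /sys/kernel/tracing/events hierarchy (the tracepoint choice is illustrative):

package example

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func traceOpenat(prog *ebpf.Program) {
	// Corresponds to /sys/kernel/tracing/events/syscalls/sys_enter_openat.
	tp, err := link.Tracepoint("syscalls", "sys_enter_openat", prog, nil)
	if err != nil {
		log.Fatalf("tracepoint: %v", err)
	}
	defer tp.Close()
}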
diff --git a/vendor/github.com/cilium/ebpf/link/tracing.go b/vendor/github.com/cilium/ebpf/link/tracing.go
index e47e61a3b843..1e1a7834d8eb 100644
--- a/vendor/github.com/cilium/ebpf/link/tracing.go
+++ b/vendor/github.com/cilium/ebpf/link/tracing.go
@@ -1,11 +1,13 @@
package link
import (
+ "errors"
"fmt"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
)
type tracing struct {
@@ -70,6 +72,10 @@ func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (
Attach: ebpf.AttachNone,
BTF: typeID,
})
+ if errors.Is(err, sys.ENOTSUPP) {
+ // This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke.
+ return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported)
+ }
if err != nil {
return nil, err
}
@@ -82,25 +88,71 @@ type TracingOptions struct {
// AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or
// AttachTraceRawTp.
Program *ebpf.Program
+ // Program attach type. Can be one of:
+ // - AttachTraceFEntry
+ // - AttachTraceFExit
+ // - AttachModifyReturn
+ // - AttachTraceRawTp
+ // This field is optional.
+ AttachType ebpf.AttachType
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ Cookie uint64
}
type LSMOptions struct {
// Program must be of type LSM with attach type
// AttachLSMMac.
Program *ebpf.Program
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ Cookie uint64
}
// attachBTFID links BPF programs (Tracing/LSM) to the btf_id they attach to.
-func attachBTFID(program *ebpf.Program) (Link, error) {
+func attachBTFID(program *ebpf.Program, at ebpf.AttachType, cookie uint64) (Link, error) {
if program.FD() < 0 {
return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd)
}
- fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{
- ProgFd: uint32(program.FD()),
- })
- if err != nil {
- return nil, err
+ var (
+ fd *sys.FD
+ err error
+ )
+ switch at {
+ case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachTraceRawTp,
+ ebpf.AttachModifyReturn, ebpf.AttachLSMMac:
+ // Attach via BPF link
+ fd, err = sys.LinkCreateTracing(&sys.LinkCreateTracingAttr{
+ ProgFd: uint32(program.FD()),
+ AttachType: sys.AttachType(at),
+ Cookie: cookie,
+ })
+ if err == nil {
+ break
+ }
+ if !errors.Is(err, unix.EINVAL) && !errors.Is(err, sys.ENOTSUPP) {
+ return nil, fmt.Errorf("create tracing link: %w", err)
+ }
+ fallthrough
+ case ebpf.AttachNone:
+ // Attach via RawTracepointOpen
+ if cookie > 0 {
+ return nil, fmt.Errorf("create raw tracepoint with cookie: %w", ErrNotSupported)
+ }
+
+ fd, err = sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{
+ ProgFd: uint32(program.FD()),
+ })
+ if errors.Is(err, sys.ENOTSUPP) {
+ // This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke.
+ return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("create raw tracepoint: %w", err)
+ }
+ default:
+ return nil, fmt.Errorf("invalid attach type: %s", at.String())
}
raw := RawLink{fd: fd}
@@ -115,8 +167,7 @@ func attachBTFID(program *ebpf.Program) (Link, error) {
// a raw_tracepoint link. Other types return a tracing link.
return &rawTracepoint{raw}, nil
}
-
- return &tracing{RawLink: RawLink{fd: fd}}, nil
+ return &tracing{raw}, nil
}
// AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or
@@ -127,7 +178,14 @@ func AttachTracing(opts TracingOptions) (Link, error) {
return nil, fmt.Errorf("invalid program type %s, expected Tracing", t)
}
- return attachBTFID(opts.Program)
+ switch opts.AttachType {
+ case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachModifyReturn,
+ ebpf.AttachTraceRawTp, ebpf.AttachNone:
+ default:
+ return nil, fmt.Errorf("invalid attach type: %s", opts.AttachType.String())
+ }
+
+ return attachBTFID(opts.Program, opts.AttachType, opts.Cookie)
}
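A hedged sketch of the extended AttachTracing call with the new optional fields (program and cookie values are illustrative):

package example

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func attachFentry(prog *ebpf.Program) {
	// AttachType is optional; when set it must be one of the four types
	// listed above. The cookie is delivered via bpf_link when supported.
	l, err := link.AttachTracing(link.TracingOptions{
		Program:    prog,
		AttachType: ebpf.AttachTraceFEntry,
		Cookie:     42,
	})
	if err != nil {
		log.Fatalf("attach tracing: %v", err)
	}
	defer l.Close()
}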
// AttachLSM links a Linux security module (LSM) BPF Program to a BPF
@@ -137,5 +195,5 @@ func AttachLSM(opts LSMOptions) (Link, error) {
return nil, fmt.Errorf("invalid program type %s, expected LSM", t)
}
- return attachBTFID(opts.Program)
+ return attachBTFID(opts.Program, ebpf.AttachLSMMac, opts.Cookie)
}
diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go
index edf925b57020..272bac4151d4 100644
--- a/vendor/github.com/cilium/ebpf/link/uprobe.go
+++ b/vendor/github.com/cilium/ebpf/link/uprobe.go
@@ -5,27 +5,18 @@ import (
"errors"
"fmt"
"os"
- "path/filepath"
- "strings"
"sync"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/tracefs"
)
var (
- uprobeEventsPath = filepath.Join(tracefsPath, "uprobe_events")
-
- uprobeRetprobeBit = struct {
- once sync.Once
- value uint64
- err error
- }{}
-
uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset"
// elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799
uprobeRefCtrOffsetShift = 32
- haveRefCtrOffsetPMU = internal.FeatureTest("RefCtrOffsetPMU", "4.20", func() error {
+ haveRefCtrOffsetPMU = internal.NewFeatureTest("RefCtrOffsetPMU", "4.20", func() error {
_, err := os.Stat(uprobeRefCtrOffsetPMUPath)
if err != nil {
return internal.ErrNotSupported
@@ -44,6 +35,8 @@ type Executable struct {
path string
// Parsed ELF and dynamic symbols' addresses.
addresses map[string]uint64
+ // Keep track of symbol table lazy load.
+ addressesOnce sync.Once
}
// UprobeOptions defines additional parameters that will be used
@@ -77,11 +70,22 @@ type UprobeOptions struct {
//
// Needs kernel 5.15+.
Cookie uint64
+ // Prefix used for the event name if the uprobe must be attached using tracefs.
+ // The group name will be formatted as `<prefix>_<randomstr>`.
+ // The default empty string is equivalent to "ebpf" as the prefix.
+ TraceFSPrefix string
+}
+
+func (uo *UprobeOptions) cookie() uint64 {
+ if uo == nil {
+ return 0
+ }
+ return uo.Cookie
}
// To open a new Executable, use:
//
-// OpenExecutable("/bin/bash")
+// OpenExecutable("/bin/bash")
//
// The returned value can then be used to open Uprobe(s).
func OpenExecutable(path string) (*Executable, error) {
@@ -89,32 +93,21 @@ func OpenExecutable(path string) (*Executable, error) {
return nil, fmt.Errorf("path cannot be empty")
}
- f, err := os.Open(path)
- if err != nil {
- return nil, fmt.Errorf("open file '%s': %w", path, err)
- }
- defer f.Close()
-
- se, err := internal.NewSafeELFFile(f)
+ f, err := internal.OpenSafeELFFile(path)
if err != nil {
return nil, fmt.Errorf("parse ELF file: %w", err)
}
+ defer f.Close()
- if se.Type != elf.ET_EXEC && se.Type != elf.ET_DYN {
+ if f.Type != elf.ET_EXEC && f.Type != elf.ET_DYN {
// ELF is not an executable or a shared object.
return nil, errors.New("the given file is not an executable or a shared object")
}
- ex := Executable{
+ return &Executable{
path: path,
addresses: make(map[string]uint64),
- }
-
- if err := ex.load(se); err != nil {
- return nil, err
- }
-
- return &ex, nil
+ }, nil
}
func (ex *Executable) load(f *internal.SafeELFFile) error {
@@ -171,6 +164,22 @@ func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error
return opts.Address + opts.Offset, nil
}
+ var err error
+ ex.addressesOnce.Do(func() {
+ var f *internal.SafeELFFile
+ f, err = internal.OpenSafeELFFile(ex.path)
+ if err != nil {
+ err = fmt.Errorf("parse ELF file: %w", err)
+ return
+ }
+ defer f.Close()
+
+ err = ex.load(f)
+ })
+ if err != nil {
+ return 0, fmt.Errorf("lazy load symbols: %w", err)
+ }
+
address, ok := ex.addresses[symbol]
if !ok {
return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol)
@@ -194,13 +203,13 @@ func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error
// given symbol starts executing in the given Executable.
// For example, /bin/bash::main():
//
-// ex, _ = OpenExecutable("/bin/bash")
-// ex.Uprobe("main", prog, nil)
+// ex, _ = OpenExecutable("/bin/bash")
+// ex.Uprobe("main", prog, nil)
//
// When using symbols which belong to shared libraries,
// an offset must be provided via options:
//
-// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123})
+// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123})
//
// Note: Setting the Offset field in the options supersedes the symbol's offset.
//
@@ -216,7 +225,7 @@ func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti
return nil, err
}
- lnk, err := attachPerfEvent(u, prog)
+ lnk, err := attachPerfEvent(u, prog, opts.cookie())
if err != nil {
u.Close()
return nil, err
@@ -228,13 +237,13 @@ func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti
// Uretprobe attaches the given eBPF program to a perf event that fires right
// before the given symbol exits. For example, /bin/bash::main():
//
-// ex, _ = OpenExecutable("/bin/bash")
-// ex.Uretprobe("main", prog, nil)
+// ex, _ = OpenExecutable("/bin/bash")
+// ex.Uretprobe("main", prog, nil)
//
// When using symbols which belong to shared libraries,
// an offset must be provided via options:
//
-// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123})
+// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123})
//
// Note: Setting the Offset field in the options supersedes the symbol's offset.
//
@@ -250,7 +259,7 @@ func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeO
return nil, err
}
- lnk, err := attachPerfEvent(u, prog)
+ lnk, err := attachPerfEvent(u, prog, opts.cookie())
if err != nil {
u.Close()
return nil, err
@@ -288,18 +297,20 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti
}
}
- args := probeArgs{
- symbol: symbol,
- path: ex.path,
- offset: offset,
- pid: pid,
- refCtrOffset: opts.RefCtrOffset,
- ret: ret,
- cookie: opts.Cookie,
+ args := tracefs.ProbeArgs{
+ Type: tracefs.Uprobe,
+ Symbol: symbol,
+ Path: ex.path,
+ Offset: offset,
+ Pid: pid,
+ RefCtrOffset: opts.RefCtrOffset,
+ Ret: ret,
+ Cookie: opts.Cookie,
+ Group: opts.TraceFSPrefix,
}
// Use uprobe PMU if the kernel has it available.
- tp, err := pmuUprobe(args)
+ tp, err := pmuProbe(args)
if err == nil {
return tp, nil
}
@@ -308,66 +319,10 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti
}
// Use tracefs if uprobe PMU is missing.
- args.symbol = sanitizeSymbol(symbol)
- tp, err = tracefsUprobe(args)
+ tp, err = tracefsProbe(args)
if err != nil {
return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err)
}
return tp, nil
}
-
-// pmuUprobe opens a perf event based on the uprobe PMU.
-func pmuUprobe(args probeArgs) (*perfEvent, error) {
- return pmuProbe(uprobeType, args)
-}
-
-// tracefsUprobe creates a Uprobe tracefs entry.
-func tracefsUprobe(args probeArgs) (*perfEvent, error) {
- return tracefsProbe(uprobeType, args)
-}
-
-// sanitizeSymbol replaces every invalid character for the tracefs api with an underscore.
-// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString("_").
-func sanitizeSymbol(s string) string {
- var b strings.Builder
- b.Grow(len(s))
- var skip bool
- for _, c := range []byte(s) {
- switch {
- case c >= 'a' && c <= 'z',
- c >= 'A' && c <= 'Z',
- c >= '0' && c <= '9':
- skip = false
- b.WriteByte(c)
-
- default:
- if !skip {
- b.WriteByte('_')
- skip = true
- }
- }
- }
-
- return b.String()
-}
-
-// uprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api.
-func uprobeToken(args probeArgs) string {
- po := fmt.Sprintf("%s:%#x", args.path, args.offset)
-
- if args.refCtrOffset != 0 {
- // This is not documented in Documentation/trace/uprobetracer.txt.
- // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564
- po += fmt.Sprintf("(%#x)", args.refCtrOffset)
- }
-
- return po
-}
-
-func uretprobeBit() (uint64, error) {
- uprobeRetprobeBit.once.Do(func() {
- uprobeRetprobeBit.value, uprobeRetprobeBit.err = determineRetprobeBit(uprobeType)
- })
- return uprobeRetprobeBit.value, uprobeRetprobeBit.err
-}
diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go
index e6276b1829b2..e0dbfcffd377 100644
--- a/vendor/github.com/cilium/ebpf/linker.go
+++ b/vendor/github.com/cilium/ebpf/linker.go
@@ -1,14 +1,51 @@
package ebpf
import (
+ "encoding/binary"
"errors"
"fmt"
- "sync"
+ "io"
+ "math"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
)
+// handles stores handle objects to avoid gc cleanup
+type handles []*btf.Handle
+
+func (hs *handles) add(h *btf.Handle) (int, error) {
+ if h == nil {
+ return 0, nil
+ }
+
+ if len(*hs) == math.MaxInt16 {
+ return 0, fmt.Errorf("can't add more than %d module FDs to fdArray", math.MaxInt16)
+ }
+
+ *hs = append(*hs, h)
+
+ // return length of slice so that indexes start at 1
+ return len(*hs), nil
+}
+
+func (hs handles) fdArray() []int32 {
+ // first element of fda is reserved as no module can be indexed with 0
+ fda := []int32{0}
+ for _, h := range hs {
+ fda = append(fda, int32(h.FD()))
+ }
+
+ return fda
+}
+
+func (hs handles) close() {
+ for _, h := range hs {
+ h.Close()
+ }
+}
+
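The fdArray helper above produces the kernel's fd_array layout for kfunc calls: slot 0 is reserved, so module handles are referenced with 1-based offsets. A small sketch of that convention (the FD values are made up):

    package main

    import "fmt"

    func main() {
        // Slot 0 is reserved; Instruction.Offset == 0 means "vmlinux kfunc".
        fdArray := []int32{0}
        for _, fd := range []int32{10, 11} { // made-up module BTF FDs
            fdArray = append(fdArray, fd)
        }
        offset := int16(2)           // value fixupKfuncs stores for the second module
        fmt.Println(fdArray[offset]) // 11: FD of the module defining the kfunc
    }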
// splitSymbols splits insns into subsections delimited by Symbol Instructions.
// insns cannot be empty and must start with a Symbol Instruction.
//
@@ -67,7 +104,7 @@ func hasFunctionReferences(insns asm.Instructions) bool {
//
// Passing a nil target will relocate against the running kernel. insns are
// modified in place.
-func applyRelocations(insns asm.Instructions, local, target *btf.Spec) error {
+func applyRelocations(insns asm.Instructions, target *btf.Spec, bo binary.ByteOrder) error {
var relos []*btf.CORERelocation
var reloInsns []*asm.Instruction
iter := insns.Iterate()
@@ -82,19 +119,18 @@ func applyRelocations(insns asm.Instructions, local, target *btf.Spec) error {
return nil
}
- target, err := maybeLoadKernelBTF(target)
- if err != nil {
- return err
+ if bo == nil {
+ bo = internal.NativeEndian
}
- fixups, err := btf.CORERelocate(local, target, relos)
+ fixups, err := btf.CORERelocate(relos, target, bo)
if err != nil {
return err
}
for i, fixup := range fixups {
if err := fixup.Apply(reloInsns[i]); err != nil {
- return fmt.Errorf("apply fixup %s: %w", &fixup, err)
+ return fmt.Errorf("fixup for %s: %w", relos[i], err)
}
}
@@ -181,8 +217,9 @@ func fixupAndValidate(insns asm.Instructions) error {
ins := iter.Ins
// Map load was tagged with a Reference, but does not contain a Map pointer.
- if ins.IsLoadFromMap() && ins.Reference() != "" && ins.Map() == nil {
- return fmt.Errorf("instruction %d: map %s: %w", iter.Index, ins.Reference(), asm.ErrUnsatisfiedMapReference)
+ needsMap := ins.Reference() != "" || ins.Metadata.Get(kconfigMetaKey{}) != nil
+ if ins.IsLoadFromMap() && needsMap && ins.Map() == nil {
+ return fmt.Errorf("instruction %d: %w", iter.Index, asm.ErrUnsatisfiedMapReference)
}
fixupProbeReadKernel(ins)
@@ -191,6 +228,88 @@ func fixupAndValidate(insns asm.Instructions) error {
return nil
}
+// fixupKfuncs loops over all instructions in search of kfunc calls.
+// If at least one is found, the current kernel's BTF and module BTFs are searched to set Instruction.Constant
+// and Instruction.Offset to the correct values.
+func fixupKfuncs(insns asm.Instructions) (handles, error) {
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+ if ins.IsKfuncCall() {
+ goto fixups
+ }
+ }
+
+ return nil, nil
+
+fixups:
+ // only load the kernel spec if we found at least one kfunc call
+ kernelSpec, err := btf.LoadKernelSpec()
+ if err != nil {
+ return nil, err
+ }
+
+ fdArray := make(handles, 0)
+ for {
+ ins := iter.Ins
+
+ if !ins.IsKfuncCall() {
+ if !iter.Next() {
+ // break loop if this was the last instruction in the stream.
+ break
+ }
+ continue
+ }
+
+ // Check the kfunc metadata; error out if it is missing.
+ kfm, _ := ins.Metadata.Get(kfuncMeta{}).(*btf.Func)
+ if kfm == nil {
+ return nil, fmt.Errorf("kfunc call has no kfuncMeta")
+ }
+
+ target := btf.Type((*btf.Func)(nil))
+ spec, module, err := findTargetInKernel(kernelSpec, kfm.Name, &target)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil, fmt.Errorf("kfunc %q: %w", kfm.Name, ErrNotSupported)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if err := btf.CheckTypeCompatibility(kfm.Type, target.(*btf.Func).Type); err != nil {
+ return nil, &incompatibleKfuncError{kfm.Name, err}
+ }
+
+ id, err := spec.TypeID(target)
+ if err != nil {
+ return nil, err
+ }
+
+ idx, err := fdArray.add(module)
+ if err != nil {
+ return nil, err
+ }
+
+ ins.Constant = int64(id)
+ ins.Offset = int16(idx)
+
+ if !iter.Next() {
+ break
+ }
+ }
+
+ return fdArray, nil
+}
+
+type incompatibleKfuncError struct {
+ name string
+ err error
+}
+
+func (ike *incompatibleKfuncError) Error() string {
+ return fmt.Sprintf("kfunc %q: %s", ike.name, ike.err)
+}
+
// fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str)
// with bpf_probe_read(_str) on kernels that don't support it yet.
func fixupProbeReadKernel(ins *asm.Instruction) {
@@ -211,28 +330,62 @@ func fixupProbeReadKernel(ins *asm.Instruction) {
}
}
-var kernelBTF struct {
- sync.Mutex
- spec *btf.Spec
-}
-
-// maybeLoadKernelBTF loads the current kernel's BTF if spec is nil, otherwise
-// it returns spec unchanged.
+// resolveKconfigReferences creates and populates a .kconfig map if necessary.
//
-// The kernel BTF is cached for the lifetime of the process.
-func maybeLoadKernelBTF(spec *btf.Spec) (*btf.Spec, error) {
- if spec != nil {
- return spec, nil
+// Returns a nil Map and no error if no references exist.
+func resolveKconfigReferences(insns asm.Instructions) (_ *Map, err error) {
+ closeOnError := func(c io.Closer) {
+ if err != nil {
+ c.Close()
+ }
}
- kernelBTF.Lock()
- defer kernelBTF.Unlock()
+ var spec *MapSpec
+ iter := insns.Iterate()
+ for iter.Next() {
+ meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta)
+ if meta != nil {
+ spec = meta.Map
+ break
+ }
+ }
+
+ if spec == nil {
+ return nil, nil
+ }
+
+ cpy := spec.Copy()
+ if err := resolveKconfig(cpy); err != nil {
+ return nil, err
+ }
+
+ kconfig, err := NewMap(cpy)
+ if err != nil {
+ return nil, err
+ }
+ defer closeOnError(kconfig)
+
+ // Resolve all instructions which load from .kconfig map with actual map
+ // and offset inside it.
+ iter = insns.Iterate()
+ for iter.Next() {
+ meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta)
+ if meta == nil {
+ continue
+ }
+
+ if meta.Map != spec {
+ return nil, fmt.Errorf("instruction %d: reference to multiple .kconfig maps is not allowed", iter.Index)
+ }
+
+ if err := iter.Ins.AssociateMap(kconfig); err != nil {
+ return nil, fmt.Errorf("instruction %d: %w", iter.Index, err)
+ }
- if kernelBTF.spec != nil {
- return kernelBTF.spec, nil
+ // Encode a map read at the offset of the var in the datasec.
+ iter.Ins.Constant = int64(uint64(meta.Offset) << 32)
+ iter.Ins.Metadata.Set(kconfigMetaKey{}, nil)
}
- var err error
- kernelBTF.spec, err = btf.LoadKernelSpec()
- return kernelBTF.spec, err
+ return kconfig, nil
}
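
resolveKconfigReferences encodes the variable's offset within the .kconfig datasec into the upper 32 bits of the load instruction's constant. A sketch of that encoding (the offset value is assumed):

    package main

    import "fmt"

    func main() {
        var offset uint32 = 24 // assumed offset of a variable in .kconfig
        constant := int64(uint64(offset) << 32)
        fmt.Printf("%#x\n", constant)               // 0x1800000000
        fmt.Println(uint32(uint64(constant) >> 32)) // 24: offset recovered
    }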
diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go
index e4a6c87e924b..a11664cc72da 100644
--- a/vendor/github.com/cilium/ebpf/map.go
+++ b/vendor/github.com/cilium/ebpf/map.go
@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"math/rand"
+ "os"
"path/filepath"
"reflect"
"time"
@@ -77,9 +78,6 @@ type MapSpec struct {
// The key and value type of this map. May be nil.
Key, Value btf.Type
-
- // The BTF associated with this map.
- BTF *btf.Spec
}
func (ms *MapSpec) String() string {
@@ -104,12 +102,6 @@ func (ms *MapSpec) Copy() *MapSpec {
return &cpy
}
-// hasBTF returns true if the MapSpec has a valid BTF spec and if its
-// map type supports associated BTF metadata in the kernel.
-func (ms *MapSpec) hasBTF() bool {
- return ms.BTF != nil && ms.Type.hasBTF()
-}
-
func (ms *MapSpec) clampPerfEventArraySize() error {
if ms.Type != PerfEventArray {
return nil
@@ -158,7 +150,11 @@ type MapKV struct {
Value interface{}
}
-func (ms *MapSpec) checkCompatibility(m *Map) error {
+// Compatible returns nil if an existing map may be used instead of creating
+// one from the spec.
+//
+// Returns an error wrapping [ErrMapIncompatible] otherwise.
+func (ms *MapSpec) Compatible(m *Map) error {
switch {
case m.typ != ms.Type:
return fmt.Errorf("expected type %v, got %v: %w", ms.Type, m.typ, ErrMapIncompatible)
@@ -173,7 +169,10 @@ func (ms *MapSpec) checkCompatibility(m *Map) error {
m.maxEntries != ms.MaxEntries:
return fmt.Errorf("expected max entries %v, got %v: %w", ms.MaxEntries, m.maxEntries, ErrMapIncompatible)
- case m.flags != ms.Flags:
+ // BPF_F_RDONLY_PROG is set unconditionally for devmaps. Explicitly allow
+ // this mismatch.
+ case !((ms.Type == DevMap || ms.Type == DevMapHash) && m.flags^ms.Flags == unix.BPF_F_RDONLY_PROG) &&
+ m.flags != ms.Flags:
return fmt.Errorf("expected flags %v, got %v: %w", ms.Flags, m.flags, ErrMapIncompatible)
}
return nil
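
The XOR in the devmap case isolates exactly the flag bits that differ, so the mismatch is tolerated only when the sole differing bit is BPF_F_RDONLY_PROG. A standalone illustration (the flag value is copied from the Linux UAPI):

    package main

    import "fmt"

    const BPF_F_RDONLY_PROG = 1 << 7 // from the Linux UAPI

    func main() {
        specFlags := uint32(0)
        kernelFlags := uint32(BPF_F_RDONLY_PROG) // set unconditionally for devmaps
        // XOR leaves only the differing bits.
        fmt.Println(kernelFlags^specFlags == BPF_F_RDONLY_PROG) // true: tolerated
        kernelFlags |= 1 << 1                                   // any further difference
        fmt.Println(kernelFlags^specFlags == BPF_F_RDONLY_PROG) // false: incompatible
    }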
@@ -241,10 +240,7 @@ func NewMap(spec *MapSpec) (*Map, error) {
//
// May return an error wrapping ErrMapIncompatible.
func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) {
- handles := newHandleCache()
- defer handles.close()
-
- m, err := newMapWithOptions(spec, opts, handles)
+ m, err := newMapWithOptions(spec, opts)
if err != nil {
return nil, fmt.Errorf("creating map: %w", err)
}
@@ -257,7 +253,7 @@ func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) {
return m, nil
}
-func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ *Map, err error) {
+func newMapWithOptions(spec *MapSpec, opts MapOptions) (_ *Map, err error) {
closeOnError := func(c io.Closer) {
if err != nil {
c.Close()
@@ -284,7 +280,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_
}
defer closeOnError(m)
- if err := spec.checkCompatibility(m); err != nil {
+ if err := spec.Compatible(m); err != nil {
return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err)
}
@@ -307,7 +303,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_
return nil, errors.New("inner maps cannot be pinned")
}
- template, err := spec.InnerMap.createMap(nil, opts, handles)
+ template, err := spec.InnerMap.createMap(nil, opts)
if err != nil {
return nil, fmt.Errorf("inner map: %w", err)
}
@@ -319,7 +315,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_
innerFd = template.fd
}
- m, err := spec.createMap(innerFd, opts, handles)
+ m, err := spec.createMap(innerFd, opts)
if err != nil {
return nil, err
}
@@ -328,7 +324,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_
if spec.Pinning == PinByName {
path := filepath.Join(opts.PinPath, spec.Name)
if err := m.Pin(path); err != nil {
- return nil, fmt.Errorf("pin map: %w", err)
+ return nil, fmt.Errorf("pin map to %s: %w", path, err)
}
}
@@ -337,15 +333,13 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_
// createMap validates the spec's properties and creates the map in the kernel
// using the given opts. It does not populate or freeze the map.
-func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) {
+func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions) (_ *Map, err error) {
closeOnError := func(closer io.Closer) {
if err != nil {
closer.Close()
}
}
- spec = spec.Copy()
-
// Kernels 4.13 through 5.4 used a struct bpf_map_def that contained
// additional 'inner_map_idx' and later 'numa_node' fields.
// In order to support loading these definitions, tolerate the presence of
@@ -365,17 +359,21 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCa
if spec.ValueSize != 0 && spec.ValueSize != 4 {
return nil, errors.New("ValueSize must be zero or four for map of map")
}
+
+ spec = spec.Copy()
spec.ValueSize = 4
case PerfEventArray:
if spec.KeySize != 0 && spec.KeySize != 4 {
return nil, errors.New("KeySize must be zero or four for perf event array")
}
- spec.KeySize = 4
if spec.ValueSize != 0 && spec.ValueSize != 4 {
return nil, errors.New("ValueSize must be zero or four for perf event array")
}
+
+ spec = spec.Copy()
+ spec.KeySize = 4
spec.ValueSize = 4
if spec.MaxEntries == 0 {
@@ -413,7 +411,7 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCa
KeySize: spec.KeySize,
ValueSize: spec.ValueSize,
MaxEntries: spec.MaxEntries,
- MapFlags: spec.Flags,
+ MapFlags: sys.MapFlags(spec.Flags),
NumaNode: spec.NumaNode,
}
@@ -425,40 +423,43 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCa
attr.MapName = sys.NewObjName(spec.Name)
}
- if spec.hasBTF() {
- handle, err := handles.btfHandle(spec.BTF)
+ if spec.Key != nil || spec.Value != nil {
+ handle, keyTypeID, valueTypeID, err := btf.MarshalMapKV(spec.Key, spec.Value)
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
return nil, fmt.Errorf("load BTF: %w", err)
}
if handle != nil {
- keyTypeID, err := spec.BTF.TypeID(spec.Key)
- if err != nil {
- return nil, err
- }
-
- valueTypeID, err := spec.BTF.TypeID(spec.Value)
- if err != nil {
- return nil, err
- }
+ defer handle.Close()
+ // Use BTF k/v during map creation.
attr.BtfFd = uint32(handle.FD())
- attr.BtfKeyTypeId = uint32(keyTypeID)
- attr.BtfValueTypeId = uint32(valueTypeID)
+ attr.BtfKeyTypeId = keyTypeID
+ attr.BtfValueTypeId = valueTypeID
}
}
fd, err := sys.MapCreate(&attr)
+ // Some map types don't support BTF k/v in earlier kernel versions.
+ // Remove BTF metadata and retry map creation.
+ if (errors.Is(err, sys.ENOTSUPP) || errors.Is(err, unix.EINVAL)) && attr.BtfFd != 0 {
+ attr.BtfFd, attr.BtfKeyTypeId, attr.BtfValueTypeId = 0, 0, 0
+ fd, err = sys.MapCreate(&attr)
+ }
+
if err != nil {
if errors.Is(err, unix.EPERM) {
return nil, fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err)
}
- if !spec.hasBTF() {
- return nil, fmt.Errorf("map create without BTF: %w", err)
- }
if errors.Is(err, unix.EINVAL) && attr.MaxEntries == 0 {
return nil, fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err)
}
+ if errors.Is(err, unix.EINVAL) && spec.Type == UnspecifiedMap {
+ return nil, fmt.Errorf("map create: cannot use type %s", UnspecifiedMap)
+ }
+ if attr.BtfFd == 0 {
+ return nil, fmt.Errorf("map create: %w (without BTF k/v)", err)
+ }
return nil, fmt.Errorf("map create: %w", err)
}
defer closeOnError(fd)
@@ -495,7 +496,7 @@ func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries
return nil, err
}
- m.fullValueSize = internal.Align(int(valueSize), 8) * possibleCPUs
+ m.fullValueSize = int(internal.Align(valueSize, 8)) * possibleCPUs
return m, nil
}
@@ -549,12 +550,7 @@ const LookupLock MapLookupFlags = 4
//
// Returns an error if the key doesn't exist, see ErrKeyNotExist.
func (m *Map) Lookup(key, valueOut interface{}) error {
- valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
- if err := m.lookup(key, valuePtr, 0); err != nil {
- return err
- }
-
- return m.unmarshalValue(valueOut, valueBytes)
+ return m.LookupWithFlags(key, valueOut, 0)
}
// LookupWithFlags retrieves a value from a Map with flags.
@@ -568,6 +564,10 @@ func (m *Map) Lookup(key, valueOut interface{}) error {
//
// Returns an error if the key doesn't exist, see ErrKeyNotExist.
func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) error {
+ if m.typ.hasPerCPUValue() {
+ return m.lookupPerCPU(key, valueOut, flags)
+ }
+
valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
if err := m.lookup(key, valuePtr, flags); err != nil {
return err
@@ -580,7 +580,7 @@ func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) e
//
// Returns ErrKeyNotExist if the key doesn't exist.
func (m *Map) LookupAndDelete(key, valueOut interface{}) error {
- return m.lookupAndDelete(key, valueOut, 0)
+ return m.LookupAndDeleteWithFlags(key, valueOut, 0)
}
// LookupAndDeleteWithFlags retrieves and deletes a value from a Map.
@@ -591,7 +591,15 @@ func (m *Map) LookupAndDelete(key, valueOut interface{}) error {
//
// Returns ErrKeyNotExist if the key doesn't exist.
func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLookupFlags) error {
- return m.lookupAndDelete(key, valueOut, flags)
+ if m.typ.hasPerCPUValue() {
+ return m.lookupAndDeletePerCPU(key, valueOut, flags)
+ }
+
+ valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
+ if err := m.lookupAndDelete(key, valuePtr, flags); err != nil {
+ return err
+ }
+ return m.unmarshalValue(valueOut, valueBytes)
}
// LookupBytes gets a value from Map.
@@ -609,6 +617,14 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
return valueBytes, err
}
+func (m *Map) lookupPerCPU(key, valueOut any, flags MapLookupFlags) error {
+ valueBytes := make([]byte, m.fullValueSize)
+ if err := m.lookup(key, sys.NewSlicePointer(valueBytes), flags); err != nil {
+ return err
+ }
+ return unmarshalPerCPUValue(valueOut, int(m.valueSize), valueBytes)
+}
+
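Per-CPU lookups rely on fullValueSize, which newMap computes as the 8-byte-aligned value size times the number of possible CPUs. A sketch of that buffer sizing (the sizes are assumed):

    package main

    import "fmt"

    func align(n, alignment int) int {
        return (n + alignment - 1) / alignment * alignment
    }

    func main() {
        valueSize := 6    // declared value size of a per-CPU map (assumed)
        possibleCPUs := 4 // number of possible CPUs (assumed)
        elem := align(valueSize, 8)
        fmt.Println(elem)                // 8: each per-CPU element is padded
        fmt.Println(elem * possibleCPUs) // 32: fullValueSize, one element per CPU
    }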
func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error {
keyPtr, err := m.marshalKey(key)
if err != nil {
@@ -628,9 +644,15 @@ func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags
return nil
}
-func (m *Map) lookupAndDelete(key, valueOut interface{}, flags MapLookupFlags) error {
- valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
+func (m *Map) lookupAndDeletePerCPU(key, valueOut any, flags MapLookupFlags) error {
+ valueBytes := make([]byte, m.fullValueSize)
+ if err := m.lookupAndDelete(key, sys.NewSlicePointer(valueBytes), flags); err != nil {
+ return err
+ }
+ return unmarshalPerCPUValue(valueOut, int(m.valueSize), valueBytes)
+}
+func (m *Map) lookupAndDelete(key any, valuePtr sys.Pointer, flags MapLookupFlags) error {
keyPtr, err := m.marshalKey(key)
if err != nil {
return fmt.Errorf("can't marshal key: %w", err)
@@ -647,7 +669,7 @@ func (m *Map) lookupAndDelete(key, valueOut interface{}, flags MapLookupFlags) e
return fmt.Errorf("lookup and delete: %w", wrapMapError(err))
}
- return m.unmarshalValue(valueOut, valueBytes)
+ return nil
}
// MapUpdateFlags controls the behaviour of the Map.Update call.
@@ -674,15 +696,32 @@ func (m *Map) Put(key, value interface{}) error {
}
// Update changes the value of a key.
-func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
- keyPtr, err := m.marshalKey(key)
- if err != nil {
- return fmt.Errorf("can't marshal key: %w", err)
+func (m *Map) Update(key, value any, flags MapUpdateFlags) error {
+ if m.typ.hasPerCPUValue() {
+ return m.updatePerCPU(key, value, flags)
}
valuePtr, err := m.marshalValue(value)
if err != nil {
- return fmt.Errorf("can't marshal value: %w", err)
+ return fmt.Errorf("marshal value: %w", err)
+ }
+
+ return m.update(key, valuePtr, flags)
+}
+
+func (m *Map) updatePerCPU(key, value any, flags MapUpdateFlags) error {
+ valuePtr, err := marshalPerCPUValue(value, int(m.valueSize))
+ if err != nil {
+ return fmt.Errorf("marshal value: %w", err)
+ }
+
+ return m.update(key, valuePtr, flags)
+}
+
+func (m *Map) update(key any, valuePtr sys.Pointer, flags MapUpdateFlags) error {
+ keyPtr, err := m.marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("marshal key: %w", err)
}
attr := sys.MapUpdateElemAttr{
@@ -798,12 +837,22 @@ func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error {
return nil
}
+var mmapProtectedPage = internal.Memoize(func() ([]byte, error) {
+ return unix.Mmap(-1, 0, os.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_SHARED)
+})
+
// guessNonExistentKey attempts to perform a map lookup that returns ENOENT.
// This is necessary on kernels before 4.4.132, since those don't support
// iterating maps from the start by providing an invalid key pointer.
func (m *Map) guessNonExistentKey() ([]byte, error) {
- // Provide an invalid value pointer to prevent a copy on the kernel side.
- valuePtr := sys.NewPointer(unsafe.Pointer(^uintptr(0)))
+ // Map a protected page and use that as the value pointer. This saves some
+ // work copying out the value, which we're not interested in.
+ page, err := mmapProtectedPage()
+ if err != nil {
+ return nil, err
+ }
+ valuePtr := sys.NewSlicePointer(page)
+
randKey := make([]byte, int(m.keySize))
for i := 0; i < 4; i++ {
@@ -1095,7 +1144,8 @@ func (m *Map) Clone() (*Map, error) {
// the new path already exists. Re-pinning across filesystems is not supported.
// You can Clone a map to pin it to a different path.
//
-// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs
+// This requires bpffs to be mounted above fileName.
+// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd
func (m *Map) Pin(fileName string) error {
if err := internal.Pin(m.pinnedPath, fileName, m.fd); err != nil {
return err
@@ -1180,10 +1230,6 @@ func (m *Map) unmarshalKey(data interface{}, buf []byte) error {
}
func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) {
- if m.typ.hasPerCPUValue() {
- return marshalPerCPUValue(data, int(m.valueSize))
- }
-
var (
buf []byte
err error
@@ -1316,8 +1362,7 @@ func marshalMap(m *Map, length int) ([]byte, error) {
// See Map.Iterate.
type MapIterator struct {
target *Map
- prevKey interface{}
- prevBytes []byte
+ curKey []byte
count, maxEntries uint32
done bool
err error
@@ -1327,7 +1372,6 @@ func newMapIterator(target *Map) *MapIterator {
return &MapIterator{
target: target,
maxEntries: target.maxEntries,
- prevBytes: make([]byte, target.keySize),
}
}
@@ -1349,26 +1393,35 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
// For array-like maps NextKeyBytes returns nil only after maxEntries
// iterations.
for mi.count <= mi.maxEntries {
- var nextBytes []byte
- nextBytes, mi.err = mi.target.NextKeyBytes(mi.prevKey)
+ var nextKey []byte
+ if mi.curKey == nil {
+ // Pass nil interface to NextKeyBytes to make sure the Map's first key
+ // is returned. If we pass an uninitialized []byte instead, it'll see a
+ // non-nil interface and try to marshal it.
+ nextKey, mi.err = mi.target.NextKeyBytes(nil)
+
+ mi.curKey = make([]byte, mi.target.keySize)
+ } else {
+ nextKey, mi.err = mi.target.NextKeyBytes(mi.curKey)
+ }
if mi.err != nil {
+ mi.err = fmt.Errorf("get next key: %w", mi.err)
return false
}
- if nextBytes == nil {
+ if nextKey == nil {
mi.done = true
return false
}
- // The user can get access to nextBytes since unmarshalBytes
+ // The user can get access to nextKey since unmarshalBytes
// does not copy when unmarshaling into a []byte.
// Make a copy to prevent accidental corruption of
// iterator state.
- copy(mi.prevBytes, nextBytes)
- mi.prevKey = mi.prevBytes
+ copy(mi.curKey, nextKey)
mi.count++
- mi.err = mi.target.Lookup(nextBytes, valueOut)
+ mi.err = mi.target.Lookup(nextKey, valueOut)
if errors.Is(mi.err, ErrKeyNotExist) {
// Even though the key should be valid, we couldn't look up
// its value. If we're iterating a hash map this is probably
@@ -1381,10 +1434,11 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
continue
}
if mi.err != nil {
+ mi.err = fmt.Errorf("look up next key: %w", mi.err)
return false
}
- mi.err = mi.target.unmarshalKey(keyOut, nextBytes)
+ mi.err = mi.target.unmarshalKey(keyOut, nextKey)
return mi.err == nil
}
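
For reference, a typical caller of the iterator whose internals changed above; the uint32/uint64 key and value types are assumptions and must match the map's spec:

    package main

    import (
        "fmt"

        "github.com/cilium/ebpf"
    )

    func dumpMap(m *ebpf.Map) error {
        var key uint32
        var value uint64
        iter := m.Iterate()
        for iter.Next(&key, &value) {
            fmt.Println(key, value)
        }
        return iter.Err() // now wrapped, e.g. "get next key: ..."
    }

    func main() {} // opening or creating a map is out of scope here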
diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go
index 544d17f35e1a..a568bff92074 100644
--- a/vendor/github.com/cilium/ebpf/marshalers.go
+++ b/vendor/github.com/cilium/ebpf/marshalers.go
@@ -57,8 +57,10 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) {
case Map, *Map, Program, *Program:
err = fmt.Errorf("can't marshal %T", value)
default:
- var wr bytes.Buffer
- err = binary.Write(&wr, internal.NativeEndian, value)
+ wr := internal.NewBuffer(make([]byte, 0, length))
+ defer internal.PutBuffer(wr)
+
+ err = binary.Write(wr, internal.NativeEndian, value)
if err != nil {
err = fmt.Errorf("encoding %T: %v", value, err)
}
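
The marshalling change above swaps a throwaway bytes.Buffer for a pooled buffer from the library's internal package. A minimal sketch of the same pooling pattern using only the standard library (not the library's actual internal API):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
        "sync"
    )

    var bufPool = sync.Pool{
        New: func() any { return new(bytes.Buffer) },
    }

    func marshal(v uint32) []byte {
        buf := bufPool.Get().(*bytes.Buffer)
        buf.Reset()
        defer bufPool.Put(buf)

        _ = binary.Write(buf, binary.LittleEndian, v) // error elided in the sketch
        out := make([]byte, buf.Len())
        copy(out, buf.Bytes()) // copy out: the buffer is reused after Put
        return out
    }

    func main() {
        fmt.Println(marshal(0xdeadbeef)) // [239 190 173 222]
    }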
diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go
index 675edc711d74..70aaef55327a 100644
--- a/vendor/github.com/cilium/ebpf/prog.go
+++ b/vendor/github.com/cilium/ebpf/prog.go
@@ -10,6 +10,7 @@ import (
"runtime"
"strings"
"time"
+ "unsafe"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/btf"
@@ -35,16 +36,44 @@ const (
// verifier log.
const DefaultVerifierLogSize = 64 * 1024
+// maxVerifierLogSize is the maximum size of verifier log buffer the kernel
+// will accept before returning EINVAL.
+const maxVerifierLogSize = math.MaxUint32 >> 2
+
// ProgramOptions control loading a program into the kernel.
type ProgramOptions struct {
- // Controls the detail emitted by the kernel verifier. Set to non-zero
- // to enable logging.
- LogLevel uint32
- // Controls the output buffer size for the verifier. Defaults to
- // DefaultVerifierLogSize.
+ // Bitmap controlling the detail emitted by the kernel's eBPF verifier log.
+ // LogLevel-type values can be ORed together to request specific kinds of
+ // verifier output. See the documentation on [ebpf.LogLevel] for details.
+ //
+ // opts.LogLevel = (ebpf.LogLevelBranch | ebpf.LogLevelStats)
+ //
+ // If left to its default value, the program will first be loaded without
+ // verifier output enabled. Upon error, the program load will be repeated
+ // with LogLevelBranch and the given (or default) LogSize value.
+ //
+ // Setting this to a non-zero value will unconditionally enable the verifier
+ // log, populating the [ebpf.Program.VerifierLog] field on successful loads
+ // and including detailed verifier errors if the program is rejected. This
+ // will always allocate an output buffer, but will result in only a single
+ // attempt at loading the program.
+ LogLevel LogLevel
+
+ // Controls the output buffer size for the verifier log, in bytes. See the
+ // documentation on ProgramOptions.LogLevel for details about how this value
+ // is used.
+ //
+ // If this value is set too low to fit the verifier log, the resulting
+ // [ebpf.VerifierError]'s Truncated flag will be true, and the error string
+ // will also contain a hint to that effect.
+ //
+ // Defaults to DefaultVerifierLogSize.
LogSize int
- // Type information used for CO-RE relocations and when attaching to
- // kernel functions.
+
+ // Disables the verifier log completely, regardless of other options.
+ LogDisabled bool
+
+ // Type information used for CO-RE relocations.
//
// This is useful in environments where the kernel BTF is not available
// (containers) or where it is in a non-standard location. Defaults to
@@ -74,7 +103,7 @@ type ProgramSpec struct {
// The program to attach to. Must be provided manually.
AttachTarget *Program
- // The name of the ELF section this program orininated from.
+ // The name of the ELF section this program originated from.
SectionName string
Instructions asm.Instructions
@@ -95,11 +124,6 @@ type ProgramSpec struct {
// detect this value automatically.
KernelVersion uint32
- // The BTF associated with this program. Changing Instructions
- // will most likely invalidate the contained data, and may
- // result in errors when attempting to load it into the kernel.
- BTF *btf.Spec
-
// The byte order this program was compiled for, may be nil.
ByteOrder binary.ByteOrder
}
@@ -123,6 +147,10 @@ func (ps *ProgramSpec) Tag() (string, error) {
return ps.Instructions.Tag(internal.NativeEndian)
}
+// VerifierError is returned by [NewProgram] and [NewProgramWithOptions] if a
+// program is rejected by the verifier.
+//
+// Use [errors.As] to access the error.
type VerifierError = internal.VerifierError
// Program represents BPF program loaded into the kernel.
@@ -141,7 +169,10 @@ type Program struct {
// NewProgram creates a new Program.
//
-// See NewProgramWithOptions for details.
+// See [NewProgramWithOptions] for details.
+//
+// Returns a [VerifierError] containing the full verifier log if the program is
+// rejected by the kernel.
func NewProgram(spec *ProgramSpec) (*Program, error) {
return NewProgramWithOptions(spec, ProgramOptions{})
}
@@ -151,24 +182,21 @@ func NewProgram(spec *ProgramSpec) (*Program, error) {
// Loading a program for the first time will perform
// feature detection by loading small, temporary programs.
//
-// Returns an error wrapping VerifierError if the program or its BTF is rejected
-// by the kernel.
+// Returns a [VerifierError] containing the full verifier log if the program is
+// rejected by the kernel.
func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
if spec == nil {
return nil, errors.New("can't load a program from a nil spec")
}
- handles := newHandleCache()
- defer handles.close()
-
- prog, err := newProgramWithOptions(spec, opts, handles)
+ prog, err := newProgramWithOptions(spec, opts)
if errors.Is(err, asm.ErrUnsatisfiedMapReference) {
return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err)
}
return prog, err
}
-func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *handleCache) (*Program, error) {
+func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
if len(spec.Instructions) == 0 {
return nil, errors.New("instructions cannot be empty")
}
@@ -181,6 +209,10 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian)
}
+ if opts.LogSize < 0 {
+ return nil, errors.New("ProgramOptions.LogSize must be a positive value; disable verifier logs using ProgramOptions.LogDisabled")
+ }
+
// Kernels before 5.0 (6c4fc209fcf9 "bpf: remove useless version check for prog load")
// require the version field to be set to the value of the KERNEL_VERSION
// macro for kprobe-type programs.
@@ -206,47 +238,54 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
attr.ProgName = sys.NewObjName(spec.Name)
}
- kernelTypes := opts.KernelTypes
-
insns := make(asm.Instructions, len(spec.Instructions))
copy(insns, spec.Instructions)
- var btfDisabled bool
- if spec.BTF != nil {
- if err := applyRelocations(insns, spec.BTF, kernelTypes); err != nil {
- return nil, fmt.Errorf("apply CO-RE relocations: %w", err)
- }
+ handle, fib, lib, err := btf.MarshalExtInfos(insns)
+ if err != nil && !errors.Is(err, btf.ErrNotSupported) {
+ return nil, fmt.Errorf("load ext_infos: %w", err)
+ }
+ if handle != nil {
+ defer handle.Close()
- handle, err := handles.btfHandle(spec.BTF)
- btfDisabled = errors.Is(err, btf.ErrNotSupported)
- if err != nil && !btfDisabled {
- return nil, fmt.Errorf("load BTF: %w", err)
- }
+ attr.ProgBtfFd = uint32(handle.FD())
- if handle != nil {
- attr.ProgBtfFd = uint32(handle.FD())
+ attr.FuncInfoRecSize = btf.FuncInfoSize
+ attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize
+ attr.FuncInfo = sys.NewSlicePointer(fib)
- fib, lib, err := btf.MarshalExtInfos(insns, spec.BTF.TypeID)
- if err != nil {
- return nil, err
- }
+ attr.LineInfoRecSize = btf.LineInfoSize
+ attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize
+ attr.LineInfo = sys.NewSlicePointer(lib)
+ }
- attr.FuncInfoRecSize = btf.FuncInfoSize
- attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize
- attr.FuncInfo = sys.NewSlicePointer(fib)
+ if err := applyRelocations(insns, opts.KernelTypes, spec.ByteOrder); err != nil {
+ return nil, fmt.Errorf("apply CO-RE relocations: %w", err)
+ }
- attr.LineInfoRecSize = btf.LineInfoSize
- attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize
- attr.LineInfo = sys.NewSlicePointer(lib)
- }
+ kconfig, err := resolveKconfigReferences(insns)
+ if err != nil {
+ return nil, fmt.Errorf("resolve .kconfig: %w", err)
}
+ defer kconfig.Close()
if err := fixupAndValidate(insns); err != nil {
return nil, err
}
+ handles, err := fixupKfuncs(insns)
+ if err != nil {
+ return nil, fmt.Errorf("fixing up kfuncs: %w", err)
+ }
+ defer handles.close()
+
+ if len(handles) > 0 {
+ fdArray := handles.fdArray()
+ attr.FdArray = sys.NewPointer(unsafe.Pointer(&fdArray[0]))
+ }
+
buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
- err := insns.Marshal(buf, internal.NativeEndian)
+ err = insns.Marshal(buf, internal.NativeEndian)
if err != nil {
return nil, err
}
@@ -261,28 +300,32 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err)
}
- attr.AttachBtfId = uint32(targetID)
- attr.AttachProgFd = uint32(spec.AttachTarget.FD())
+ attr.AttachBtfId = targetID
+ attr.AttachBtfObjFd = uint32(spec.AttachTarget.FD())
defer runtime.KeepAlive(spec.AttachTarget)
} else if spec.AttachTo != "" {
- targetID, err := findTargetInKernel(kernelTypes, spec.AttachTo, spec.Type, spec.AttachType)
+ module, targetID, err := findProgramTargetInKernel(spec.AttachTo, spec.Type, spec.AttachType)
if err != nil && !errors.Is(err, errUnrecognizedAttachType) {
// We ignore errUnrecognizedAttachType since AttachTo may be non-empty
// for programs that don't attach anywhere.
return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err)
}
- attr.AttachBtfId = uint32(targetID)
+ attr.AttachBtfId = targetID
+ if module != nil {
+ attr.AttachBtfObjFd = uint32(module.FD())
+ defer module.Close()
+ }
}
- logSize := DefaultVerifierLogSize
- if opts.LogSize > 0 {
- logSize = opts.LogSize
+ if opts.LogSize == 0 {
+ opts.LogSize = DefaultVerifierLogSize
}
+ // The caller requested a specific verifier log level. Set up the log buffer.
var logBuf []byte
- if opts.LogLevel > 0 {
- logBuf = make([]byte, logSize)
+ if !opts.LogDisabled && opts.LogLevel != 0 {
+ logBuf = make([]byte, opts.LogSize)
attr.LogLevel = opts.LogLevel
attr.LogSize = uint32(len(logBuf))
attr.LogBuf = sys.NewSlicePointer(logBuf)
@@ -293,13 +336,19 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil
}
- if opts.LogLevel == 0 && opts.LogSize >= 0 {
- // Re-run with the verifier enabled to get better error messages.
- logBuf = make([]byte, logSize)
- attr.LogLevel = 1
+ // An error occurred loading the program, but the caller did not explicitly
+ // enable the verifier log. Re-run with branch-level verifier logs enabled to
+ // obtain more info. Preserve the original error to return it to the caller.
+ // An undersized log buffer will result in ENOSPC regardless of the underlying
+ // cause.
+ var err2 error
+ if !opts.LogDisabled && opts.LogLevel == 0 {
+ logBuf = make([]byte, opts.LogSize)
+ attr.LogLevel = LogLevelBranch
attr.LogSize = uint32(len(logBuf))
attr.LogBuf = sys.NewSlicePointer(logBuf)
- _, _ = sys.ProgLoad(attr)
+
+ _, err2 = sys.ProgLoad(attr)
}
switch {
@@ -318,13 +367,14 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
return nil, fmt.Errorf("load program: %w", err)
}
}
- }
- err = internal.ErrorWithLog(err, logBuf)
- if btfDisabled {
- return nil, fmt.Errorf("load program: %w (BTF disabled)", err)
+ if opts.LogSize > maxVerifierLogSize {
+ return nil, fmt.Errorf("load program: %w (ProgramOptions.LogSize exceeds maximum value of %d)", err, maxVerifierLogSize)
+ }
}
- return nil, fmt.Errorf("load program: %w", err)
+
+ truncated := errors.Is(err, unix.ENOSPC) || errors.Is(err2, unix.ENOSPC)
+ return nil, internal.ErrorWithLog("load program", err, logBuf, truncated)
}
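
A sketch of how callers are expected to drive the new log options and inspect the resulting VerifierError; the field values are illustrative:

    package main

    import (
        "errors"
        "fmt"

        "github.com/cilium/ebpf"
    )

    func load(spec *ebpf.ProgramSpec) (*ebpf.Program, error) {
        prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{
            // Non-zero LogLevel: log unconditionally, single load attempt.
            LogLevel: ebpf.LogLevelInstruction | ebpf.LogLevelStats,
            LogSize:  1 << 20, // 1 MiB; Truncated is set if this is too small
        })
        var verr *ebpf.VerifierError
        if errors.As(err, &verr) {
            fmt.Printf("verifier rejected program (truncated=%v)\n", verr.Truncated)
        }
        return prog, err
    }

    func main() {} // building a ProgramSpec is out of scope here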
// NewProgramFromFD creates a program from a raw fd.
@@ -362,7 +412,7 @@ func newProgramFromFD(fd *sys.FD) (*Program, error) {
return nil, fmt.Errorf("discover program type: %w", err)
}
- return &Program{"", fd, "", "", info.Type}, nil
+ return &Program{"", fd, info.Name, "", info.Type}, nil
}
func (p *Program) String() string {
@@ -433,7 +483,8 @@ func (p *Program) Clone() (*Program, error) {
// Calling Pin on a previously pinned program will overwrite the path, except when
// the new path already exists. Re-pinning across filesystems is not supported.
//
-// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs
+// This requires bpffs to be mounted above fileName.
+// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd
func (p *Program) Pin(fileName string) error {
if err := internal.Pin(p.pinnedPath, fileName, p.fd); err != nil {
return err
@@ -474,6 +525,9 @@ func (p *Program) Close() error {
// Various options for Run'ing a Program
type RunOptions struct {
// Program's data input. Required field.
+ //
+ // The kernel expects at least 14 bytes input for an ethernet header for
+ // XDP and SKB programs.
Data []byte
// Program's data after Program has run. Caller must allocate. Optional field.
DataOut []byte
@@ -481,7 +535,10 @@ type RunOptions struct {
Context interface{}
// Program's context after Program has run. Must be a pointer or slice. Optional field.
ContextOut interface{}
- // Number of times to run Program. Optional field. Defaults to 1.
+ // Minimum number of times to run Program. Optional field. Defaults to 1.
+ //
+ // The program may be executed more often than this due to interruptions, e.g.
+ // when runtime.AllThreadsSyscall is invoked.
Repeat uint32
// Optional flags.
Flags uint32
@@ -490,6 +547,8 @@ type RunOptions struct {
CPU uint32
// Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer
// or similar. Typically used during benchmarking. Optional field.
+ //
+ // Deprecated: use [testing.B.ReportMetric] with unit "ns/op" instead.
Reset func()
}
@@ -517,9 +576,9 @@ func (p *Program) Test(in []byte) (uint32, []byte, error) {
Repeat: 1,
}
- ret, _, err := p.testRun(&opts)
+ ret, _, err := p.run(&opts)
if err != nil {
- return ret, nil, fmt.Errorf("can't test program: %w", err)
+ return ret, nil, fmt.Errorf("test program: %w", err)
}
return ret, opts.DataOut, nil
}
@@ -528,9 +587,9 @@ func (p *Program) Test(in []byte) (uint32, []byte, error) {
//
// Note: the same restrictions from Test apply.
func (p *Program) Run(opts *RunOptions) (uint32, error) {
- ret, _, err := p.testRun(opts)
+ ret, _, err := p.run(opts)
if err != nil {
- return ret, fmt.Errorf("can't test program: %w", err)
+ return ret, fmt.Errorf("run program: %w", err)
}
return ret, nil
}
@@ -542,9 +601,6 @@ func (p *Program) Run(opts *RunOptions) (uint32, error) {
// run or an error. reset is called whenever the benchmark syscall is
// interrupted, and should be set to testing.B.ResetTimer or similar.
//
-// Note: profiling a call to this function will skew it's results, see
-// /~https://github.com/cilium/ebpf/issues/24
-//
// This function requires at least Linux 4.12.
func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) {
if uint(repeat) > math.MaxUint32 {
@@ -557,14 +613,14 @@ func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.D
Reset: reset,
}
- ret, total, err := p.testRun(&opts)
+ ret, total, err := p.run(&opts)
if err != nil {
- return ret, total, fmt.Errorf("can't benchmark program: %w", err)
+ return ret, total, fmt.Errorf("benchmark program: %w", err)
}
return ret, total, nil
}
-var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() error {
+var haveProgRun = internal.NewFeatureTest("BPF_PROG_RUN", "4.12", func() error {
prog, err := NewProgram(&ProgramSpec{
// SocketFilter does not require privileges on newer kernels.
Type: SocketFilter,
@@ -580,8 +636,7 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() e
}
defer prog.Close()
- // Programs require at least 14 bytes input
- in := make([]byte, 14)
+ in := internal.EmptyBPFContext
attr := sys.ProgRunAttr{
ProgFd: uint32(prog.FD()),
DataSizeIn: uint32(len(in)),
@@ -599,7 +654,7 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() e
// We know that PROG_TEST_RUN is supported if we get EINTR.
return nil
- case errors.Is(err, unix.ENOTSUPP):
+ case errors.Is(err, sys.ENOTSUPP):
// The first PROG_TEST_RUN patches shipped in 4.12 didn't include
// a test runner for SocketFilter. ENOTSUPP means PROG_TEST_RUN is
// supported, but not for the program type used in the probe.
@@ -609,12 +664,12 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() e
return err
})
-func (p *Program) testRun(opts *RunOptions) (uint32, time.Duration, error) {
+func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) {
if uint(len(opts.Data)) > math.MaxUint32 {
return 0, 0, fmt.Errorf("input is too long")
}
- if err := haveProgTestRun(); err != nil {
+ if err := haveProgRun(); err != nil {
return 0, 0, err
}
@@ -647,24 +702,45 @@ func (p *Program) testRun(opts *RunOptions) (uint32, time.Duration, error) {
Cpu: opts.CPU,
}
+ if attr.Repeat == 0 {
+ attr.Repeat = 1
+ }
+
+retry:
for {
err := sys.ProgRun(&attr)
if err == nil {
- break
+ break retry
}
if errors.Is(err, unix.EINTR) {
+ if attr.Repeat == 1 {
+ // Older kernels check whether enough repetitions have been
+ // executed only after checking for pending signals.
+ //
+ // run signal? done? run ...
+ //
+ // As a result we can get EINTR for repeat==1 even though
+ // the program was run exactly once. Treat this as a
+ // successful run instead.
+ //
+ // Since commit 607b9cc92bd7 ("bpf: Consolidate shared test timing code")
+ // the conditions are reversed:
+ // run done? signal? ...
+ break retry
+ }
+
if opts.Reset != nil {
opts.Reset()
}
- continue
+ continue retry
}
- if errors.Is(err, unix.ENOTSUPP) {
- return 0, 0, fmt.Errorf("kernel doesn't support testing program type %s: %w", p.Type(), ErrNotSupported)
+ if errors.Is(err, sys.ENOTSUPP) {
+ return 0, 0, fmt.Errorf("kernel doesn't support running %s: %w", p.Type(), ErrNotSupported)
}
- return 0, 0, fmt.Errorf("can't run test: %w", err)
+ return 0, 0, err
}
if opts.DataOut != nil {
@@ -726,7 +802,14 @@ func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error)
return nil, fmt.Errorf("info for %s: %w", fileName, err)
}
- return &Program{"", fd, filepath.Base(fileName), fileName, info.Type}, nil
+ var progName string
+ if haveObjName() == nil {
+ progName = info.Name
+ } else {
+ progName = filepath.Base(fileName)
+ }
+
+ return &Program{"", fd, progName, fileName, info.Type}, nil
}
// SanitizeName replaces all invalid characters in name with replacement.
@@ -770,11 +853,15 @@ var errUnrecognizedAttachType = errors.New("unrecognized attach type")
// find an attach target type in the kernel.
//
-// spec may be nil and defaults to the canonical kernel BTF. name together with
-// progType and attachType determine which type we need to attach to.
+// name, progType and attachType determine which type we need to attach to.
//
-// Returns errUnrecognizedAttachType.
-func findTargetInKernel(spec *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) {
+// The attach target may be in a loaded kernel module.
+// In that case the returned handle will be non-nil.
+// The caller is responsible for closing the handle.
+//
+// Returns errUnrecognizedAttachType if the combination of progType and attachType
+// is not recognised.
+func findProgramTargetInKernel(name string, progType ProgramType, attachType AttachType) (*btf.Handle, btf.TypeID, error) {
type match struct {
p ProgramType
a AttachType
@@ -782,59 +869,123 @@ func findTargetInKernel(spec *btf.Spec, name string, progType ProgramType, attac
var (
typeName, featureName string
- isBTFTypeFunc = true
+ target btf.Type
)
switch (match{progType, attachType}) {
case match{LSM, AttachLSMMac}:
typeName = "bpf_lsm_" + name
featureName = name + " LSM hook"
+ target = (*btf.Func)(nil)
case match{Tracing, AttachTraceIter}:
typeName = "bpf_iter_" + name
featureName = name + " iterator"
+ target = (*btf.Func)(nil)
case match{Tracing, AttachTraceFEntry}:
typeName = name
featureName = fmt.Sprintf("fentry %s", name)
+ target = (*btf.Func)(nil)
case match{Tracing, AttachTraceFExit}:
typeName = name
featureName = fmt.Sprintf("fexit %s", name)
+ target = (*btf.Func)(nil)
case match{Tracing, AttachModifyReturn}:
typeName = name
featureName = fmt.Sprintf("fmod_ret %s", name)
+ target = (*btf.Func)(nil)
case match{Tracing, AttachTraceRawTp}:
typeName = fmt.Sprintf("btf_trace_%s", name)
featureName = fmt.Sprintf("raw_tp %s", name)
- isBTFTypeFunc = false
+ target = (*btf.Typedef)(nil)
default:
- return 0, errUnrecognizedAttachType
+ return nil, 0, errUnrecognizedAttachType
}
- spec, err := maybeLoadKernelBTF(spec)
+ spec, err := btf.LoadKernelSpec()
if err != nil {
- return 0, fmt.Errorf("load kernel spec: %w", err)
+ return nil, 0, fmt.Errorf("load kernel spec: %w", err)
}
- var target btf.Type
- if isBTFTypeFunc {
- var targetFunc *btf.Func
- err = spec.TypeByName(typeName, &targetFunc)
- target = targetFunc
- } else {
- var targetTypedef *btf.Typedef
- err = spec.TypeByName(typeName, &targetTypedef)
- target = targetTypedef
+ spec, module, err := findTargetInKernel(spec, typeName, &target)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil, 0, &internal.UnsupportedFeatureError{Name: featureName}
}
+ // See cilium/ebpf#894. Until we can disambiguate between equally-named kernel
+ // symbols, we should explicitly refuse program loads. They will not reliably
+ // do what the caller intended.
+ if errors.Is(err, btf.ErrMultipleMatches) {
+ return nil, 0, fmt.Errorf("attaching to ambiguous kernel symbol is not supported: %w", err)
+ }
+ if err != nil {
+ return nil, 0, fmt.Errorf("find target for %s: %w", featureName, err)
+ }
+
+ id, err := spec.TypeID(target)
+ return module, id, err
+}
+// findTargetInKernel attempts to find a named type in the current kernel.
+//
+// target will point at the found type after a successful call. Searches both
+// vmlinux and any loaded modules.
+//
+// Returns a non-nil handle if the type was found in a module, or btf.ErrNotFound
+// if the type wasn't found at all.
+func findTargetInKernel(kernelSpec *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) {
+ err := kernelSpec.TypeByName(typeName, target)
+ if errors.Is(err, btf.ErrNotFound) {
+ spec, module, err := findTargetInModule(kernelSpec, typeName, target)
+ if err != nil {
+ return nil, nil, fmt.Errorf("find target in modules: %w", err)
+ }
+ return spec, module, nil
+ }
if err != nil {
+ return nil, nil, fmt.Errorf("find target in vmlinux: %w", err)
+ }
+ return kernelSpec, nil, err
+}
+
+// findTargetInModule attempts to find a named type in any loaded module.
+//
+// base must contain the kernel's types and is used to parse kmod BTF. Modules
+// are searched in the order they were loaded.
+//
+// Returns btf.ErrNotFound if the target can't be found in any module.
+func findTargetInModule(base *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) {
+ it := new(btf.HandleIterator)
+ defer it.Handle.Close()
+
+ for it.Next() {
+ info, err := it.Handle.Info()
+ if err != nil {
+ return nil, nil, fmt.Errorf("get info for BTF ID %d: %w", it.ID, err)
+ }
+
+ if !info.IsModule() {
+ continue
+ }
+
+ spec, err := it.Handle.Spec(base)
+ if err != nil {
+ return nil, nil, fmt.Errorf("parse types for module %s: %w", info.Name, err)
+ }
+
+ err = spec.TypeByName(typeName, target)
if errors.Is(err, btf.ErrNotFound) {
- return 0, &internal.UnsupportedFeatureError{
- Name: featureName,
- }
+ continue
+ }
+ if err != nil {
+ return nil, nil, fmt.Errorf("lookup type in module %s: %w", info.Name, err)
}
- return 0, fmt.Errorf("find target for %s: %w", featureName, err)
+
+ return spec, it.Take(), nil
+ }
+ if err := it.Err(); err != nil {
+ return nil, nil, fmt.Errorf("iterate modules: %w", err)
}
- return spec.TypeID(target)
+ return nil, nil, btf.ErrNotFound
}
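
A sketch of enumerating loaded-module BTF with the same iterator API findTargetInModule uses (printing names instead of matching a type; Take() would be needed to keep a matching handle alive past the deferred Close):

    package main

    import (
        "fmt"

        "github.com/cilium/ebpf/btf"
    )

    func listModuleBTF() error {
        it := new(btf.HandleIterator)
        defer it.Handle.Close()

        for it.Next() {
            info, err := it.Handle.Info()
            if err != nil {
                return err
            }
            if info.IsModule() {
                fmt.Println("module BTF:", info.Name)
            }
        }
        return it.Err()
    }

    func main() { _ = listModuleBTF() }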
// find an attach target type in a program.
diff --git a/vendor/github.com/cilium/ebpf/run-tests.sh b/vendor/github.com/cilium/ebpf/run-tests.sh
index c21cca9e5e7d..1d1490ad1d97 100644
--- a/vendor/github.com/cilium/ebpf/run-tests.sh
+++ b/vendor/github.com/cilium/ebpf/run-tests.sh
@@ -6,6 +6,8 @@
# $ ./run-tests.sh 5.4
# Run a subset of tests:
# $ ./run-tests.sh 5.4 ./link
+# Run using a local kernel image
+# $ ./run-tests.sh /path/to/bzImage
set -euo pipefail
@@ -95,38 +97,45 @@ elif [[ "${1:-}" = "--exec-test" ]]; then
exit $rc # this return code is "swallowed" by qemu
fi
-readonly kernel_version="${1:-}"
-if [[ -z "${kernel_version}" ]]; then
- echo "Expecting kernel version as first argument"
+if [[ -z "${1:-}" ]]; then
+ echo "Expecting kernel version or path as first argument"
exit 1
fi
-shift
-readonly kernel="linux-${kernel_version}.bz"
-readonly selftests="linux-${kernel_version}-selftests-bpf.tgz"
readonly input="$(mktemp -d)"
readonly tmp_dir="${TMPDIR:-/tmp}"
-readonly branch="${BRANCH:-master}"
fetch() {
echo Fetching "${1}"
pushd "${tmp_dir}" > /dev/null
- curl -s -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "/~https://github.com/cilium/ci-kernels/raw/${branch}/${1}"
+ curl --no-progress-meter -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "/~https://github.com/cilium/ci-kernels/raw/${BRANCH:-master}/${1}"
local ret=$?
popd > /dev/null
return $ret
}
-fetch "${kernel}"
-cp "${tmp_dir}/${kernel}" "${input}/bzImage"
-
-if fetch "${selftests}"; then
- echo "Decompressing selftests"
- mkdir "${input}/bpf"
- tar --strip-components=4 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf"
+if [[ -f "${1}" ]]; then
+ readonly kernel="${1}"
+ cp "${1}" "${input}/bzImage"
else
- echo "No selftests found, disabling"
+ # LINUX_VERSION_CODE test compares this to discovered value.
+ export KERNEL_VERSION="${1}"
+
+ readonly kernel="linux-${1}.bz"
+ readonly selftests="linux-${1}-selftests-bpf.tgz"
+
+ fetch "${kernel}"
+ cp "${tmp_dir}/${kernel}" "${input}/bzImage"
+
+ if fetch "${selftests}"; then
+ echo "Decompressing selftests"
+ mkdir "${input}/bpf"
+ tar --strip-components=4 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf"
+ else
+ echo "No selftests found, disabling"
+ fi
fi
+shift
args=(-short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...)
if (( $# > 0 )); then
@@ -135,11 +144,9 @@ fi
export GOFLAGS=-mod=readonly
export CGO_ENABLED=0
-# LINUX_VERSION_CODE test compares this to discovered value.
-export KERNEL_VERSION="${kernel_version}"
-echo Testing on "${kernel_version}"
+echo Testing on "${kernel}"
go test -exec "$script --exec-vm $input" "${args[@]}"
-echo "Test successful on ${kernel_version}"
+echo "Test successful on ${kernel}"
rm -r "${input}"
diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go
index e5c270a55851..fd21dea24ffc 100644
--- a/vendor/github.com/cilium/ebpf/syscalls.go
+++ b/vendor/github.com/cilium/ebpf/syscalls.go
@@ -4,13 +4,25 @@ import (
"bytes"
"errors"
"fmt"
+ "os"
+ "runtime"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/tracefs"
"github.com/cilium/ebpf/internal/unix"
)
+var (
+ // pre-allocating these here since they may
+ // get called in hot code paths and cause
+ // unnecessary memory allocations
+ sysErrKeyNotExist = sys.Error(ErrKeyNotExist, unix.ENOENT)
+ sysErrKeyExist = sys.Error(ErrKeyExist, unix.EEXIST)
+ sysErrNotSupported = sys.Error(ErrNotSupported, sys.ENOTSUPP)
+)
+
// invalidBPFObjNameChar returns true if char may not appear in
// a BPF object name.
func invalidBPFObjNameChar(char rune) bool {
@@ -47,7 +59,7 @@ func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD,
})
}
-var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error {
+var haveNestedMaps = internal.NewFeatureTest("nested maps", "4.12", func() error {
_, err := sys.MapCreate(&sys.MapCreateAttr{
MapType: sys.MapType(ArrayOfMaps),
KeySize: 4,
@@ -65,7 +77,7 @@ var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error {
return err
})
-var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() error {
+var haveMapMutabilityModifiers = internal.NewFeatureTest("read- and write-only maps", "5.2", func() error {
// This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since
// BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check.
m, err := sys.MapCreate(&sys.MapCreateAttr{
@@ -82,7 +94,7 @@ var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps
return nil
})
-var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error {
+var haveMmapableMaps = internal.NewFeatureTest("mmapable maps", "5.5", func() error {
// This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps.
m, err := sys.MapCreate(&sys.MapCreateAttr{
MapType: sys.MapType(Array),
@@ -98,7 +110,7 @@ var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error
return nil
})
-var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error {
+var haveInnerMaps = internal.NewFeatureTest("inner maps", "5.10", func() error {
// This checks BPF_F_INNER_MAP, which appeared in 5.10.
m, err := sys.MapCreate(&sys.MapCreateAttr{
MapType: sys.MapType(Array),
@@ -114,7 +126,7 @@ var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error {
return nil
})
-var haveNoPreallocMaps = internal.FeatureTest("prealloc maps", "4.6", func() error {
+var haveNoPreallocMaps = internal.NewFeatureTest("prealloc maps", "4.6", func() error {
// This checks BPF_F_NO_PREALLOC, which appeared in 4.6.
m, err := sys.MapCreate(&sys.MapCreateAttr{
MapType: sys.MapType(Hash),
@@ -136,15 +148,15 @@ func wrapMapError(err error) error {
}
if errors.Is(err, unix.ENOENT) {
- return sys.Error(ErrKeyNotExist, unix.ENOENT)
+ return sysErrKeyNotExist
}
if errors.Is(err, unix.EEXIST) {
- return sys.Error(ErrKeyExist, unix.EEXIST)
+ return sysErrKeyExist
}
- if errors.Is(err, unix.ENOTSUPP) {
- return sys.Error(ErrNotSupported, unix.ENOTSUPP)
+ if errors.Is(err, sys.ENOTSUPP) {
+ return sysErrNotSupported
}
if errors.Is(err, unix.E2BIG) {
@@ -154,7 +166,7 @@ func wrapMapError(err error) error {
return err
}
-var haveObjName = internal.FeatureTest("object names", "4.15", func() error {
+var haveObjName = internal.NewFeatureTest("object names", "4.15", func() error {
attr := sys.MapCreateAttr{
MapType: sys.MapType(Array),
KeySize: 4,
@@ -172,7 +184,7 @@ var haveObjName = internal.FeatureTest("object names", "4.15", func() error {
return nil
})
-var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() error {
+var objNameAllowsDot = internal.NewFeatureTest("dot in object names", "5.2", func() error {
if err := haveObjName(); err != nil {
return err
}
@@ -194,7 +206,7 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func()
return nil
})
-var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error {
+var haveBatchAPI = internal.NewFeatureTest("map batch api", "5.6", func() error {
var maxEntries uint32 = 2
attr := sys.MapCreateAttr{
MapType: sys.MapType(Hash),
@@ -226,7 +238,7 @@ var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error {
return nil
})
-var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", func() error {
+var haveProbeReadKernel = internal.NewFeatureTest("bpf_probe_read_kernel", "5.5", func() error {
insns := asm.Instructions{
asm.Mov.Reg(asm.R1, asm.R10),
asm.Add.Imm(asm.R1, -8),
@@ -244,7 +256,7 @@ var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", f
return nil
})
-var haveBPFToBPFCalls = internal.FeatureTest("bpf2bpf calls", "4.16", func() error {
+var haveBPFToBPFCalls = internal.NewFeatureTest("bpf2bpf calls", "4.16", func() error {
insns := asm.Instructions{
asm.Call.Label("prog2").WithSymbol("prog1"),
asm.Return(),
@@ -262,3 +274,32 @@ var haveBPFToBPFCalls = internal.FeatureTest("bpf2bpf calls", "4.16", func() err
_ = fd.Close()
return nil
})
+
+var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", "4.17", func() error {
+ prefix := internal.PlatformPrefix()
+ if prefix == "" {
+ return fmt.Errorf("unable to find the platform prefix for (%s)", runtime.GOARCH)
+ }
+
+ args := tracefs.ProbeArgs{
+ Type: tracefs.Kprobe,
+ Symbol: prefix + "sys_bpf",
+ Pid: -1,
+ }
+
+ var err error
+ args.Group, err = tracefs.RandomGroup("ebpf_probe")
+ if err != nil {
+ return err
+ }
+
+ evt, err := tracefs.NewEvent(args)
+ if errors.Is(err, os.ErrNotExist) {
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+
+ return evt.Close()
+})
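
The sysErr* variables above trade per-call error wrapping for package-level sentinels, since wrapMapError sits on hot map lookup/update paths. A standalone sketch of the same pattern using only the standard library (the names here are illustrative, not the library's):

```go
// Illustrative sketch of the pre-allocated wrapped-error pattern: wrap the
// sentinel once at package init instead of on every failing call, so hot
// paths return a ready-made error value without allocating.
package main

import (
	"errors"
	"fmt"
	"syscall"
)

var ErrKeyNotExist = errors.New("key does not exist")

// Wrapped once, reused forever. errors.Is still matches both the public
// sentinel and the underlying errno (multiple %w needs Go 1.20+).
var errKeyNotExist = fmt.Errorf("%w: %w", ErrKeyNotExist, syscall.ENOENT)

func lookup(ok bool) error {
	if !ok {
		return errKeyNotExist // no allocation on this path
	}
	return nil
}

func main() {
	err := lookup(false)
	fmt.Println(errors.Is(err, ErrKeyNotExist), errors.Is(err, syscall.ENOENT))
}
```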
diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go
index a27b44247454..35927e2ab80b 100644
--- a/vendor/github.com/cilium/ebpf/types.go
+++ b/vendor/github.com/cilium/ebpf/types.go
@@ -1,6 +1,7 @@
package ebpf
import (
+ "github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
@@ -10,11 +11,6 @@ import (
// that will be initialized in the kernel.
type MapType uint32
-// Max returns the latest supported MapType.
-func (MapType) Max() MapType {
- return maxMapType - 1
-}
-
// All the various map types that can be created
const (
UnspecifiedMap MapType = iota
@@ -99,8 +95,6 @@ const (
InodeStorage
// TaskStorage - Specialized local storage map for task_struct.
TaskStorage
- // maxMapType - Bound enum of MapTypes, has to be last in enum.
- maxMapType
)
// hasPerCPUValue returns true if the Map stores a value per CPU.
@@ -120,25 +114,9 @@ func (mt MapType) canStoreProgram() bool {
return mt == ProgramArray
}
-// hasBTF returns true if the map type supports BTF key/value metadata.
-func (mt MapType) hasBTF() bool {
- switch mt {
- case PerfEventArray, CGroupArray, StackTrace, ArrayOfMaps, HashOfMaps, DevMap,
- DevMapHash, CPUMap, XSKMap, SockMap, SockHash, Queue, Stack, RingBuf:
- return false
- default:
- return true
- }
-}
-
// ProgramType of the eBPF program
type ProgramType uint32
-// Max return the latest supported ProgramType.
-func (ProgramType) Max() ProgramType {
- return maxProgramType - 1
-}
-
// eBPF program types
const (
UnspecifiedProgram ProgramType = iota
@@ -173,7 +151,6 @@ const (
LSM
SkLookup
Syscall
- maxProgramType
)
// AttachType of the eBPF program, needed to differentiate allowed context accesses in
@@ -229,6 +206,7 @@ const (
AttachSkReuseportSelect
AttachSkReuseportSelectOrMigrate
AttachPerfEvent
+ AttachTraceKprobeMulti
)
// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command
@@ -282,3 +260,20 @@ type BatchOptions struct {
ElemFlags uint64
Flags uint64
}
+
+// LogLevel controls the verbosity of the kernel's eBPF program verifier.
+// These constants can be used for the ProgramOptions.LogLevel field.
+type LogLevel = sys.LogLevel
+
+const (
+ // Print verifier state at branch points.
+ LogLevelBranch = sys.BPF_LOG_LEVEL1
+
+ // Print verifier state for every instruction.
+ // Available since Linux v5.2.
+ LogLevelInstruction = sys.BPF_LOG_LEVEL2
+
+ // Print verifier errors and stats at the end of the verification process.
+ // Available since Linux v5.2.
+ LogLevelStats = sys.BPF_LOG_STATS
+)
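
These re-exported constants let callers request verifier logging without reaching into internal/sys. A hedged usage sketch against the public cilium/ebpf API (the object path is a placeholder):

```go
// Sketch: requesting verbose verifier output when loading programs with
// cilium/ebpf. "bpf_prog.o" is a placeholder path.
package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	spec, err := ebpf.LoadCollectionSpec("bpf_prog.o") // placeholder
	if err != nil {
		log.Fatal(err)
	}

	coll, err := ebpf.NewCollectionWithOptions(spec, ebpf.CollectionOptions{
		Programs: ebpf.ProgramOptions{
			// Per-instruction verifier state plus end-of-run stats (both v5.2+).
			LogLevel: ebpf.LogLevelInstruction | ebpf.LogLevelStats,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()

	for name, prog := range coll.Programs {
		fmt.Printf("%s verifier log:\n%s\n", name, prog.VerifierLog)
	}
}
```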
diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go
index e80b948b0960..5679f2254301 100644
--- a/vendor/github.com/cilium/ebpf/types_string.go
+++ b/vendor/github.com/cilium/ebpf/types_string.go
@@ -38,12 +38,11 @@ func _() {
_ = x[RingBuf-27]
_ = x[InodeStorage-28]
_ = x[TaskStorage-29]
- _ = x[maxMapType-30]
}
-const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStoragemaxMapType"
+const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStorage"
-var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290, 300}
+var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290}
func (i MapType) String() string {
if i >= MapType(len(_MapType_index)-1) {
@@ -87,12 +86,11 @@ func _() {
_ = x[LSM-29]
_ = x[SkLookup-30]
_ = x[Syscall-31]
- _ = x[maxProgramType-32]
}
-const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallmaxProgramType"
+const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscall"
-var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 315}
+var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301}
func (i ProgramType) String() string {
if i >= ProgramType(len(_ProgramType_index)-1) {
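
The stringer tables above were regenerated after dropping the maxMapType/maxProgramType sentinels; the generated out-of-range fallback still covers unknown values, so formatting behaviour is unchanged. A tiny sketch of the observable result:

```go
// Sketch: String() on a known constant uses the regenerated table, while an
// out-of-range value hits the generated fallback branch.
package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func main() {
	fmt.Println(ebpf.Hash)        // "Hash"
	fmt.Println(ebpf.MapType(99)) // "MapType(99)" via the fallback
}
```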
diff --git a/vendor/github.com/containerd/cgroups/v3/Makefile b/vendor/github.com/containerd/cgroups/v3/Makefile
index 088572980dff..8f8b6bc5ace8 100644
--- a/vendor/github.com/containerd/cgroups/v3/Makefile
+++ b/vendor/github.com/containerd/cgroups/v3/Makefile
@@ -13,14 +13,17 @@
# limitations under the License.
PACKAGES=$(shell go list ./... | grep -v /vendor/)
+GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(strip $(GO_BUILDTAGS))",)
+GO ?= go
+GO_BUILD_FLAGS ?=
all: cgutil
- go build -v
+ $(GO) build -v $(GO_TAGS)
cgutil:
- cd cmd/cgctl && go build -v
+ cd cmd/cgctl && $(GO) build $(GO_BUILD_FLAGS) -v $(GO_TAGS)
proto:
protobuild --quiet ${PACKAGES}
# Keep them Go-idiomatic and backward-compatible with the gogo/protobuf era.
- go-fix-acronym -w -a '(Cpu|Tcp|Rss)' $(shell find cgroup1/stats/ cgroup2/stats/ -name '*.pb.go')
+ go-fix-acronym -w -a '(Cpu|Tcp|Rss|Psi)' $(shell find cgroup1/stats/ cgroup2/stats/ -name '*.pb.go')
diff --git a/vendor/github.com/containerd/cgroups/v3/cgroup1/memory.go b/vendor/github.com/containerd/cgroups/v3/cgroup1/memory.go
index caf5e9a7ebb0..52fe690755d7 100644
--- a/vendor/github.com/containerd/cgroups/v3/cgroup1/memory.go
+++ b/vendor/github.com/containerd/cgroups/v3/cgroup1/memory.go
@@ -454,6 +454,9 @@ func getOomControlValue(mem *specs.LinuxMemory) *int64 {
if mem.DisableOOMKiller != nil && *mem.DisableOOMKiller {
i := int64(1)
return &i
+ } else if mem.DisableOOMKiller != nil && !*mem.DisableOOMKiller {
+ i := int64(0)
+ return &i
}
return nil
}
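
The new branch makes getOomControlValue honour all three states of the *bool: nil leaves memory.oom_control untouched, true writes 1, and false now explicitly writes 0 (previously false was indistinguishable from unset). A compact stdlib-only sketch of the tri-state mapping (the struct is a local stand-in for specs.LinuxMemory):

```go
// Sketch of the tri-state *bool → optional int64 mapping introduced above.
// LinuxMemory is a stand-in for the OCI specs.LinuxMemory type.
package main

import "fmt"

type LinuxMemory struct {
	DisableOOMKiller *bool
}

// oomControlValue returns nil (leave memory.oom_control alone), 1 (disable
// the OOM killer) or 0 (explicitly re-enable it).
func oomControlValue(mem *LinuxMemory) *int64 {
	if mem == nil || mem.DisableOOMKiller == nil {
		return nil
	}
	i := int64(0)
	if *mem.DisableOOMKiller {
		i = 1
	}
	return &i
}

func main() {
	t, f := true, false
	for _, m := range []*LinuxMemory{nil, {}, {DisableOOMKiller: &t}, {DisableOOMKiller: &f}} {
		if v := oomControlValue(m); v == nil {
			fmt.Println("unset: leave oom_control as-is")
		} else {
			fmt.Println("write:", *v)
		}
	}
}
```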
diff --git a/vendor/github.com/containerd/cgroups/v3/cgroup2/manager.go b/vendor/github.com/containerd/cgroups/v3/cgroup2/manager.go
index 4a4292d5fcbf..e540322b7fdf 100644
--- a/vendor/github.com/containerd/cgroups/v3/cgroup2/manager.go
+++ b/vendor/github.com/containerd/cgroups/v3/cgroup2/manager.go
@@ -21,6 +21,7 @@ import (
"context"
"errors"
"fmt"
+ "io/fs"
"math"
"os"
"path/filepath"
@@ -478,9 +479,9 @@ func (c *Manager) Delete() error {
return remove(c.path)
}
-func (c *Manager) Procs(recursive bool) ([]uint64, error) {
- var processes []uint64
- err := filepath.Walk(c.path, func(p string, info os.FileInfo, err error) error {
+func (c *Manager) getTasks(recursive bool, tType string) ([]uint64, error) {
+ var tasks []uint64
+ err := filepath.Walk(c.path, func(p string, info fs.FileInfo, err error) error {
if err != nil {
return err
}
@@ -491,17 +492,25 @@ func (c *Manager) Procs(recursive bool) ([]uint64, error) {
return filepath.SkipDir
}
_, name := filepath.Split(p)
- if name != cgroupProcs {
+ if name != tType {
return nil
}
- procs, err := parseCgroupProcsFile(p)
+ curTasks, err := parseCgroupTasksFile(p)
if err != nil {
return err
}
- processes = append(processes, procs...)
+ tasks = append(tasks, curTasks...)
return nil
})
- return processes, err
+ return tasks, err
+}
+
+func (c *Manager) Procs(recursive bool) ([]uint64, error) {
+ return c.getTasks(recursive, cgroupProcs)
+}
+
+func (c *Manager) Threads(recursive bool) ([]uint64, error) {
+ return c.getTasks(recursive, cgroupThreads)
}
func (c *Manager) MoveTo(destination *Manager) error {
@@ -559,6 +568,7 @@ func (c *Manager) Stat() (*stats.Metrics, error) {
NrPeriods: out["nr_periods"],
NrThrottled: out["nr_throttled"],
ThrottledUsec: out["throttled_usec"],
+ PSI: getStatPSIFromFile(filepath.Join(c.path, "cpu.pressure")),
}
metrics.Memory = &stats.MemoryStat{
Anon: out["anon"],
@@ -594,8 +604,11 @@ func (c *Manager) Stat() (*stats.Metrics, error) {
ThpCollapseAlloc: out["thp_collapse_alloc"],
Usage: getStatFileContentUint64(filepath.Join(c.path, "memory.current")),
UsageLimit: getStatFileContentUint64(filepath.Join(c.path, "memory.max")),
+ MaxUsage: getStatFileContentUint64(filepath.Join(c.path, "memory.peak")),
SwapUsage: getStatFileContentUint64(filepath.Join(c.path, "memory.swap.current")),
SwapLimit: getStatFileContentUint64(filepath.Join(c.path, "memory.swap.max")),
+ SwapMaxUsage: getStatFileContentUint64(filepath.Join(c.path, "memory.swap.peak")),
+ PSI: getStatPSIFromFile(filepath.Join(c.path, "memory.pressure")),
}
if len(memoryEvents) > 0 {
metrics.MemoryEvents = &stats.MemoryEvents{
@@ -606,7 +619,10 @@ func (c *Manager) Stat() (*stats.Metrics, error) {
OomKill: memoryEvents["oom_kill"],
}
}
- metrics.Io = &stats.IOStat{Usage: readIoStats(c.path)}
+ metrics.Io = &stats.IOStat{
+ Usage: readIoStats(c.path),
+ PSI: getStatPSIFromFile(filepath.Join(c.path, "io.pressure")),
+ }
metrics.Rdma = &stats.RdmaStat{
Current: rdmaStats(filepath.Join(c.path, "rdma.current")),
Limit: rdmaStats(filepath.Join(c.path, "rdma.max")),
@@ -870,14 +886,7 @@ func NewSystemd(slice, group string, pid int, resources *Resources) (*Manager, e
newSystemdProperty("TasksMax", uint64(resources.Pids.Max)))
}
- statusChan := make(chan string, 1)
- if _, err := conn.StartTransientUnitContext(ctx, group, "replace", properties, statusChan); err == nil {
- select {
- case <-statusChan:
- case <-time.After(time.Second):
- logrus.Warnf("Timed out while waiting for StartTransientUnit(%s) completion signal from dbus. Continuing...", group)
- }
- } else if !isUnitExists(err) {
+ if err := startUnit(conn, group, properties, pid == -1); err != nil {
return &Manager{}, err
}
@@ -886,6 +895,60 @@ func NewSystemd(slice, group string, pid int, resources *Resources) (*Manager, e
}, nil
}
+func startUnit(conn *systemdDbus.Conn, group string, properties []systemdDbus.Property, ignoreExists bool) error {
+ ctx := context.TODO()
+
+ statusChan := make(chan string, 1)
+ defer close(statusChan)
+
+ retry := true
+ started := false
+
+ for !started {
+ if _, err := conn.StartTransientUnitContext(ctx, group, "replace", properties, statusChan); err != nil {
+ if !isUnitExists(err) {
+ return err
+ }
+
+ if ignoreExists {
+ return nil
+ }
+
+ if retry {
+ retry = false
+ // When a unit of the same name already exists, it may be a leftover failed unit.
+ // If we reset it once, systemd can try to remove it.
+ attemptFailedUnitReset(conn, group)
+ continue
+ }
+
+ return err
+ } else {
+ started = true
+ }
+ }
+
+ select {
+ case s := <-statusChan:
+ if s != "done" {
+ attemptFailedUnitReset(conn, group)
+ return fmt.Errorf("error creating systemd unit `%s`: got `%s`", group, s)
+ }
+ case <-time.After(30 * time.Second):
+ logrus.Warnf("Timed out while waiting for StartTransientUnit(%s) completion signal from dbus. Continuing...", group)
+ }
+
+ return nil
+}
+
+func attemptFailedUnitReset(conn *systemdDbus.Conn, group string) {
+ err := conn.ResetFailedUnitContext(context.TODO(), group)
+
+ if err != nil {
+ logrus.Warnf("Unable to reset failed unit: %v", err)
+ }
+}
+
func LoadSystemd(slice, group string) (*Manager, error) {
if slice == "" {
slice = defaultSlice
diff --git a/vendor/github.com/containerd/cgroups/v3/cgroup2/stats/metrics.pb.go b/vendor/github.com/containerd/cgroups/v3/cgroup2/stats/metrics.pb.go
index d4cb4ad48996..3d53c224ceb8 100644
--- a/vendor/github.com/containerd/cgroups/v3/cgroup2/stats/metrics.pb.go
+++ b/vendor/github.com/containerd/cgroups/v3/cgroup2/stats/metrics.pb.go
@@ -115,6 +115,132 @@ func (x *Metrics) GetMemoryEvents() *MemoryEvents {
return nil
}
+type PSIData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Avg10 float64 `protobuf:"fixed64,1,opt,name=avg10,proto3" json:"avg10,omitempty"`
+ Avg60 float64 `protobuf:"fixed64,2,opt,name=avg60,proto3" json:"avg60,omitempty"`
+ Avg300 float64 `protobuf:"fixed64,3,opt,name=avg300,proto3" json:"avg300,omitempty"`
+ Total uint64 `protobuf:"varint,4,opt,name=total,proto3" json:"total,omitempty"`
+}
+
+func (x *PSIData) Reset() {
+ *x = PSIData{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PSIData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PSIData) ProtoMessage() {}
+
+func (x *PSIData) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PSIData.ProtoReflect.Descriptor instead.
+func (*PSIData) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *PSIData) GetAvg10() float64 {
+ if x != nil {
+ return x.Avg10
+ }
+ return 0
+}
+
+func (x *PSIData) GetAvg60() float64 {
+ if x != nil {
+ return x.Avg60
+ }
+ return 0
+}
+
+func (x *PSIData) GetAvg300() float64 {
+ if x != nil {
+ return x.Avg300
+ }
+ return 0
+}
+
+func (x *PSIData) GetTotal() uint64 {
+ if x != nil {
+ return x.Total
+ }
+ return 0
+}
+
+type PSIStats struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Some *PSIData `protobuf:"bytes,1,opt,name=some,proto3" json:"some,omitempty"`
+ Full *PSIData `protobuf:"bytes,2,opt,name=full,proto3" json:"full,omitempty"`
+}
+
+func (x *PSIStats) Reset() {
+ *x = PSIStats{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PSIStats) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PSIStats) ProtoMessage() {}
+
+func (x *PSIStats) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PSIStats.ProtoReflect.Descriptor instead.
+func (*PSIStats) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *PSIStats) GetSome() *PSIData {
+ if x != nil {
+ return x.Some
+ }
+ return nil
+}
+
+func (x *PSIStats) GetFull() *PSIData {
+ if x != nil {
+ return x.Full
+ }
+ return nil
+}
+
type PidsStat struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -127,7 +253,7 @@ type PidsStat struct {
func (x *PidsStat) Reset() {
*x = PidsStat{}
if protoimpl.UnsafeEnabled {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[1]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -140,7 +266,7 @@ func (x *PidsStat) String() string {
func (*PidsStat) ProtoMessage() {}
func (x *PidsStat) ProtoReflect() protoreflect.Message {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[1]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -153,7 +279,7 @@ func (x *PidsStat) ProtoReflect() protoreflect.Message {
// Deprecated: Use PidsStat.ProtoReflect.Descriptor instead.
func (*PidsStat) Descriptor() ([]byte, []int) {
- return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{1}
+ return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{3}
}
func (x *PidsStat) GetCurrent() uint64 {
@@ -175,18 +301,19 @@ type CPUStat struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- UsageUsec uint64 `protobuf:"varint,1,opt,name=usage_usec,json=usageUsec,proto3" json:"usage_usec,omitempty"`
- UserUsec uint64 `protobuf:"varint,2,opt,name=user_usec,json=userUsec,proto3" json:"user_usec,omitempty"`
- SystemUsec uint64 `protobuf:"varint,3,opt,name=system_usec,json=systemUsec,proto3" json:"system_usec,omitempty"`
- NrPeriods uint64 `protobuf:"varint,4,opt,name=nr_periods,json=nrPeriods,proto3" json:"nr_periods,omitempty"`
- NrThrottled uint64 `protobuf:"varint,5,opt,name=nr_throttled,json=nrThrottled,proto3" json:"nr_throttled,omitempty"`
- ThrottledUsec uint64 `protobuf:"varint,6,opt,name=throttled_usec,json=throttledUsec,proto3" json:"throttled_usec,omitempty"`
+ UsageUsec uint64 `protobuf:"varint,1,opt,name=usage_usec,json=usageUsec,proto3" json:"usage_usec,omitempty"`
+ UserUsec uint64 `protobuf:"varint,2,opt,name=user_usec,json=userUsec,proto3" json:"user_usec,omitempty"`
+ SystemUsec uint64 `protobuf:"varint,3,opt,name=system_usec,json=systemUsec,proto3" json:"system_usec,omitempty"`
+ NrPeriods uint64 `protobuf:"varint,4,opt,name=nr_periods,json=nrPeriods,proto3" json:"nr_periods,omitempty"`
+ NrThrottled uint64 `protobuf:"varint,5,opt,name=nr_throttled,json=nrThrottled,proto3" json:"nr_throttled,omitempty"`
+ ThrottledUsec uint64 `protobuf:"varint,6,opt,name=throttled_usec,json=throttledUsec,proto3" json:"throttled_usec,omitempty"`
+ PSI *PSIStats `protobuf:"bytes,7,opt,name=psi,proto3" json:"psi,omitempty"`
}
func (x *CPUStat) Reset() {
*x = CPUStat{}
if protoimpl.UnsafeEnabled {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[2]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -199,7 +326,7 @@ func (x *CPUStat) String() string {
func (*CPUStat) ProtoMessage() {}
func (x *CPUStat) ProtoReflect() protoreflect.Message {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[2]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -212,7 +339,7 @@ func (x *CPUStat) ProtoReflect() protoreflect.Message {
// Deprecated: Use CPUStat.ProtoReflect.Descriptor instead.
func (*CPUStat) Descriptor() ([]byte, []int) {
- return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{2}
+ return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{4}
}
func (x *CPUStat) GetUsageUsec() uint64 {
@@ -257,52 +384,62 @@ func (x *CPUStat) GetThrottledUsec() uint64 {
return 0
}
+func (x *CPUStat) GetPSI() *PSIStats {
+ if x != nil {
+ return x.PSI
+ }
+ return nil
+}
+
type MemoryStat struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Anon uint64 `protobuf:"varint,1,opt,name=anon,proto3" json:"anon,omitempty"`
- File uint64 `protobuf:"varint,2,opt,name=file,proto3" json:"file,omitempty"`
- KernelStack uint64 `protobuf:"varint,3,opt,name=kernel_stack,json=kernelStack,proto3" json:"kernel_stack,omitempty"`
- Slab uint64 `protobuf:"varint,4,opt,name=slab,proto3" json:"slab,omitempty"`
- Sock uint64 `protobuf:"varint,5,opt,name=sock,proto3" json:"sock,omitempty"`
- Shmem uint64 `protobuf:"varint,6,opt,name=shmem,proto3" json:"shmem,omitempty"`
- FileMapped uint64 `protobuf:"varint,7,opt,name=file_mapped,json=fileMapped,proto3" json:"file_mapped,omitempty"`
- FileDirty uint64 `protobuf:"varint,8,opt,name=file_dirty,json=fileDirty,proto3" json:"file_dirty,omitempty"`
- FileWriteback uint64 `protobuf:"varint,9,opt,name=file_writeback,json=fileWriteback,proto3" json:"file_writeback,omitempty"`
- AnonThp uint64 `protobuf:"varint,10,opt,name=anon_thp,json=anonThp,proto3" json:"anon_thp,omitempty"`
- InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"`
- ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"`
- InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"`
- ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"`
- Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"`
- SlabReclaimable uint64 `protobuf:"varint,16,opt,name=slab_reclaimable,json=slabReclaimable,proto3" json:"slab_reclaimable,omitempty"`
- SlabUnreclaimable uint64 `protobuf:"varint,17,opt,name=slab_unreclaimable,json=slabUnreclaimable,proto3" json:"slab_unreclaimable,omitempty"`
- Pgfault uint64 `protobuf:"varint,18,opt,name=pgfault,proto3" json:"pgfault,omitempty"`
- Pgmajfault uint64 `protobuf:"varint,19,opt,name=pgmajfault,proto3" json:"pgmajfault,omitempty"`
- WorkingsetRefault uint64 `protobuf:"varint,20,opt,name=workingset_refault,json=workingsetRefault,proto3" json:"workingset_refault,omitempty"`
- WorkingsetActivate uint64 `protobuf:"varint,21,opt,name=workingset_activate,json=workingsetActivate,proto3" json:"workingset_activate,omitempty"`
- WorkingsetNodereclaim uint64 `protobuf:"varint,22,opt,name=workingset_nodereclaim,json=workingsetNodereclaim,proto3" json:"workingset_nodereclaim,omitempty"`
- Pgrefill uint64 `protobuf:"varint,23,opt,name=pgrefill,proto3" json:"pgrefill,omitempty"`
- Pgscan uint64 `protobuf:"varint,24,opt,name=pgscan,proto3" json:"pgscan,omitempty"`
- Pgsteal uint64 `protobuf:"varint,25,opt,name=pgsteal,proto3" json:"pgsteal,omitempty"`
- Pgactivate uint64 `protobuf:"varint,26,opt,name=pgactivate,proto3" json:"pgactivate,omitempty"`
- Pgdeactivate uint64 `protobuf:"varint,27,opt,name=pgdeactivate,proto3" json:"pgdeactivate,omitempty"`
- Pglazyfree uint64 `protobuf:"varint,28,opt,name=pglazyfree,proto3" json:"pglazyfree,omitempty"`
- Pglazyfreed uint64 `protobuf:"varint,29,opt,name=pglazyfreed,proto3" json:"pglazyfreed,omitempty"`
- ThpFaultAlloc uint64 `protobuf:"varint,30,opt,name=thp_fault_alloc,json=thpFaultAlloc,proto3" json:"thp_fault_alloc,omitempty"`
- ThpCollapseAlloc uint64 `protobuf:"varint,31,opt,name=thp_collapse_alloc,json=thpCollapseAlloc,proto3" json:"thp_collapse_alloc,omitempty"`
- Usage uint64 `protobuf:"varint,32,opt,name=usage,proto3" json:"usage,omitempty"`
- UsageLimit uint64 `protobuf:"varint,33,opt,name=usage_limit,json=usageLimit,proto3" json:"usage_limit,omitempty"`
- SwapUsage uint64 `protobuf:"varint,34,opt,name=swap_usage,json=swapUsage,proto3" json:"swap_usage,omitempty"`
- SwapLimit uint64 `protobuf:"varint,35,opt,name=swap_limit,json=swapLimit,proto3" json:"swap_limit,omitempty"`
+ Anon uint64 `protobuf:"varint,1,opt,name=anon,proto3" json:"anon,omitempty"`
+ File uint64 `protobuf:"varint,2,opt,name=file,proto3" json:"file,omitempty"`
+ KernelStack uint64 `protobuf:"varint,3,opt,name=kernel_stack,json=kernelStack,proto3" json:"kernel_stack,omitempty"`
+ Slab uint64 `protobuf:"varint,4,opt,name=slab,proto3" json:"slab,omitempty"`
+ Sock uint64 `protobuf:"varint,5,opt,name=sock,proto3" json:"sock,omitempty"`
+ Shmem uint64 `protobuf:"varint,6,opt,name=shmem,proto3" json:"shmem,omitempty"`
+ FileMapped uint64 `protobuf:"varint,7,opt,name=file_mapped,json=fileMapped,proto3" json:"file_mapped,omitempty"`
+ FileDirty uint64 `protobuf:"varint,8,opt,name=file_dirty,json=fileDirty,proto3" json:"file_dirty,omitempty"`
+ FileWriteback uint64 `protobuf:"varint,9,opt,name=file_writeback,json=fileWriteback,proto3" json:"file_writeback,omitempty"`
+ AnonThp uint64 `protobuf:"varint,10,opt,name=anon_thp,json=anonThp,proto3" json:"anon_thp,omitempty"`
+ InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"`
+ ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"`
+ InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"`
+ ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"`
+ Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"`
+ SlabReclaimable uint64 `protobuf:"varint,16,opt,name=slab_reclaimable,json=slabReclaimable,proto3" json:"slab_reclaimable,omitempty"`
+ SlabUnreclaimable uint64 `protobuf:"varint,17,opt,name=slab_unreclaimable,json=slabUnreclaimable,proto3" json:"slab_unreclaimable,omitempty"`
+ Pgfault uint64 `protobuf:"varint,18,opt,name=pgfault,proto3" json:"pgfault,omitempty"`
+ Pgmajfault uint64 `protobuf:"varint,19,opt,name=pgmajfault,proto3" json:"pgmajfault,omitempty"`
+ WorkingsetRefault uint64 `protobuf:"varint,20,opt,name=workingset_refault,json=workingsetRefault,proto3" json:"workingset_refault,omitempty"`
+ WorkingsetActivate uint64 `protobuf:"varint,21,opt,name=workingset_activate,json=workingsetActivate,proto3" json:"workingset_activate,omitempty"`
+ WorkingsetNodereclaim uint64 `protobuf:"varint,22,opt,name=workingset_nodereclaim,json=workingsetNodereclaim,proto3" json:"workingset_nodereclaim,omitempty"`
+ Pgrefill uint64 `protobuf:"varint,23,opt,name=pgrefill,proto3" json:"pgrefill,omitempty"`
+ Pgscan uint64 `protobuf:"varint,24,opt,name=pgscan,proto3" json:"pgscan,omitempty"`
+ Pgsteal uint64 `protobuf:"varint,25,opt,name=pgsteal,proto3" json:"pgsteal,omitempty"`
+ Pgactivate uint64 `protobuf:"varint,26,opt,name=pgactivate,proto3" json:"pgactivate,omitempty"`
+ Pgdeactivate uint64 `protobuf:"varint,27,opt,name=pgdeactivate,proto3" json:"pgdeactivate,omitempty"`
+ Pglazyfree uint64 `protobuf:"varint,28,opt,name=pglazyfree,proto3" json:"pglazyfree,omitempty"`
+ Pglazyfreed uint64 `protobuf:"varint,29,opt,name=pglazyfreed,proto3" json:"pglazyfreed,omitempty"`
+ ThpFaultAlloc uint64 `protobuf:"varint,30,opt,name=thp_fault_alloc,json=thpFaultAlloc,proto3" json:"thp_fault_alloc,omitempty"`
+ ThpCollapseAlloc uint64 `protobuf:"varint,31,opt,name=thp_collapse_alloc,json=thpCollapseAlloc,proto3" json:"thp_collapse_alloc,omitempty"`
+ Usage uint64 `protobuf:"varint,32,opt,name=usage,proto3" json:"usage,omitempty"`
+ UsageLimit uint64 `protobuf:"varint,33,opt,name=usage_limit,json=usageLimit,proto3" json:"usage_limit,omitempty"`
+ SwapUsage uint64 `protobuf:"varint,34,opt,name=swap_usage,json=swapUsage,proto3" json:"swap_usage,omitempty"`
+ SwapLimit uint64 `protobuf:"varint,35,opt,name=swap_limit,json=swapLimit,proto3" json:"swap_limit,omitempty"`
+ MaxUsage uint64 `protobuf:"varint,36,opt,name=max_usage,json=maxUsage,proto3" json:"max_usage,omitempty"`
+ SwapMaxUsage uint64 `protobuf:"varint,37,opt,name=swap_max_usage,json=swapMaxUsage,proto3" json:"swap_max_usage,omitempty"`
+ PSI *PSIStats `protobuf:"bytes,38,opt,name=psi,proto3" json:"psi,omitempty"`
}
func (x *MemoryStat) Reset() {
*x = MemoryStat{}
if protoimpl.UnsafeEnabled {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[3]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -315,7 +452,7 @@ func (x *MemoryStat) String() string {
func (*MemoryStat) ProtoMessage() {}
func (x *MemoryStat) ProtoReflect() protoreflect.Message {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[3]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -328,7 +465,7 @@ func (x *MemoryStat) ProtoReflect() protoreflect.Message {
// Deprecated: Use MemoryStat.ProtoReflect.Descriptor instead.
func (*MemoryStat) Descriptor() ([]byte, []int) {
- return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{3}
+ return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{5}
}
func (x *MemoryStat) GetAnon() uint64 {
@@ -576,6 +713,27 @@ func (x *MemoryStat) GetSwapLimit() uint64 {
return 0
}
+func (x *MemoryStat) GetMaxUsage() uint64 {
+ if x != nil {
+ return x.MaxUsage
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetSwapMaxUsage() uint64 {
+ if x != nil {
+ return x.SwapMaxUsage
+ }
+ return 0
+}
+
+func (x *MemoryStat) GetPSI() *PSIStats {
+ if x != nil {
+ return x.PSI
+ }
+ return nil
+}
+
type MemoryEvents struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -591,7 +749,7 @@ type MemoryEvents struct {
func (x *MemoryEvents) Reset() {
*x = MemoryEvents{}
if protoimpl.UnsafeEnabled {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[4]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -604,7 +762,7 @@ func (x *MemoryEvents) String() string {
func (*MemoryEvents) ProtoMessage() {}
func (x *MemoryEvents) ProtoReflect() protoreflect.Message {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[4]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -617,7 +775,7 @@ func (x *MemoryEvents) ProtoReflect() protoreflect.Message {
// Deprecated: Use MemoryEvents.ProtoReflect.Descriptor instead.
func (*MemoryEvents) Descriptor() ([]byte, []int) {
- return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{4}
+ return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{6}
}
func (x *MemoryEvents) GetLow() uint64 {
@@ -667,7 +825,7 @@ type RdmaStat struct {
func (x *RdmaStat) Reset() {
*x = RdmaStat{}
if protoimpl.UnsafeEnabled {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[5]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -680,7 +838,7 @@ func (x *RdmaStat) String() string {
func (*RdmaStat) ProtoMessage() {}
func (x *RdmaStat) ProtoReflect() protoreflect.Message {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[5]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -693,7 +851,7 @@ func (x *RdmaStat) ProtoReflect() protoreflect.Message {
// Deprecated: Use RdmaStat.ProtoReflect.Descriptor instead.
func (*RdmaStat) Descriptor() ([]byte, []int) {
- return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{5}
+ return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{7}
}
func (x *RdmaStat) GetCurrent() []*RdmaEntry {
@@ -723,7 +881,7 @@ type RdmaEntry struct {
func (x *RdmaEntry) Reset() {
*x = RdmaEntry{}
if protoimpl.UnsafeEnabled {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[6]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -736,7 +894,7 @@ func (x *RdmaEntry) String() string {
func (*RdmaEntry) ProtoMessage() {}
func (x *RdmaEntry) ProtoReflect() protoreflect.Message {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[6]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -749,7 +907,7 @@ func (x *RdmaEntry) ProtoReflect() protoreflect.Message {
// Deprecated: Use RdmaEntry.ProtoReflect.Descriptor instead.
func (*RdmaEntry) Descriptor() ([]byte, []int) {
- return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{6}
+ return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{8}
}
func (x *RdmaEntry) GetDevice() string {
@@ -779,12 +937,13 @@ type IOStat struct {
unknownFields protoimpl.UnknownFields
Usage []*IOEntry `protobuf:"bytes,1,rep,name=usage,proto3" json:"usage,omitempty"`
+ PSI *PSIStats `protobuf:"bytes,2,opt,name=psi,proto3" json:"psi,omitempty"`
}
func (x *IOStat) Reset() {
*x = IOStat{}
if protoimpl.UnsafeEnabled {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[7]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -797,7 +956,7 @@ func (x *IOStat) String() string {
func (*IOStat) ProtoMessage() {}
func (x *IOStat) ProtoReflect() protoreflect.Message {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[7]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -810,7 +969,7 @@ func (x *IOStat) ProtoReflect() protoreflect.Message {
// Deprecated: Use IOStat.ProtoReflect.Descriptor instead.
func (*IOStat) Descriptor() ([]byte, []int) {
- return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{7}
+ return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{9}
}
func (x *IOStat) GetUsage() []*IOEntry {
@@ -820,6 +979,13 @@ func (x *IOStat) GetUsage() []*IOEntry {
return nil
}
+func (x *IOStat) GetPSI() *PSIStats {
+ if x != nil {
+ return x.PSI
+ }
+ return nil
+}
+
type IOEntry struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -836,7 +1002,7 @@ type IOEntry struct {
func (x *IOEntry) Reset() {
*x = IOEntry{}
if protoimpl.UnsafeEnabled {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[8]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -849,7 +1015,7 @@ func (x *IOEntry) String() string {
func (*IOEntry) ProtoMessage() {}
func (x *IOEntry) ProtoReflect() protoreflect.Message {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[8]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -862,7 +1028,7 @@ func (x *IOEntry) ProtoReflect() protoreflect.Message {
// Deprecated: Use IOEntry.ProtoReflect.Descriptor instead.
func (*IOEntry) Descriptor() ([]byte, []int) {
- return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{8}
+ return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{10}
}
func (x *IOEntry) GetMajor() uint64 {
@@ -920,7 +1086,7 @@ type HugeTlbStat struct {
func (x *HugeTlbStat) Reset() {
*x = HugeTlbStat{}
if protoimpl.UnsafeEnabled {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[9]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -933,7 +1099,7 @@ func (x *HugeTlbStat) String() string {
func (*HugeTlbStat) ProtoMessage() {}
func (x *HugeTlbStat) ProtoReflect() protoreflect.Message {
- mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[9]
+ mi := &file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -946,7 +1112,7 @@ func (x *HugeTlbStat) ProtoReflect() protoreflect.Message {
// Deprecated: Use HugeTlbStat.ProtoReflect.Descriptor instead.
func (*HugeTlbStat) Descriptor() ([]byte, []int) {
- return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{9}
+ return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(), []int{11}
}
func (x *HugeTlbStat) GetCurrent() uint64 {
@@ -1005,141 +1171,169 @@ var file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDesc = []b
0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63,
0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79,
0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x45, 0x76,
- 0x65, 0x6e, 0x74, 0x73, 0x22, 0x3a, 0x0a, 0x08, 0x50, 0x69, 0x64, 0x73, 0x53, 0x74, 0x61, 0x74,
- 0x12, 0x18, 0x0a, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x04, 0x52, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69,
- 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74,
- 0x22, 0xcf, 0x01, 0x0a, 0x07, 0x43, 0x50, 0x55, 0x53, 0x74, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a,
- 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
- 0x52, 0x09, 0x75, 0x73, 0x61, 0x67, 0x65, 0x55, 0x73, 0x65, 0x63, 0x12, 0x1b, 0x0a, 0x09, 0x75,
- 0x73, 0x65, 0x72, 0x5f, 0x75, 0x73, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08,
- 0x75, 0x73, 0x65, 0x72, 0x55, 0x73, 0x65, 0x63, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x79, 0x73, 0x74,
- 0x65, 0x6d, 0x5f, 0x75, 0x73, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73,
- 0x79, 0x73, 0x74, 0x65, 0x6d, 0x55, 0x73, 0x65, 0x63, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x72, 0x5f,
- 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6e,
- 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x72, 0x5f, 0x74,
- 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
- 0x6e, 0x72, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x74,
- 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x75, 0x73, 0x65, 0x63, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x04, 0x52, 0x0d, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x55, 0x73,
- 0x65, 0x63, 0x22, 0x8f, 0x09, 0x0a, 0x0a, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61,
- 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x6e, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52,
- 0x04, 0x61, 0x6e, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6b, 0x65, 0x72,
- 0x6e, 0x65, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52,
- 0x0b, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04,
- 0x73, 0x6c, 0x61, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x6c, 0x61, 0x62,
- 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6f, 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04,
- 0x73, 0x6f, 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x6d, 0x65, 0x6d, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x68, 0x6d, 0x65, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x69,
- 0x6c, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52,
- 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x66,
- 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x69, 0x72, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52,
- 0x09, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x74, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x69,
- 0x6c, 0x65, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b, 0x18, 0x09, 0x20, 0x01,
- 0x28, 0x04, 0x52, 0x0d, 0x66, 0x69, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63,
- 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x6e, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x70, 0x18, 0x0a, 0x20,
- 0x01, 0x28, 0x04, 0x52, 0x07, 0x61, 0x6e, 0x6f, 0x6e, 0x54, 0x68, 0x70, 0x12, 0x23, 0x0a, 0x0d,
- 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x61, 0x6e, 0x6f, 0x6e, 0x18, 0x0b, 0x20,
- 0x01, 0x28, 0x04, 0x52, 0x0c, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f,
- 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x61, 0x6e, 0x6f, 0x6e,
- 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e,
- 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66,
- 0x69, 0x6c, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x69, 0x6e, 0x61, 0x63, 0x74,
- 0x69, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76,
- 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x63,
- 0x74, 0x69, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x75, 0x6e, 0x65, 0x76,
- 0x69, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x75,
- 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x6c,
- 0x61, 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x10,
- 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x73, 0x6c, 0x61, 0x62, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69,
- 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x73, 0x6c, 0x61, 0x62, 0x5f, 0x75, 0x6e,
- 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28,
- 0x04, 0x52, 0x11, 0x73, 0x6c, 0x61, 0x62, 0x55, 0x6e, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d,
- 0x61, 0x62, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x67, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18,
- 0x12, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, 0x67, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x1e,
- 0x0a, 0x0a, 0x70, 0x67, 0x6d, 0x61, 0x6a, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x13, 0x20, 0x01,
- 0x28, 0x04, 0x52, 0x0a, 0x70, 0x67, 0x6d, 0x61, 0x6a, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x2d,
- 0x0a, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x66,
- 0x61, 0x75, 0x6c, 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b,
- 0x69, 0x6e, 0x67, 0x73, 0x65, 0x74, 0x52, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x2f, 0x0a,
- 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x74, 0x69,
- 0x76, 0x61, 0x74, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x77, 0x6f, 0x72, 0x6b,
- 0x69, 0x6e, 0x67, 0x73, 0x65, 0x74, 0x41, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x35,
- 0x0a, 0x16, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x74, 0x5f, 0x6e, 0x6f, 0x64,
- 0x65, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x16, 0x20, 0x01, 0x28, 0x04, 0x52, 0x15,
- 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x72, 0x65,
- 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x67, 0x72, 0x65, 0x66, 0x69, 0x6c,
- 0x6c, 0x18, 0x17, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x67, 0x72, 0x65, 0x66, 0x69, 0x6c,
- 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x67, 0x73, 0x63, 0x61, 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28,
- 0x04, 0x52, 0x06, 0x70, 0x67, 0x73, 0x63, 0x61, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x67, 0x73,
- 0x74, 0x65, 0x61, 0x6c, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, 0x67, 0x73, 0x74,
- 0x65, 0x61, 0x6c, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x67, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74,
- 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x70, 0x67, 0x61, 0x63, 0x74, 0x69, 0x76,
- 0x61, 0x74, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x70, 0x67, 0x64, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76,
- 0x61, 0x74, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x70, 0x67, 0x64, 0x65, 0x61,
- 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x67, 0x6c, 0x61, 0x7a,
- 0x79, 0x66, 0x72, 0x65, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x70, 0x67, 0x6c,
- 0x61, 0x7a, 0x79, 0x66, 0x72, 0x65, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x67, 0x6c, 0x61, 0x7a,
- 0x79, 0x66, 0x72, 0x65, 0x65, 0x64, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x70, 0x67,
- 0x6c, 0x61, 0x7a, 0x79, 0x66, 0x72, 0x65, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x68, 0x70,
- 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x18, 0x1e, 0x20, 0x01,
- 0x28, 0x04, 0x52, 0x0d, 0x74, 0x68, 0x70, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x6c, 0x6c, 0x6f,
- 0x63, 0x12, 0x2c, 0x0a, 0x12, 0x74, 0x68, 0x70, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x61, 0x70, 0x73,
- 0x65, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x74,
- 0x68, 0x70, 0x43, 0x6f, 0x6c, 0x6c, 0x61, 0x70, 0x73, 0x65, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x12,
- 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05,
- 0x75, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6c,
- 0x69, 0x6d, 0x69, 0x74, 0x18, 0x21, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x75, 0x73, 0x61, 0x67,
- 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x77, 0x61, 0x70, 0x5f, 0x75,
- 0x73, 0x61, 0x67, 0x65, 0x18, 0x22, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x77, 0x61, 0x70,
- 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x77, 0x61, 0x70, 0x5f, 0x6c, 0x69,
- 0x6d, 0x69, 0x74, 0x18, 0x23, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x77, 0x61, 0x70, 0x4c,
- 0x69, 0x6d, 0x69, 0x74, 0x22, 0x73, 0x0a, 0x0c, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x45, 0x76,
- 0x65, 0x6e, 0x74, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x04, 0x52, 0x03, 0x6c, 0x6f, 0x77, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, 0x67, 0x68, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x68, 0x69, 0x67, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61,
- 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, 0x10, 0x0a, 0x03,
- 0x6f, 0x6f, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6f, 0x6f, 0x6d, 0x12, 0x19,
- 0x0a, 0x08, 0x6f, 0x6f, 0x6d, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04,
- 0x52, 0x07, 0x6f, 0x6f, 0x6d, 0x4b, 0x69, 0x6c, 0x6c, 0x22, 0x84, 0x01, 0x0a, 0x08, 0x52, 0x64,
- 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x12, 0x3d, 0x0a, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e,
- 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e,
- 0x76, 0x32, 0x2e, 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x63, 0x75,
- 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
- 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x32, 0x2e,
- 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74,
- 0x22, 0x65, 0x0a, 0x09, 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x16, 0x0a,
- 0x06, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64,
- 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x63, 0x61, 0x5f, 0x68, 0x61, 0x6e,
- 0x64, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x68, 0x63, 0x61, 0x48,
- 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x63, 0x61, 0x5f, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x68, 0x63, 0x61,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x06, 0x49, 0x4f, 0x53, 0x74, 0x61,
- 0x74, 0x12, 0x37, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x21, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
- 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x4f, 0x45, 0x6e,
- 0x74, 0x72, 0x79, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x22, 0x8d, 0x01, 0x0a, 0x07, 0x49,
- 0x4f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05,
- 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6d, 0x69, 0x6e,
- 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x04, 0x52, 0x06, 0x72, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x77, 0x62, 0x79, 0x74,
- 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x69, 0x6f, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04,
- 0x52, 0x04, 0x72, 0x69, 0x6f, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x77, 0x69, 0x6f, 0x73, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x77, 0x69, 0x6f, 0x73, 0x22, 0x55, 0x0a, 0x0b, 0x48, 0x75,
- 0x67, 0x65, 0x54, 0x6c, 0x62, 0x53, 0x74, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x75, 0x72,
- 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x63, 0x75, 0x72, 0x72,
- 0x65, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
- 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x67, 0x65, 0x73, 0x69, 0x7a,
- 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x73, 0x69, 0x7a,
- 0x65, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x67, 0x72, 0x6f, 0x75,
- 0x70, 0x73, 0x2f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x32, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x73,
- 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x65, 0x6e, 0x74, 0x73, 0x22, 0x63, 0x0a, 0x07, 0x50, 0x53, 0x49, 0x44, 0x61, 0x74, 0x61, 0x12,
+ 0x14, 0x0a, 0x05, 0x61, 0x76, 0x67, 0x31, 0x30, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05,
+ 0x61, 0x76, 0x67, 0x31, 0x30, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x76, 0x67, 0x36, 0x30, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x61, 0x76, 0x67, 0x36, 0x30, 0x12, 0x16, 0x0a, 0x06, 0x61,
+ 0x76, 0x67, 0x33, 0x30, 0x30, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x61, 0x76, 0x67,
+ 0x33, 0x30, 0x30, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x22, 0x78, 0x0a, 0x08, 0x50, 0x53, 0x49,
+ 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x35, 0x0a, 0x04, 0x73, 0x6f, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
+ 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x50,
+ 0x53, 0x49, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x73, 0x6f, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x04,
+ 0x66, 0x75, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75,
+ 0x70, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x53, 0x49, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x66,
+ 0x75, 0x6c, 0x6c, 0x22, 0x3a, 0x0a, 0x08, 0x50, 0x69, 0x64, 0x73, 0x53, 0x74, 0x61, 0x74, 0x12,
+ 0x18, 0x0a, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d,
+ 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22,
+ 0x85, 0x02, 0x0a, 0x07, 0x43, 0x50, 0x55, 0x53, 0x74, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x75,
+ 0x73, 0x61, 0x67, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x09, 0x75, 0x73, 0x61, 0x67, 0x65, 0x55, 0x73, 0x65, 0x63, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73,
+ 0x65, 0x72, 0x5f, 0x75, 0x73, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75,
+ 0x73, 0x65, 0x72, 0x55, 0x73, 0x65, 0x63, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x79, 0x73, 0x74, 0x65,
+ 0x6d, 0x5f, 0x75, 0x73, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x79,
+ 0x73, 0x74, 0x65, 0x6d, 0x55, 0x73, 0x65, 0x63, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x72, 0x5f, 0x70,
+ 0x65, 0x72, 0x69, 0x6f, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6e, 0x72,
+ 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x72, 0x5f, 0x74, 0x68,
+ 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6e,
+ 0x72, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x68,
+ 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x75, 0x73, 0x65, 0x63, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x0d, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x55, 0x73, 0x65,
+ 0x63, 0x12, 0x34, 0x0a, 0x03, 0x70, 0x73, 0x69, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22,
+ 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63,
+ 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x53, 0x49, 0x53, 0x74, 0x61,
+ 0x74, 0x73, 0x52, 0x03, 0x70, 0x73, 0x69, 0x22, 0x88, 0x0a, 0x0a, 0x0a, 0x4d, 0x65, 0x6d, 0x6f,
+ 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x6e, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x61, 0x6e, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x69,
+ 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x21,
+ 0x0a, 0x0c, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x61, 0x63,
+ 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6c, 0x61, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x04, 0x73, 0x6c, 0x61, 0x62, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6f, 0x63, 0x6b, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x6f, 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x6d,
+ 0x65, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x68, 0x6d, 0x65, 0x6d, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x64,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x69, 0x72, 0x74, 0x79, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x74, 0x79, 0x12,
+ 0x25, 0x0a, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63,
+ 0x6b, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x66, 0x69, 0x6c, 0x65, 0x57, 0x72, 0x69,
+ 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x6e, 0x6f, 0x6e, 0x5f, 0x74,
+ 0x68, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x61, 0x6e, 0x6f, 0x6e, 0x54, 0x68,
+ 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x61, 0x6e,
+ 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x61, 0x6e, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x61, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c,
+ 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x20, 0x0a,
+ 0x0b, 0x75, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x0b, 0x75, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12,
+ 0x29, 0x0a, 0x10, 0x73, 0x6c, 0x61, 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x61,
+ 0x62, 0x6c, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x73, 0x6c, 0x61, 0x62, 0x52,
+ 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x73, 0x6c,
+ 0x61, 0x62, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x61, 0x62, 0x6c, 0x65,
+ 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x6c, 0x61, 0x62, 0x55, 0x6e, 0x72, 0x65,
+ 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x67, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x18, 0x12, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, 0x67, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x67, 0x6d, 0x61, 0x6a, 0x66, 0x61, 0x75, 0x6c,
+ 0x74, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x70, 0x67, 0x6d, 0x61, 0x6a, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x73, 0x65,
+ 0x74, 0x5f, 0x72, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x74, 0x52, 0x65, 0x66, 0x61, 0x75,
+ 0x6c, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x74,
+ 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x74, 0x41, 0x63, 0x74, 0x69, 0x76,
+ 0x61, 0x74, 0x65, 0x12, 0x35, 0x0a, 0x16, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x73, 0x65,
+ 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x16, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x15, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x74, 0x4e,
+ 0x6f, 0x64, 0x65, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x67,
+ 0x72, 0x65, 0x66, 0x69, 0x6c, 0x6c, 0x18, 0x17, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x67,
+ 0x72, 0x65, 0x66, 0x69, 0x6c, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x67, 0x73, 0x63, 0x61, 0x6e,
+ 0x18, 0x18, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x70, 0x67, 0x73, 0x63, 0x61, 0x6e, 0x12, 0x18,
+ 0x0a, 0x07, 0x70, 0x67, 0x73, 0x74, 0x65, 0x61, 0x6c, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x07, 0x70, 0x67, 0x73, 0x74, 0x65, 0x61, 0x6c, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x67, 0x61, 0x63,
+ 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x70, 0x67,
+ 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x70, 0x67, 0x64, 0x65,
+ 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c,
+ 0x70, 0x67, 0x64, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x1e, 0x0a, 0x0a,
+ 0x70, 0x67, 0x6c, 0x61, 0x7a, 0x79, 0x66, 0x72, 0x65, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x0a, 0x70, 0x67, 0x6c, 0x61, 0x7a, 0x79, 0x66, 0x72, 0x65, 0x65, 0x12, 0x20, 0x0a, 0x0b,
+ 0x70, 0x67, 0x6c, 0x61, 0x7a, 0x79, 0x66, 0x72, 0x65, 0x65, 0x64, 0x18, 0x1d, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x0b, 0x70, 0x67, 0x6c, 0x61, 0x7a, 0x79, 0x66, 0x72, 0x65, 0x65, 0x64, 0x12, 0x26,
+ 0x0a, 0x0f, 0x74, 0x68, 0x70, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x61, 0x6c, 0x6c, 0x6f,
+ 0x63, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x74, 0x68, 0x70, 0x46, 0x61, 0x75, 0x6c,
+ 0x74, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x12, 0x2c, 0x0a, 0x12, 0x74, 0x68, 0x70, 0x5f, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x61, 0x70, 0x73, 0x65, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x18, 0x1f, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x10, 0x74, 0x68, 0x70, 0x43, 0x6f, 0x6c, 0x6c, 0x61, 0x70, 0x73, 0x65, 0x41,
+ 0x6c, 0x6c, 0x6f, 0x63, 0x12, 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x20, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x73,
+ 0x61, 0x67, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x21, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x0a, 0x75, 0x73, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73,
+ 0x77, 0x61, 0x70, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x22, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x09, 0x73, 0x77, 0x61, 0x70, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x77,
+ 0x61, 0x70, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x23, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
+ 0x73, 0x77, 0x61, 0x70, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78,
+ 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x24, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61,
+ 0x78, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x77, 0x61, 0x70, 0x5f, 0x6d,
+ 0x61, 0x78, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c,
+ 0x73, 0x77, 0x61, 0x70, 0x4d, 0x61, 0x78, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x34, 0x0a, 0x03,
+ 0x70, 0x73, 0x69, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70,
+ 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x53, 0x49, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x03, 0x70,
+ 0x73, 0x69, 0x22, 0x73, 0x0a, 0x0c, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e,
+ 0x74, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x03, 0x6c, 0x6f, 0x77, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, 0x67, 0x68, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x04, 0x68, 0x69, 0x67, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, 0x10, 0x0a, 0x03, 0x6f, 0x6f,
+ 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6f, 0x6f, 0x6d, 0x12, 0x19, 0x0a, 0x08,
+ 0x6f, 0x6f, 0x6d, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07,
+ 0x6f, 0x6f, 0x6d, 0x4b, 0x69, 0x6c, 0x6c, 0x22, 0x84, 0x01, 0x0a, 0x08, 0x52, 0x64, 0x6d, 0x61,
+ 0x53, 0x74, 0x61, 0x74, 0x12, 0x3d, 0x0a, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x32,
+ 0x2e, 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x63, 0x75, 0x72, 0x72,
+ 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+ 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x64,
+ 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x65,
+ 0x0a, 0x09, 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x64,
+ 0x65, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x63, 0x61, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c,
+ 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x68, 0x63, 0x61, 0x48, 0x61, 0x6e,
+ 0x64, 0x6c, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x63, 0x61, 0x5f, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x68, 0x63, 0x61, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x73, 0x22, 0x77, 0x0a, 0x06, 0x49, 0x4f, 0x53, 0x74, 0x61, 0x74, 0x12,
+ 0x37, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21,
+ 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63,
+ 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x12, 0x34, 0x0a, 0x03, 0x70, 0x73, 0x69, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x32,
+ 0x2e, 0x50, 0x53, 0x49, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x03, 0x70, 0x73, 0x69, 0x22, 0x8d,
+ 0x01, 0x0a, 0x07, 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61,
+ 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72,
+ 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x72, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x16,
+ 0x0a, 0x06, 0x77, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06,
+ 0x77, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x69, 0x6f, 0x73, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x69, 0x6f, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x77, 0x69,
+ 0x6f, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x77, 0x69, 0x6f, 0x73, 0x22, 0x55,
+ 0x0a, 0x0b, 0x48, 0x75, 0x67, 0x65, 0x54, 0x6c, 0x62, 0x53, 0x74, 0x61, 0x74, 0x12, 0x18, 0x0a,
+ 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07,
+ 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x67,
+ 0x65, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x67,
+ 0x65, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63,
+ 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x32, 0x2f, 0x73,
+ 0x74, 0x61, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1154,35 +1348,42 @@ func file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescGZIP(
return file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDescData
}
-var file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
+var file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
var file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_goTypes = []interface{}{
(*Metrics)(nil), // 0: io.containerd.cgroups.v2.Metrics
- (*PidsStat)(nil), // 1: io.containerd.cgroups.v2.PidsStat
- (*CPUStat)(nil), // 2: io.containerd.cgroups.v2.CPUStat
- (*MemoryStat)(nil), // 3: io.containerd.cgroups.v2.MemoryStat
- (*MemoryEvents)(nil), // 4: io.containerd.cgroups.v2.MemoryEvents
- (*RdmaStat)(nil), // 5: io.containerd.cgroups.v2.RdmaStat
- (*RdmaEntry)(nil), // 6: io.containerd.cgroups.v2.RdmaEntry
- (*IOStat)(nil), // 7: io.containerd.cgroups.v2.IOStat
- (*IOEntry)(nil), // 8: io.containerd.cgroups.v2.IOEntry
- (*HugeTlbStat)(nil), // 9: io.containerd.cgroups.v2.HugeTlbStat
+ (*PSIData)(nil), // 1: io.containerd.cgroups.v2.PSIData
+ (*PSIStats)(nil), // 2: io.containerd.cgroups.v2.PSIStats
+ (*PidsStat)(nil), // 3: io.containerd.cgroups.v2.PidsStat
+ (*CPUStat)(nil), // 4: io.containerd.cgroups.v2.CPUStat
+ (*MemoryStat)(nil), // 5: io.containerd.cgroups.v2.MemoryStat
+ (*MemoryEvents)(nil), // 6: io.containerd.cgroups.v2.MemoryEvents
+ (*RdmaStat)(nil), // 7: io.containerd.cgroups.v2.RdmaStat
+ (*RdmaEntry)(nil), // 8: io.containerd.cgroups.v2.RdmaEntry
+ (*IOStat)(nil), // 9: io.containerd.cgroups.v2.IOStat
+ (*IOEntry)(nil), // 10: io.containerd.cgroups.v2.IOEntry
+ (*HugeTlbStat)(nil), // 11: io.containerd.cgroups.v2.HugeTlbStat
}
var file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_depIdxs = []int32{
- 1, // 0: io.containerd.cgroups.v2.Metrics.pids:type_name -> io.containerd.cgroups.v2.PidsStat
- 2, // 1: io.containerd.cgroups.v2.Metrics.cpu:type_name -> io.containerd.cgroups.v2.CPUStat
- 3, // 2: io.containerd.cgroups.v2.Metrics.memory:type_name -> io.containerd.cgroups.v2.MemoryStat
- 5, // 3: io.containerd.cgroups.v2.Metrics.rdma:type_name -> io.containerd.cgroups.v2.RdmaStat
- 7, // 4: io.containerd.cgroups.v2.Metrics.io:type_name -> io.containerd.cgroups.v2.IOStat
- 9, // 5: io.containerd.cgroups.v2.Metrics.hugetlb:type_name -> io.containerd.cgroups.v2.HugeTlbStat
- 4, // 6: io.containerd.cgroups.v2.Metrics.memory_events:type_name -> io.containerd.cgroups.v2.MemoryEvents
- 6, // 7: io.containerd.cgroups.v2.RdmaStat.current:type_name -> io.containerd.cgroups.v2.RdmaEntry
- 6, // 8: io.containerd.cgroups.v2.RdmaStat.limit:type_name -> io.containerd.cgroups.v2.RdmaEntry
- 8, // 9: io.containerd.cgroups.v2.IOStat.usage:type_name -> io.containerd.cgroups.v2.IOEntry
- 10, // [10:10] is the sub-list for method output_type
- 10, // [10:10] is the sub-list for method input_type
- 10, // [10:10] is the sub-list for extension type_name
- 10, // [10:10] is the sub-list for extension extendee
- 0, // [0:10] is the sub-list for field type_name
+ 3, // 0: io.containerd.cgroups.v2.Metrics.pids:type_name -> io.containerd.cgroups.v2.PidsStat
+ 4, // 1: io.containerd.cgroups.v2.Metrics.cpu:type_name -> io.containerd.cgroups.v2.CPUStat
+ 5, // 2: io.containerd.cgroups.v2.Metrics.memory:type_name -> io.containerd.cgroups.v2.MemoryStat
+ 7, // 3: io.containerd.cgroups.v2.Metrics.rdma:type_name -> io.containerd.cgroups.v2.RdmaStat
+ 9, // 4: io.containerd.cgroups.v2.Metrics.io:type_name -> io.containerd.cgroups.v2.IOStat
+ 11, // 5: io.containerd.cgroups.v2.Metrics.hugetlb:type_name -> io.containerd.cgroups.v2.HugeTlbStat
+ 6, // 6: io.containerd.cgroups.v2.Metrics.memory_events:type_name -> io.containerd.cgroups.v2.MemoryEvents
+ 1, // 7: io.containerd.cgroups.v2.PSIStats.some:type_name -> io.containerd.cgroups.v2.PSIData
+ 1, // 8: io.containerd.cgroups.v2.PSIStats.full:type_name -> io.containerd.cgroups.v2.PSIData
+ 2, // 9: io.containerd.cgroups.v2.CPUStat.psi:type_name -> io.containerd.cgroups.v2.PSIStats
+ 2, // 10: io.containerd.cgroups.v2.MemoryStat.psi:type_name -> io.containerd.cgroups.v2.PSIStats
+ 8, // 11: io.containerd.cgroups.v2.RdmaStat.current:type_name -> io.containerd.cgroups.v2.RdmaEntry
+ 8, // 12: io.containerd.cgroups.v2.RdmaStat.limit:type_name -> io.containerd.cgroups.v2.RdmaEntry
+ 10, // 13: io.containerd.cgroups.v2.IOStat.usage:type_name -> io.containerd.cgroups.v2.IOEntry
+ 2, // 14: io.containerd.cgroups.v2.IOStat.psi:type_name -> io.containerd.cgroups.v2.PSIStats
+ 15, // [15:15] is the sub-list for method output_type
+ 15, // [15:15] is the sub-list for method input_type
+ 15, // [15:15] is the sub-list for extension type_name
+ 15, // [15:15] is the sub-list for extension extendee
+ 0, // [0:15] is the sub-list for field type_name
}
func init() { file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_init() }
@@ -1204,7 +1405,7 @@ func file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_init() {
}
}
file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PidsStat); i {
+ switch v := v.(*PSIData); i {
case 0:
return &v.state
case 1:
@@ -1216,7 +1417,7 @@ func file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_init() {
}
}
file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CPUStat); i {
+ switch v := v.(*PSIStats); i {
case 0:
return &v.state
case 1:
@@ -1228,7 +1429,7 @@ func file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_init() {
}
}
file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MemoryStat); i {
+ switch v := v.(*PidsStat); i {
case 0:
return &v.state
case 1:
@@ -1240,7 +1441,7 @@ func file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_init() {
}
}
file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MemoryEvents); i {
+ switch v := v.(*CPUStat); i {
case 0:
return &v.state
case 1:
@@ -1252,7 +1453,7 @@ func file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_init() {
}
}
file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RdmaStat); i {
+ switch v := v.(*MemoryStat); i {
case 0:
return &v.state
case 1:
@@ -1264,7 +1465,7 @@ func file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_init() {
}
}
file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RdmaEntry); i {
+ switch v := v.(*MemoryEvents); i {
case 0:
return &v.state
case 1:
@@ -1276,7 +1477,7 @@ func file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_init() {
}
}
file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*IOStat); i {
+ switch v := v.(*RdmaStat); i {
case 0:
return &v.state
case 1:
@@ -1288,7 +1489,7 @@ func file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_init() {
}
}
file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*IOEntry); i {
+ switch v := v.(*RdmaEntry); i {
case 0:
return &v.state
case 1:
@@ -1300,6 +1501,30 @@ func file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_init() {
}
}
file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*IOStat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*IOEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*HugeTlbStat); i {
case 0:
return &v.state
@@ -1318,7 +1543,7 @@ func file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_containerd_cgroups_cgroup2_stats_metrics_proto_rawDesc,
NumEnums: 0,
- NumMessages: 10,
+ NumMessages: 12,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/vendor/github.com/containerd/cgroups/v3/cgroup2/stats/metrics.pb.txt b/vendor/github.com/containerd/cgroups/v3/cgroup2/stats/metrics.pb.txt
index f9b0cd3430c7..26f5ba5de7e4 100644
--- a/vendor/github.com/containerd/cgroups/v3/cgroup2/stats/metrics.pb.txt
+++ b/vendor/github.com/containerd/cgroups/v3/cgroup2/stats/metrics.pb.txt
@@ -60,6 +60,56 @@ file {
json_name: "memoryEvents"
}
}
+ message_type {
+ name: "PSIData"
+ field {
+ name: "avg10"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ json_name: "avg10"
+ }
+ field {
+ name: "avg60"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ json_name: "avg60"
+ }
+ field {
+ name: "avg300"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ json_name: "avg300"
+ }
+ field {
+ name: "total"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_UINT64
+ json_name: "total"
+ }
+ }
+ message_type {
+ name: "PSIStats"
+ field {
+ name: "some"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".io.containerd.cgroups.v2.PSIData"
+ json_name: "some"
+ }
+ field {
+ name: "full"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".io.containerd.cgroups.v2.PSIData"
+ json_name: "full"
+ }
+ }
message_type {
name: "PidsStat"
field {
@@ -121,6 +171,14 @@ file {
type: TYPE_UINT64
json_name: "throttledUsec"
}
+ field {
+ name: "psi"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".io.containerd.cgroups.v2.PSIStats"
+ json_name: "psi"
+ }
}
message_type {
name: "MemoryStat"
@@ -369,6 +427,28 @@ file {
type: TYPE_UINT64
json_name: "swapLimit"
}
+ field {
+ name: "max_usage"
+ number: 36
+ label: LABEL_OPTIONAL
+ type: TYPE_UINT64
+ json_name: "maxUsage"
+ }
+ field {
+ name: "swap_max_usage"
+ number: 37
+ label: LABEL_OPTIONAL
+ type: TYPE_UINT64
+ json_name: "swapMaxUsage"
+ }
+ field {
+ name: "psi"
+ number: 38
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".io.containerd.cgroups.v2.PSIStats"
+ json_name: "psi"
+ }
}
message_type {
name: "MemoryEvents"
@@ -461,6 +541,14 @@ file {
type_name: ".io.containerd.cgroups.v2.IOEntry"
json_name: "usage"
}
+ field {
+ name: "psi"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".io.containerd.cgroups.v2.PSIStats"
+ json_name: "psi"
+ }
}
message_type {
name: "IOEntry"
diff --git a/vendor/github.com/containerd/cgroups/v3/cgroup2/stats/metrics.proto b/vendor/github.com/containerd/cgroups/v3/cgroup2/stats/metrics.proto
index 3e9ee6d6abf5..a4eae7a4e19f 100644
--- a/vendor/github.com/containerd/cgroups/v3/cgroup2/stats/metrics.proto
+++ b/vendor/github.com/containerd/cgroups/v3/cgroup2/stats/metrics.proto
@@ -14,6 +14,18 @@ message Metrics {
MemoryEvents memory_events = 8;
}
+message PSIData {
+ double avg10 = 1;
+ double avg60 = 2;
+ double avg300 = 3;
+ uint64 total = 4;
+}
+
+message PSIStats {
+ PSIData some = 1;
+ PSIData full = 2;
+}
+
message PidsStat {
uint64 current = 1;
uint64 limit = 2;
@@ -26,6 +38,7 @@ message CPUStat {
uint64 nr_periods = 4;
uint64 nr_throttled = 5;
uint64 throttled_usec = 6;
+ PSIStats psi = 7;
}
message MemoryStat {
@@ -64,6 +77,9 @@ message MemoryStat {
uint64 usage_limit = 33;
uint64 swap_usage = 34;
uint64 swap_limit = 35;
+ uint64 max_usage = 36;
+ uint64 swap_max_usage = 37;
+ PSIStats psi = 38;
}
message MemoryEvents {
@@ -87,6 +103,7 @@ message RdmaEntry {
message IOStat {
repeated IOEntry usage = 1;
+ PSIStats psi = 2;
}
message IOEntry {
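Reviewer context: the two new messages mirror the kernel's PSI (pressure stall information) files one field per key. A cgroup v2 pressure file (cpu.pressure, memory.pressure, io.pressure) holds lines such as "some avg10=0.12 avg60=0.34 avg300=0.56 total=123456". A minimal sketch of the mapping, assuming only the vendored stats package above (the numeric values are made up):

package main

import (
	"fmt"

	"github.com/containerd/cgroups/v3/cgroup2/stats"
)

func main() {
	// Each "some"/"full" line of a *.pressure file maps one-to-one onto
	// a PSIData; PSIStats bundles the two lines together.
	p := &stats.PSIStats{
		Some: &stats.PSIData{Avg10: 0.12, Avg60: 0.34, Avg300: 0.56, Total: 123456},
		Full: &stats.PSIData{Avg10: 0.00, Avg60: 0.10, Avg300: 0.20, Total: 654321},
	}
	fmt.Printf("some avg10=%.2f%% stalled over the last 10s\n", p.Some.Avg10)
}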
diff --git a/vendor/github.com/containerd/cgroups/v3/cgroup2/utils.go b/vendor/github.com/containerd/cgroups/v3/cgroup2/utils.go
index f5302444a7f1..20be57ca47e6 100644
--- a/vendor/github.com/containerd/cgroups/v3/cgroup2/utils.go
+++ b/vendor/github.com/containerd/cgroups/v3/cgroup2/utils.go
@@ -68,8 +68,9 @@ func remove(path string) error {
return fmt.Errorf("cgroups: unable to remove path %q: %w", path, err)
}
-// parseCgroupProcsFile parses /sys/fs/cgroup/$GROUPPATH/cgroup.procs
-func parseCgroupProcsFile(path string) ([]uint64, error) {
+// parseCgroupTasksFile parses /sys/fs/cgroup/$GROUPPATH/cgroup.procs or
+// /sys/fs/cgroup/$GROUPPATH/cgroup.threads
+func parseCgroupTasksFile(path string) ([]uint64, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
@@ -176,6 +177,10 @@ func ToResources(spec *specs.LinuxResources) *Resources {
resources.Memory = &Memory{}
if swap := mem.Swap; swap != nil {
resources.Memory.Swap = swap
+ if l := mem.Limit; l != nil {
+ reduce := *swap - *l
+ resources.Memory.Swap = &reduce
+ }
}
if l := mem.Limit; l != nil {
resources.Memory.Max = l
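The added reduction reflects a semantic mismatch: the OCI spec's memory swap value is a combined memory+swap limit, while cgroup v2's memory.swap.max counts swap alone, so the memory limit has to be subtracted before the value is written. A small worked illustration of the arithmetic (the byte values are made up):

package main

import "fmt"

func main() {
	// OCI LinuxResources values, combined memory+swap, in bytes.
	limit := int64(512 << 20) // memory limit: 512 MiB
	swap := int64(768 << 20)  // memory+swap limit: 768 MiB

	// cgroup v2's memory.swap.max counts swap only, hence the
	// reduction performed by ToResources above.
	swapOnly := swap - limit
	fmt.Printf("memory.swap.max = %d MiB\n", swapOnly>>20) // 256 MiB
}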
@@ -479,6 +484,74 @@ func getHugePageSizeFromFilenames(fileNames []string) ([]string, error) {
return pageSizes, warn
}
+func getStatPSIFromFile(path string) *stats.PSIStats {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil
+ }
+ defer f.Close()
+
+ psistats := &stats.PSIStats{}
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ parts := strings.Fields(sc.Text())
+ var pv *stats.PSIData
+ switch parts[0] {
+ case "some":
+ psistats.Some = &stats.PSIData{}
+ pv = psistats.Some
+ case "full":
+ psistats.Full = &stats.PSIData{}
+ pv = psistats.Full
+ }
+ if pv != nil {
+ err = parsePSIData(parts[1:], pv)
+ if err != nil {
+ logrus.Errorf("failed to read file %s: %v", path, err)
+ return nil
+ }
+ }
+ }
+
+ if err := sc.Err(); err != nil {
+ logrus.Errorf("unable to parse PSI data: %v", err)
+ return nil
+ }
+ return psistats
+}
+
+func parsePSIData(psi []string, data *stats.PSIData) error {
+ for _, f := range psi {
+ kv := strings.SplitN(f, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("invalid PSI data: %q", f)
+ }
+ var pv *float64
+ switch kv[0] {
+ case "avg10":
+ pv = &data.Avg10
+ case "avg60":
+ pv = &data.Avg60
+ case "avg300":
+ pv = &data.Avg300
+ case "total":
+ v, err := strconv.ParseUint(kv[1], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid %s PSI value: %w", kv[0], err)
+ }
+ data.Total = v
+ }
+ if pv != nil {
+ v, err := strconv.ParseFloat(kv[1], 64)
+ if err != nil {
+ return fmt.Errorf("invalid %s PSI value: %w", kv[0], err)
+ }
+ *pv = v
+ }
+ }
+ return nil
+}
+
func getSubreaper() (int, error) {
var i uintptr
if err := unix.Prctl(unix.PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0, 0, 0); err != nil {
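For reference, the helpers above split each pressure line on whitespace and on "=", fanning the values out into a PSIData. Since the vendored functions are unexported, the following standalone sketch re-implements the same line format for illustration:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// psiData mirrors stats.PSIData for illustration only.
type psiData struct {
	Avg10, Avg60, Avg300 float64
	Total                uint64
}

// parseLine handles one line such as
// "some avg10=0.12 avg60=0.34 avg300=0.56 total=123456".
func parseLine(line string) (kind string, d psiData, err error) {
	fields := strings.Fields(line)
	if len(fields) == 0 {
		return "", d, fmt.Errorf("empty PSI line")
	}
	kind = fields[0] // "some" or "full"
	for _, f := range fields[1:] {
		kv := strings.SplitN(f, "=", 2)
		if len(kv) != 2 {
			return "", d, fmt.Errorf("invalid PSI data: %q", f)
		}
		switch kv[0] {
		case "avg10":
			d.Avg10, err = strconv.ParseFloat(kv[1], 64)
		case "avg60":
			d.Avg60, err = strconv.ParseFloat(kv[1], 64)
		case "avg300":
			d.Avg300, err = strconv.ParseFloat(kv[1], 64)
		case "total":
			d.Total, err = strconv.ParseUint(kv[1], 10, 64)
		}
		if err != nil {
			return "", d, fmt.Errorf("invalid %s PSI value: %w", kv[0], err)
		}
	}
	return kind, d, nil
}

func main() {
	kind, d, err := parseLine("some avg10=0.12 avg60=0.34 avg300=0.56 total=123456")
	fmt.Println(kind, d, err) // some {0.12 0.34 0.56 123456} <nil>
}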
diff --git a/vendor/github.com/containerd/console/.golangci.yml b/vendor/github.com/containerd/console/.golangci.yml
index fcba5e885f0a..abe3d84bb164 100644
--- a/vendor/github.com/containerd/console/.golangci.yml
+++ b/vendor/github.com/containerd/console/.golangci.yml
@@ -1,16 +1,16 @@
linters:
enable:
- - structcheck
- - varcheck
- - staticcheck
- - unconvert
- gofmt
- goimports
- - golint
- ineffassign
- - vet
- - unused
- misspell
+ - revive
+ - staticcheck
+ - structcheck
+ - unconvert
+ - unused
+ - varcheck
+ - vet
disable:
- errcheck
diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md
index 580b461a73db..a849a728f1e5 100644
--- a/vendor/github.com/containerd/console/README.md
+++ b/vendor/github.com/containerd/console/README.md
@@ -22,8 +22,8 @@ current.Resize(ws)
console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
- * [Project governance](/~https://github.com/containerd/project/blob/master/GOVERNANCE.md),
- * [Maintainers](/~https://github.com/containerd/project/blob/master/MAINTAINERS),
- * and [Contributing guidelines](/~https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
+ * [Project governance](/~https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](/~https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](/~https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
information in our [`containerd/project`](/~https://github.com/containerd/project) repository.
diff --git a/vendor/github.com/containerd/console/console.go b/vendor/github.com/containerd/console/console.go
index f989d28a41cd..dd587d88e072 100644
--- a/vendor/github.com/containerd/console/console.go
+++ b/vendor/github.com/containerd/console/console.go
@@ -22,7 +22,10 @@ import (
"os"
)
-var ErrNotAConsole = errors.New("provided file is not a console")
+var (
+ ErrNotAConsole = errors.New("provided file is not a console")
+ ErrNotImplemented = errors.New("not implemented")
+)
type File interface {
io.ReadWriteCloser
@@ -45,7 +48,7 @@ type Console interface {
SetRaw() error
// DisableEcho disables echo on the console
DisableEcho() error
- // Reset restores the console to its orignal state
+ // Reset restores the console to its original state
Reset() error
// Size returns the window size of the console
Size() (WinSize, error)
@@ -78,7 +81,7 @@ func Current() (c Console) {
}
// ConsoleFromFile returns a console using the provided file
-// nolint:golint
+// nolint:revive
func ConsoleFromFile(f File) (Console, error) {
if err := checkConsole(f); err != nil {
return nil, err
diff --git a/vendor/github.com/containerd/console/console_linux.go b/vendor/github.com/containerd/console/console_linux.go
index c1c839ee3ae0..28b77b7a3891 100644
--- a/vendor/github.com/containerd/console/console_linux.go
+++ b/vendor/github.com/containerd/console/console_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
/*
diff --git a/vendor/github.com/containerd/console/console_other.go b/vendor/github.com/containerd/console/console_other.go
new file mode 100644
index 000000000000..933dfadddae6
--- /dev/null
+++ b/vendor/github.com/containerd/console/console_other.go
@@ -0,0 +1,36 @@
+//go:build !darwin && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
+// +build !darwin,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package console
+
+// NewPty creates a new pty pair
+// The master is returned as the first console and a string
+// with the path to the pty slave is returned as the second
+func NewPty() (Console, string, error) {
+ return nil, "", ErrNotImplemented
+}
+
+// checkConsole checks if the provided file is a console
+func checkConsole(f File) error {
+ return ErrNotAConsole
+}
+
+func newMaster(f File) (Console, error) {
+ return nil, ErrNotImplemented
+}
diff --git a/vendor/github.com/containerd/console/console_unix.go b/vendor/github.com/containerd/console/console_unix.go
index a08117695e32..161f5d126cba 100644
--- a/vendor/github.com/containerd/console/console_unix.go
+++ b/vendor/github.com/containerd/console/console_unix.go
@@ -1,4 +1,5 @@
-// +build darwin freebsd linux netbsd openbsd solaris
+//go:build darwin || freebsd || linux || netbsd || openbsd || zos
+// +build darwin freebsd linux netbsd openbsd zos
/*
Copyright The containerd Authors.
diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go
index 787c11fe56fd..6896db1825fe 100644
--- a/vendor/github.com/containerd/console/console_windows.go
+++ b/vendor/github.com/containerd/console/console_windows.go
@@ -24,12 +24,13 @@ import (
"golang.org/x/sys/windows"
)
-var (
- vtInputSupported bool
- ErrNotImplemented = errors.New("not implemented")
-)
+var vtInputSupported bool
func (m *master) initStdios() {
+ // Note: We discard console mode warnings, because in/out can be redirected.
+ //
+ // TODO: Investigate opening CONOUT$/CONIN$ to handle this correctly
+
m.in = windows.Handle(os.Stdin.Fd())
if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil {
// Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
@@ -39,8 +40,6 @@ func (m *master) initStdios() {
// Unconditionally set the console mode back even on failure because SetConsoleMode
// remembers invalid bits on input handles.
windows.SetConsoleMode(m.in, m.inMode)
- } else {
- fmt.Printf("failed to get console mode for stdin: %v\n", err)
}
m.out = windows.Handle(os.Stdout.Fd())
@@ -50,8 +49,6 @@ func (m *master) initStdios() {
} else {
windows.SetConsoleMode(m.out, m.outMode)
}
- } else {
- fmt.Printf("failed to get console mode for stdout: %v\n", err)
}
m.err = windows.Handle(os.Stderr.Fd())
@@ -61,8 +58,6 @@ func (m *master) initStdios() {
} else {
windows.SetConsoleMode(m.err, m.errMode)
}
- } else {
- fmt.Printf("failed to get console mode for stderr: %v\n", err)
}
}
@@ -94,6 +89,8 @@ func (m *master) SetRaw() error {
}
func (m *master) Reset() error {
+ var errs []error
+
for _, s := range []struct {
fd windows.Handle
mode uint32
@@ -103,10 +100,16 @@ func (m *master) Reset() error {
{m.err, m.errMode},
} {
if err := windows.SetConsoleMode(s.fd, s.mode); err != nil {
- return fmt.Errorf("unable to restore console mode: %w", err)
+ // we can't just abort on the first error, otherwise we might leave
+ // the console in an unexpected state.
+ errs = append(errs, fmt.Errorf("unable to restore console mode: %w", err))
}
}
+ if len(errs) > 0 {
+ return errs[0]
+ }
+
return nil
}
diff --git a/vendor/github.com/containerd/console/console_zos.go b/vendor/github.com/containerd/console/console_zos.go
deleted file mode 100644
index b348a839a03b..000000000000
--- a/vendor/github.com/containerd/console/console_zos.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// +build zos
-
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package console
-
-import (
- "fmt"
- "os"
-
- "golang.org/x/sys/unix"
-)
-
-// NewPty creates a new pty pair
-// The master is returned as the first console and a string
-// with the path to the pty slave is returned as the second
-func NewPty() (Console, string, error) {
- var f File
- var err error
- var slave string
- for i := 0;; i++ {
- ptyp := fmt.Sprintf("/dev/ptyp%04d", i)
- f, err = os.OpenFile(ptyp, os.O_RDWR, 0600)
- if err == nil {
- slave = fmt.Sprintf("/dev/ttyp%04d", i)
- break
- }
- if os.IsNotExist(err) {
- return nil, "", err
- }
- // else probably Resource Busy
- }
- m, err := newMaster(f)
- if err != nil {
- return nil, "", err
- }
- return m, slave, nil
-}
-
-type master struct {
- f File
- original *unix.Termios
-}
-
-func (m *master) Read(b []byte) (int, error) {
- return m.f.Read(b)
-}
-
-func (m *master) Write(b []byte) (int, error) {
- return m.f.Write(b)
-}
-
-func (m *master) Close() error {
- return m.f.Close()
-}
-
-func (m *master) Resize(ws WinSize) error {
- return tcswinsz(m.f.Fd(), ws)
-}
-
-func (m *master) ResizeFrom(c Console) error {
- ws, err := c.Size()
- if err != nil {
- return err
- }
- return m.Resize(ws)
-}
-
-func (m *master) Reset() error {
- if m.original == nil {
- return nil
- }
- return tcset(m.f.Fd(), m.original)
-}
-
-func (m *master) getCurrent() (unix.Termios, error) {
- var termios unix.Termios
- if err := tcget(m.f.Fd(), &termios); err != nil {
- return unix.Termios{}, err
- }
- return termios, nil
-}
-
-func (m *master) SetRaw() error {
- rawState, err := m.getCurrent()
- if err != nil {
- return err
- }
- rawState = cfmakeraw(rawState)
- rawState.Oflag = rawState.Oflag | unix.OPOST
- return tcset(m.f.Fd(), &rawState)
-}
-
-func (m *master) DisableEcho() error {
- rawState, err := m.getCurrent()
- if err != nil {
- return err
- }
- rawState.Lflag = rawState.Lflag &^ unix.ECHO
- return tcset(m.f.Fd(), &rawState)
-}
-
-func (m *master) Size() (WinSize, error) {
- return tcgwinsz(m.f.Fd())
-}
-
-func (m *master) Fd() uintptr {
- return m.f.Fd()
-}
-
-func (m *master) Name() string {
- return m.f.Name()
-}
-
-// checkConsole checks if the provided file is a console
-func checkConsole(f File) error {
- var termios unix.Termios
- if tcget(f.Fd(), &termios) != nil {
- return ErrNotAConsole
- }
- return nil
-}
-
-func newMaster(f File) (Console, error) {
- m := &master{
- f: f,
- }
- t, err := m.getCurrent()
- if err != nil {
- return nil, err
- }
- m.original = &t
- return m, nil
-}
-
-// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
-// created by us acts normally. In particular, a not-very-well-known default of
-// Linux unix98 ptys is that they have +onlcr by default. While this isn't a
-// problem for terminal emulators, because we relay data from the terminal we
-// also relay that funky line discipline.
-func ClearONLCR(fd uintptr) error {
- return setONLCR(fd, false)
-}
-
-// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
-// created by us acts as intended for a terminal emulator.
-func SetONLCR(fd uintptr) error {
- return setONLCR(fd, true)
-}
diff --git a/vendor/github.com/containerd/console/pty_freebsd_cgo.go b/vendor/github.com/containerd/console/pty_freebsd_cgo.go
index cbd3cd7ea43d..22368623aab2 100644
--- a/vendor/github.com/containerd/console/pty_freebsd_cgo.go
+++ b/vendor/github.com/containerd/console/pty_freebsd_cgo.go
@@ -1,3 +1,4 @@
+//go:build freebsd && cgo
// +build freebsd,cgo
/*
diff --git a/vendor/github.com/containerd/console/pty_freebsd_nocgo.go b/vendor/github.com/containerd/console/pty_freebsd_nocgo.go
index b5e43181d4f3..ceb90a47b818 100644
--- a/vendor/github.com/containerd/console/pty_freebsd_nocgo.go
+++ b/vendor/github.com/containerd/console/pty_freebsd_nocgo.go
@@ -1,3 +1,4 @@
+//go:build freebsd && !cgo
// +build freebsd,!cgo
/*
diff --git a/vendor/github.com/containerd/console/pty_unix.go b/vendor/github.com/containerd/console/pty_unix.go
index d5a6bd8ca2e8..f5a5b8058c65 100644
--- a/vendor/github.com/containerd/console/pty_unix.go
+++ b/vendor/github.com/containerd/console/pty_unix.go
@@ -1,4 +1,5 @@
-// +build darwin linux netbsd openbsd solaris
+//go:build darwin || linux || netbsd || openbsd
+// +build darwin linux netbsd openbsd
/*
Copyright The containerd Authors.
diff --git a/vendor/github.com/containerd/console/pty_zos.go b/vendor/github.com/containerd/console/pty_zos.go
new file mode 100644
index 000000000000..58f59aba58c9
--- /dev/null
+++ b/vendor/github.com/containerd/console/pty_zos.go
@@ -0,0 +1,43 @@
+//go:build zos
+// +build zos
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package console
+
+import (
+ "fmt"
+ "os"
+)
+
+// openpt allocates a new pseudo-terminal by opening the first available /dev/ptypXX device
+func openpt() (*os.File, error) {
+ var f *os.File
+ var err error
+ for i := 0; ; i++ {
+ ptyp := fmt.Sprintf("/dev/ptyp%04d", i)
+ f, err = os.OpenFile(ptyp, os.O_RDWR, 0600)
+ if err == nil {
+ break
+ }
+ if os.IsNotExist(err) {
+ return nil, err
+ }
+ // else probably Resource Busy
+ }
+ return f, nil
+}
diff --git a/vendor/github.com/containerd/console/tc_freebsd_cgo.go b/vendor/github.com/containerd/console/tc_freebsd_cgo.go
index 0f3d27273094..33282579411e 100644
--- a/vendor/github.com/containerd/console/tc_freebsd_cgo.go
+++ b/vendor/github.com/containerd/console/tc_freebsd_cgo.go
@@ -1,3 +1,4 @@
+//go:build freebsd && cgo
// +build freebsd,cgo
/*
diff --git a/vendor/github.com/containerd/console/tc_freebsd_nocgo.go b/vendor/github.com/containerd/console/tc_freebsd_nocgo.go
index 087fc158a169..18a9b9cbea97 100644
--- a/vendor/github.com/containerd/console/tc_freebsd_nocgo.go
+++ b/vendor/github.com/containerd/console/tc_freebsd_nocgo.go
@@ -1,3 +1,4 @@
+//go:build freebsd && !cgo
// +build freebsd,!cgo
/*
diff --git a/vendor/github.com/containerd/console/tc_openbsd_cgo.go b/vendor/github.com/containerd/console/tc_openbsd_cgo.go
index f0cec06a72dc..0e76f6cc3e0a 100644
--- a/vendor/github.com/containerd/console/tc_openbsd_cgo.go
+++ b/vendor/github.com/containerd/console/tc_openbsd_cgo.go
@@ -1,3 +1,4 @@
+//go:build openbsd && cgo
// +build openbsd,cgo
/*
diff --git a/vendor/github.com/containerd/console/tc_openbsd_nocgo.go b/vendor/github.com/containerd/console/tc_openbsd_nocgo.go
index daccce20585a..dca92418b0e5 100644
--- a/vendor/github.com/containerd/console/tc_openbsd_nocgo.go
+++ b/vendor/github.com/containerd/console/tc_openbsd_nocgo.go
@@ -1,3 +1,4 @@
+//go:build openbsd && !cgo
// +build openbsd,!cgo
/*
diff --git a/vendor/github.com/containerd/console/tc_solaris_cgo.go b/vendor/github.com/containerd/console/tc_solaris_cgo.go
deleted file mode 100644
index e36a68edd1ed..000000000000
--- a/vendor/github.com/containerd/console/tc_solaris_cgo.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// +build solaris,cgo
-
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package console
-
-import (
- "os"
-
- "golang.org/x/sys/unix"
-)
-
-//#include <stdlib.h>
-import "C"
-
-const (
- cmdTcGet = unix.TCGETS
- cmdTcSet = unix.TCSETS
-)
-
-// ptsname retrieves the name of the first available pts for the given master.
-func ptsname(f *os.File) (string, error) {
- ptspath, err := C.ptsname(C.int(f.Fd()))
- if err != nil {
- return "", err
- }
- return C.GoString(ptspath), nil
-}
-
-// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
-// unlockpt should be called before opening the slave side of a pty.
-func unlockpt(f *os.File) error {
- if _, err := C.grantpt(C.int(f.Fd())); err != nil {
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/containerd/console/tc_solaris_nocgo.go b/vendor/github.com/containerd/console/tc_solaris_nocgo.go
deleted file mode 100644
index eb0bd2c36b83..000000000000
--- a/vendor/github.com/containerd/console/tc_solaris_nocgo.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// +build solaris,!cgo
-
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-//
-// Implementing the functions below requires cgo support. Non-cgo stubs
-// versions are defined below to enable cross-compilation of source code
-// that depends on these functions, but the resultant cross-compiled
-// binaries cannot actually be used. If the stub function(s) below are
-// actually invoked they will display an error message and cause the
-// calling process to exit.
-//
-
-package console
-
-import (
- "os"
-
- "golang.org/x/sys/unix"
-)
-
-const (
- cmdTcGet = unix.TCGETS
- cmdTcSet = unix.TCSETS
-)
-
-func ptsname(f *os.File) (string, error) {
- panic("ptsname() support requires cgo.")
-}
-
-func unlockpt(f *os.File) error {
- panic("unlockpt() support requires cgo.")
-}
diff --git a/vendor/github.com/containerd/console/tc_unix.go b/vendor/github.com/containerd/console/tc_unix.go
index a6bf01e8d1a2..2ecf188fca33 100644
--- a/vendor/github.com/containerd/console/tc_unix.go
+++ b/vendor/github.com/containerd/console/tc_unix.go
@@ -1,4 +1,5 @@
-// +build darwin freebsd linux netbsd openbsd solaris zos
+//go:build darwin || freebsd || linux || netbsd || openbsd || zos
+// +build darwin freebsd linux netbsd openbsd zos
/*
Copyright The containerd Authors.
@@ -83,7 +84,7 @@ func cfmakeraw(t unix.Termios) unix.Termios {
t.Oflag &^= unix.OPOST
t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
t.Cflag &^= (unix.CSIZE | unix.PARENB)
- t.Cflag &^= unix.CS8
+ t.Cflag |= unix.CS8
t.Cc[unix.VMIN] = 1
t.Cc[unix.VTIME] = 0
diff --git a/vendor/github.com/containerd/console/tc_zos.go b/vendor/github.com/containerd/console/tc_zos.go
index 4262eaf4cc07..fc90ba5fb86e 100644
--- a/vendor/github.com/containerd/console/tc_zos.go
+++ b/vendor/github.com/containerd/console/tc_zos.go
@@ -17,6 +17,9 @@
package console
import (
+ "os"
+ "strings"
+
"golang.org/x/sys/unix"
)
@@ -24,3 +27,13 @@ const (
cmdTcGet = unix.TCGETS
cmdTcSet = unix.TCSETS
)
+
+// unlockpt is a no-op on zos.
+func unlockpt(_ *os.File) error {
+ return nil
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+ return "/dev/ttyp" + strings.TrimPrefix(f.Name(), "/dev/ptyp"), nil
+}
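On z/OS the slave path is derived purely from the master's device name, pairing /dev/ptypNNNN with /dev/ttypNNNN. A quick illustration of the string mapping (the device number is hypothetical):

package main

import (
	"fmt"
	"strings"
)

func main() {
	name := "/dev/ptyp0004" // a hypothetical master device from openpt
	fmt.Println("/dev/ttyp" + strings.TrimPrefix(name, "/dev/ptyp")) // /dev/ttyp0004
}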
diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/doc.go b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/doc.go
index eef9ab6aef6f..f960350c1613 100644
--- a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/doc.go
+++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/doc.go
@@ -15,9 +15,3 @@
*/
package sandbox
-
-// Not implemented types introduced in later versions and included for API compatibility
-// Use of these types should only use not implemented errors
-
-type SandboxMetricsRequest struct{}
-type SandboxMetricsResponse struct{}
diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.pb.go b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.pb.go
index 5b3d4aa786f3..8701fbae5ce2 100644
--- a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.pb.go
+++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.pb.go
@@ -43,11 +43,12 @@ type CreateSandboxRequest struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"`
- BundlePath string `protobuf:"bytes,2,opt,name=bundle_path,json=bundlePath,proto3" json:"bundle_path,omitempty"`
- Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"`
- Options *anypb.Any `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"`
- NetnsPath string `protobuf:"bytes,5,opt,name=netns_path,json=netnsPath,proto3" json:"netns_path,omitempty"`
+ SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"`
+ BundlePath string `protobuf:"bytes,2,opt,name=bundle_path,json=bundlePath,proto3" json:"bundle_path,omitempty"`
+ Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"`
+ Options *anypb.Any `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"`
+ NetnsPath string `protobuf:"bytes,5,opt,name=netns_path,json=netnsPath,proto3" json:"netns_path,omitempty"`
+ Annotations map[string]string `protobuf:"bytes,6,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *CreateSandboxRequest) Reset() {
@@ -117,6 +118,13 @@ func (x *CreateSandboxRequest) GetNetnsPath() string {
return ""
}
+func (x *CreateSandboxRequest) GetAnnotations() map[string]string {
+ if x != nil {
+ return x.Annotations
+ }
+ return nil
+}
+
type CreateSandboxResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
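The new field lets a client pass sandbox annotations through to the shim at creation time. A construction sketch (the ID, path, and annotation key are illustrative only, not taken from this diff):

package main

import (
	"fmt"

	sandbox "github.com/containerd/containerd/api/runtime/sandbox/v1"
)

func main() {
	req := &sandbox.CreateSandboxRequest{
		SandboxID:  "sb-1",              // illustrative ID
		BundlePath: "/run/sandbox/sb-1", // illustrative path
		Annotations: map[string]string{ // forwarded to the shim as-is
			"example.org/owner": "demo",
		},
	}
	fmt.Println(req.GetAnnotations()["example.org/owner"])
}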
@@ -967,6 +975,100 @@ func (*ShutdownSandboxResponse) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{17}
}
+type SandboxMetricsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"`
+}
+
+func (x *SandboxMetricsRequest) Reset() {
+ *x = SandboxMetricsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SandboxMetricsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SandboxMetricsRequest) ProtoMessage() {}
+
+func (x *SandboxMetricsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SandboxMetricsRequest.ProtoReflect.Descriptor instead.
+func (*SandboxMetricsRequest) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *SandboxMetricsRequest) GetSandboxID() string {
+ if x != nil {
+ return x.SandboxID
+ }
+ return ""
+}
+
+type SandboxMetricsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Metrics *types.Metric `protobuf:"bytes,1,opt,name=metrics,proto3" json:"metrics,omitempty"`
+}
+
+func (x *SandboxMetricsResponse) Reset() {
+ *x = SandboxMetricsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SandboxMetricsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SandboxMetricsResponse) ProtoMessage() {}
+
+func (x *SandboxMetricsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SandboxMetricsResponse.ProtoReflect.Descriptor instead.
+func (*SandboxMetricsResponse) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *SandboxMetricsResponse) GetMetrics() *types.Metric {
+ if x != nil {
+ return x.Metrics
+ }
+ return nil
+}
+
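These two messages replace the placeholder types removed from doc.go above, turning sandbox metrics into a real part of the API. A server-side sketch, assuming the TTRPC sandbox service gained a matching SandboxMetrics RPC alongside these types (the handler body is hypothetical):

package shim

import (
	"context"

	sandbox "github.com/containerd/containerd/api/runtime/sandbox/v1"
	"github.com/containerd/containerd/api/types"
)

// metricsServer sketches the shim side of the new RPC.
type metricsServer struct{}

func (metricsServer) SandboxMetrics(_ context.Context, req *sandbox.SandboxMetricsRequest) (*sandbox.SandboxMetricsResponse, error) {
	// Resolve req.SandboxID to a running sandbox, collect its stats, and
	// wrap them in the opaque types.Metric envelope also used for tasks.
	_ = req.GetSandboxID()
	return &sandbox.SandboxMetricsResponse{Metrics: &types.Metric{}}, nil
}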
var File_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDesc = []byte{
@@ -987,179 +1089,210 @@ var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_r
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69,
0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd6, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d,
- 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x1f, 0x0a,
- 0x0b, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0a, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x2f,
- 0x0a, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65,
- 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x12,
- 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x6e, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x6e, 0x73, 0x50, 0x61, 0x74, 0x68, 0x22, 0x17,
- 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74,
- 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d,
- 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x63, 0x0a,
- 0x14, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74,
- 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
- 0x41, 0x74, 0x22, 0x30, 0x0a, 0x0f, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
- 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62,
- 0x6f, 0x78, 0x49, 0x64, 0x22, 0x4a, 0x0a, 0x10, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74,
- 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c,
- 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d,
- 0x22, 0x56, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
- 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64,
- 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
- 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x74, 0x69, 0x6d,
- 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x70,
- 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x91, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0xfe, 0x02, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f,
0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64,
0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61,
- 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x75, 0x6e, 0x64, 0x6c,
+ 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x75,
+ 0x6e, 0x64, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x6f, 0x6f, 0x74,
+ 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e,
+ 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79,
- 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x66, 0x0a, 0x0b, 0x61,
- 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x44, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75,
- 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31,
- 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74,
+ 0x6e, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e,
+ 0x65, 0x74, 0x6e, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x66, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69,
+ 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+ 0x22, 0x17, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, 0x0a, 0x13, 0x53, 0x74, 0x61,
+ 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22,
+ 0x63, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x64, 0x41, 0x74, 0x22, 0x30, 0x0a, 0x0f, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62,
+ 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e,
+ 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x4a, 0x0a, 0x10, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f,
+ 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x6c,
+ 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e,
+ 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f,
+ 0x72, 0x6d, 0x22, 0x56, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64,
+ 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61,
+ 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x6f,
+ 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x74,
+ 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x74,
+ 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x91, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64,
+ 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61,
+ 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x09, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
+ 0x6e, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x66, 0x0a,
+ 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e,
+ 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x33, 0x0a, 0x12, 0x57, 0x61, 0x69, 0x74, 0x53, 0x61, 0x6e,
+ 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73,
+ 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x6f, 0x0a, 0x13, 0x57, 0x61,
+ 0x69, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x55,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4f, 0x0a, 0x14, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76,
+ 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76, 0x65,
+ 0x72, 0x62, 0x6f, 0x73, 0x65, 0x22, 0x8b, 0x03, 0x0a, 0x15, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x10,
+ 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64,
+ 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x52, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
+ 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f,
+ 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2a,
+ 0x0a, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x49, 0x6e,
+ 0x66, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
- 0x02, 0x38, 0x01, 0x22, 0x33, 0x0a, 0x12, 0x57, 0x61, 0x69, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62,
- 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e,
- 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73,
- 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x6f, 0x0a, 0x13, 0x57, 0x61, 0x69, 0x74,
+ 0x02, 0x38, 0x01, 0x22, 0x2c, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49,
+ 0x64, 0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x37, 0x0a, 0x16, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e,
+ 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73,
+ 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x68,
+ 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x0a, 0x15, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
+ 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x4c, 0x0a,
+ 0x16, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x32, 0xbd, 0x08, 0x0a, 0x07,
+ 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x7a, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61,
+ 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53,
+ 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69,
+ 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64,
+ 0x62, 0x6f, 0x78, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+ 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
+ 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
+ 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e,
+ 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x61, 0x6e,
+ 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x08,
+ 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61,
+ 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72,
+ 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61,
+ 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72,
+ 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x74, 0x0a, 0x0b, 0x53, 0x74, 0x6f,
+ 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61,
+ 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e,
+ 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65,
+ 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x70,
0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
- 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x55, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x4f, 0x0a, 0x14, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61,
- 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
- 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72,
- 0x62, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76, 0x65, 0x72, 0x62,
- 0x6f, 0x73, 0x65, 0x22, 0x8b, 0x03, 0x0a, 0x15, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a,
- 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03,
- 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x14,
- 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
- 0x74, 0x61, 0x74, 0x65, 0x12, 0x52, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
- 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e,
- 0x76, 0x31, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x64, 0x41, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2a, 0x0a, 0x05,
- 0x65, 0x78, 0x74, 0x72, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e,
- 0x79, 0x52, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x49, 0x6e, 0x66, 0x6f,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
- 0x01, 0x22, 0x2c, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22,
- 0x0e, 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x37, 0x0a, 0x16, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e, 0x64, 0x62,
- 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e,
- 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73,
- 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x68, 0x75, 0x74,
- 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x32, 0xbe, 0x07, 0x0a, 0x07, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12,
- 0x7a, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
- 0x12, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75,
- 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31,
- 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
- 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62,
- 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64,
- 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x0c, 0x53,
- 0x74, 0x61, 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x32, 0x2e, 0x63, 0x6f,
+ 0x74, 0x0a, 0x0b, 0x57, 0x61, 0x69, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x31,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74,
+ 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x57,
+ 0x61, 0x69, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72,
+ 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76,
+ 0x31, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7a, 0x0a, 0x0d, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
+ 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64,
+ 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x6f,
0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65,
- 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x72,
- 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e,
- 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e,
- 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d,
- 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75,
- 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31,
- 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75,
+ 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x61, 0x6e, 0x64,
+ 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x66, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
+ 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75,
0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31,
- 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x74, 0x0a, 0x0b, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
- 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75,
- 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31,
- 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
- 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
- 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x74, 0x0a, 0x0b, 0x57, 0x61, 0x69, 0x74, 0x53,
- 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
- 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64,
- 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62,
- 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73,
- 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x53, 0x61,
- 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7a, 0x0a,
- 0x0d, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x33,
+ 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d,
+ 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e,
+ 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x53, 0x68,
+ 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x35, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69,
+ 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68,
+ 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
+ 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e,
+ 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7d, 0x0a, 0x0e,
+ 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x34,
0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74,
0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53,
- 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
- 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
- 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x0b, 0x50, 0x69, 0x6e,
- 0x67, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
- 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61,
- 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
+ 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
- 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61,
- 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
- 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62,
- 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61,
- 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d,
- 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x75,
- 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f,
- 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x75, 0x6e,
- 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2f, 0x76, 0x31, 0x3b,
- 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
+ 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x73, 0x61, 0x6e, 0x64,
+ 0x62, 0x6f, 0x78, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1174,7 +1307,7 @@ func file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_
return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescData
}
-var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes = make([]protoimpl.MessageInfo, 20)
+var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_goTypes = []interface{}{
(*CreateSandboxRequest)(nil), // 0: containerd.runtime.sandbox.v1.CreateSandboxRequest
(*CreateSandboxResponse)(nil), // 1: containerd.runtime.sandbox.v1.CreateSandboxResponse
@@ -1194,46 +1327,54 @@ var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_g
(*PingResponse)(nil), // 15: containerd.runtime.sandbox.v1.PingResponse
(*ShutdownSandboxRequest)(nil), // 16: containerd.runtime.sandbox.v1.ShutdownSandboxRequest
(*ShutdownSandboxResponse)(nil), // 17: containerd.runtime.sandbox.v1.ShutdownSandboxResponse
- nil, // 18: containerd.runtime.sandbox.v1.UpdateSandboxRequest.AnnotationsEntry
- nil, // 19: containerd.runtime.sandbox.v1.SandboxStatusResponse.InfoEntry
- (*types.Mount)(nil), // 20: containerd.types.Mount
- (*anypb.Any)(nil), // 21: google.protobuf.Any
- (*timestamppb.Timestamp)(nil), // 22: google.protobuf.Timestamp
- (*types.Platform)(nil), // 23: containerd.types.Platform
+ (*SandboxMetricsRequest)(nil), // 18: containerd.runtime.sandbox.v1.SandboxMetricsRequest
+ (*SandboxMetricsResponse)(nil), // 19: containerd.runtime.sandbox.v1.SandboxMetricsResponse
+ nil, // 20: containerd.runtime.sandbox.v1.CreateSandboxRequest.AnnotationsEntry
+ nil, // 21: containerd.runtime.sandbox.v1.UpdateSandboxRequest.AnnotationsEntry
+ nil, // 22: containerd.runtime.sandbox.v1.SandboxStatusResponse.InfoEntry
+ (*types.Mount)(nil), // 23: containerd.types.Mount
+ (*anypb.Any)(nil), // 24: google.protobuf.Any
+ (*timestamppb.Timestamp)(nil), // 25: google.protobuf.Timestamp
+ (*types.Platform)(nil), // 26: containerd.types.Platform
+ (*types.Metric)(nil), // 27: containerd.types.Metric
}
var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_depIdxs = []int32{
- 20, // 0: containerd.runtime.sandbox.v1.CreateSandboxRequest.rootfs:type_name -> containerd.types.Mount
- 21, // 1: containerd.runtime.sandbox.v1.CreateSandboxRequest.options:type_name -> google.protobuf.Any
- 22, // 2: containerd.runtime.sandbox.v1.StartSandboxResponse.created_at:type_name -> google.protobuf.Timestamp
- 23, // 3: containerd.runtime.sandbox.v1.PlatformResponse.platform:type_name -> containerd.types.Platform
- 21, // 4: containerd.runtime.sandbox.v1.UpdateSandboxRequest.resources:type_name -> google.protobuf.Any
- 18, // 5: containerd.runtime.sandbox.v1.UpdateSandboxRequest.annotations:type_name -> containerd.runtime.sandbox.v1.UpdateSandboxRequest.AnnotationsEntry
- 22, // 6: containerd.runtime.sandbox.v1.WaitSandboxResponse.exited_at:type_name -> google.protobuf.Timestamp
- 19, // 7: containerd.runtime.sandbox.v1.SandboxStatusResponse.info:type_name -> containerd.runtime.sandbox.v1.SandboxStatusResponse.InfoEntry
- 22, // 8: containerd.runtime.sandbox.v1.SandboxStatusResponse.created_at:type_name -> google.protobuf.Timestamp
- 22, // 9: containerd.runtime.sandbox.v1.SandboxStatusResponse.exited_at:type_name -> google.protobuf.Timestamp
- 21, // 10: containerd.runtime.sandbox.v1.SandboxStatusResponse.extra:type_name -> google.protobuf.Any
- 0, // 11: containerd.runtime.sandbox.v1.Sandbox.CreateSandbox:input_type -> containerd.runtime.sandbox.v1.CreateSandboxRequest
- 2, // 12: containerd.runtime.sandbox.v1.Sandbox.StartSandbox:input_type -> containerd.runtime.sandbox.v1.StartSandboxRequest
- 4, // 13: containerd.runtime.sandbox.v1.Sandbox.Platform:input_type -> containerd.runtime.sandbox.v1.PlatformRequest
- 6, // 14: containerd.runtime.sandbox.v1.Sandbox.StopSandbox:input_type -> containerd.runtime.sandbox.v1.StopSandboxRequest
- 9, // 15: containerd.runtime.sandbox.v1.Sandbox.WaitSandbox:input_type -> containerd.runtime.sandbox.v1.WaitSandboxRequest
- 12, // 16: containerd.runtime.sandbox.v1.Sandbox.SandboxStatus:input_type -> containerd.runtime.sandbox.v1.SandboxStatusRequest
- 14, // 17: containerd.runtime.sandbox.v1.Sandbox.PingSandbox:input_type -> containerd.runtime.sandbox.v1.PingRequest
- 16, // 18: containerd.runtime.sandbox.v1.Sandbox.ShutdownSandbox:input_type -> containerd.runtime.sandbox.v1.ShutdownSandboxRequest
- 1, // 19: containerd.runtime.sandbox.v1.Sandbox.CreateSandbox:output_type -> containerd.runtime.sandbox.v1.CreateSandboxResponse
- 3, // 20: containerd.runtime.sandbox.v1.Sandbox.StartSandbox:output_type -> containerd.runtime.sandbox.v1.StartSandboxResponse
- 5, // 21: containerd.runtime.sandbox.v1.Sandbox.Platform:output_type -> containerd.runtime.sandbox.v1.PlatformResponse
- 7, // 22: containerd.runtime.sandbox.v1.Sandbox.StopSandbox:output_type -> containerd.runtime.sandbox.v1.StopSandboxResponse
- 10, // 23: containerd.runtime.sandbox.v1.Sandbox.WaitSandbox:output_type -> containerd.runtime.sandbox.v1.WaitSandboxResponse
- 13, // 24: containerd.runtime.sandbox.v1.Sandbox.SandboxStatus:output_type -> containerd.runtime.sandbox.v1.SandboxStatusResponse
- 15, // 25: containerd.runtime.sandbox.v1.Sandbox.PingSandbox:output_type -> containerd.runtime.sandbox.v1.PingResponse
- 17, // 26: containerd.runtime.sandbox.v1.Sandbox.ShutdownSandbox:output_type -> containerd.runtime.sandbox.v1.ShutdownSandboxResponse
- 19, // [19:27] is the sub-list for method output_type
- 11, // [11:19] is the sub-list for method input_type
- 11, // [11:11] is the sub-list for extension type_name
- 11, // [11:11] is the sub-list for extension extendee
- 0, // [0:11] is the sub-list for field type_name
+ 23, // 0: containerd.runtime.sandbox.v1.CreateSandboxRequest.rootfs:type_name -> containerd.types.Mount
+ 24, // 1: containerd.runtime.sandbox.v1.CreateSandboxRequest.options:type_name -> google.protobuf.Any
+ 20, // 2: containerd.runtime.sandbox.v1.CreateSandboxRequest.annotations:type_name -> containerd.runtime.sandbox.v1.CreateSandboxRequest.AnnotationsEntry
+ 25, // 3: containerd.runtime.sandbox.v1.StartSandboxResponse.created_at:type_name -> google.protobuf.Timestamp
+ 26, // 4: containerd.runtime.sandbox.v1.PlatformResponse.platform:type_name -> containerd.types.Platform
+ 24, // 5: containerd.runtime.sandbox.v1.UpdateSandboxRequest.resources:type_name -> google.protobuf.Any
+ 21, // 6: containerd.runtime.sandbox.v1.UpdateSandboxRequest.annotations:type_name -> containerd.runtime.sandbox.v1.UpdateSandboxRequest.AnnotationsEntry
+ 25, // 7: containerd.runtime.sandbox.v1.WaitSandboxResponse.exited_at:type_name -> google.protobuf.Timestamp
+ 22, // 8: containerd.runtime.sandbox.v1.SandboxStatusResponse.info:type_name -> containerd.runtime.sandbox.v1.SandboxStatusResponse.InfoEntry
+ 25, // 9: containerd.runtime.sandbox.v1.SandboxStatusResponse.created_at:type_name -> google.protobuf.Timestamp
+ 25, // 10: containerd.runtime.sandbox.v1.SandboxStatusResponse.exited_at:type_name -> google.protobuf.Timestamp
+ 24, // 11: containerd.runtime.sandbox.v1.SandboxStatusResponse.extra:type_name -> google.protobuf.Any
+ 27, // 12: containerd.runtime.sandbox.v1.SandboxMetricsResponse.metrics:type_name -> containerd.types.Metric
+ 0, // 13: containerd.runtime.sandbox.v1.Sandbox.CreateSandbox:input_type -> containerd.runtime.sandbox.v1.CreateSandboxRequest
+ 2, // 14: containerd.runtime.sandbox.v1.Sandbox.StartSandbox:input_type -> containerd.runtime.sandbox.v1.StartSandboxRequest
+ 4, // 15: containerd.runtime.sandbox.v1.Sandbox.Platform:input_type -> containerd.runtime.sandbox.v1.PlatformRequest
+ 6, // 16: containerd.runtime.sandbox.v1.Sandbox.StopSandbox:input_type -> containerd.runtime.sandbox.v1.StopSandboxRequest
+ 9, // 17: containerd.runtime.sandbox.v1.Sandbox.WaitSandbox:input_type -> containerd.runtime.sandbox.v1.WaitSandboxRequest
+ 12, // 18: containerd.runtime.sandbox.v1.Sandbox.SandboxStatus:input_type -> containerd.runtime.sandbox.v1.SandboxStatusRequest
+ 14, // 19: containerd.runtime.sandbox.v1.Sandbox.PingSandbox:input_type -> containerd.runtime.sandbox.v1.PingRequest
+ 16, // 20: containerd.runtime.sandbox.v1.Sandbox.ShutdownSandbox:input_type -> containerd.runtime.sandbox.v1.ShutdownSandboxRequest
+ 18, // 21: containerd.runtime.sandbox.v1.Sandbox.SandboxMetrics:input_type -> containerd.runtime.sandbox.v1.SandboxMetricsRequest
+ 1, // 22: containerd.runtime.sandbox.v1.Sandbox.CreateSandbox:output_type -> containerd.runtime.sandbox.v1.CreateSandboxResponse
+ 3, // 23: containerd.runtime.sandbox.v1.Sandbox.StartSandbox:output_type -> containerd.runtime.sandbox.v1.StartSandboxResponse
+ 5, // 24: containerd.runtime.sandbox.v1.Sandbox.Platform:output_type -> containerd.runtime.sandbox.v1.PlatformResponse
+ 7, // 25: containerd.runtime.sandbox.v1.Sandbox.StopSandbox:output_type -> containerd.runtime.sandbox.v1.StopSandboxResponse
+ 10, // 26: containerd.runtime.sandbox.v1.Sandbox.WaitSandbox:output_type -> containerd.runtime.sandbox.v1.WaitSandboxResponse
+ 13, // 27: containerd.runtime.sandbox.v1.Sandbox.SandboxStatus:output_type -> containerd.runtime.sandbox.v1.SandboxStatusResponse
+ 15, // 28: containerd.runtime.sandbox.v1.Sandbox.PingSandbox:output_type -> containerd.runtime.sandbox.v1.PingResponse
+ 17, // 29: containerd.runtime.sandbox.v1.Sandbox.ShutdownSandbox:output_type -> containerd.runtime.sandbox.v1.ShutdownSandboxResponse
+ 19, // 30: containerd.runtime.sandbox.v1.Sandbox.SandboxMetrics:output_type -> containerd.runtime.sandbox.v1.SandboxMetricsResponse
+ 22, // [22:31] is the sub-list for method output_type
+ 13, // [13:22] is the sub-list for method input_type
+ 13, // [13:13] is the sub-list for extension type_name
+ 13, // [13:13] is the sub-list for extension extendee
+ 0, // [0:13] is the sub-list for field type_name
}
func init() { file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_init() }
@@ -1458,6 +1599,30 @@ func file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_
return nil
}
}
+ file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SandboxMetricsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SandboxMetricsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -1465,7 +1630,7 @@ func file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDesc,
NumEnums: 0,
- NumMessages: 20,
+ NumMessages: 23,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto
index a051f3ea354c..0cf801c909c2 100644
--- a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto
+++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto
@@ -23,6 +23,7 @@ import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/mount.proto";
import "github.com/containerd/containerd/api/types/platform.proto";
+import "github.com/containerd/containerd/api/types/metrics.proto";
option go_package = "github.com/containerd/containerd/api/runtime/sandbox/v1;sandbox";
@@ -34,7 +35,7 @@ service Sandbox {
// It is a good place to initialize sandbox environment.
rpc CreateSandbox(CreateSandboxRequest) returns (CreateSandboxResponse);
- // StartSandbox will start previsouly created sandbox.
+ // StartSandbox will start a previously created sandbox.
rpc StartSandbox(StartSandboxRequest) returns (StartSandboxResponse);
// Platform queries the platform the sandbox is going to run containers on.
@@ -44,7 +45,7 @@ service Sandbox {
// StopSandbox will stop existing sandbox instance
rpc StopSandbox(StopSandboxRequest) returns (StopSandboxResponse);
- // WaitSandbox blocks until sanbox exits.
+ // WaitSandbox blocks until sandbox exits.
rpc WaitSandbox(WaitSandboxRequest) returns (WaitSandboxResponse);
// SandboxStatus will return current status of the running sandbox instance
@@ -55,6 +56,9 @@ service Sandbox {
// ShutdownSandbox must shutdown shim instance.
rpc ShutdownSandbox(ShutdownSandboxRequest) returns (ShutdownSandboxResponse);
+
+ // SandboxMetrics retrieves metrics about a sandbox instance.
+ rpc SandboxMetrics(SandboxMetricsRequest) returns (SandboxMetricsResponse);
}
message CreateSandboxRequest {
@@ -63,6 +67,7 @@ message CreateSandboxRequest {
repeated containerd.types.Mount rootfs = 3;
google.protobuf.Any options = 4;
string netns_path = 5;
+ map<string, string> annotations = 6;
}
message CreateSandboxResponse {}
@@ -134,3 +139,11 @@ message ShutdownSandboxRequest {
}
message ShutdownSandboxResponse {}
+
+message SandboxMetricsRequest {
+ string sandbox_id = 1;
+}
+
+message SandboxMetricsResponse {
+ containerd.types.Metric metrics = 1;
+}
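
For orientation, the shim side of the new RPC only needs one extra method on whatever type already backs the Sandbox service. The following is a minimal sketch, not part of this patch: exampleShim is a hypothetical type, and a real implementation would fill the metric's id and Any-encoded data from the sandbox's actual stats. Only SandboxMetricsRequest, SandboxMetricsResponse, and containerd.types.Metric come from the API changes above.

package exampleshim

import (
	"context"

	api "github.com/containerd/containerd/api/runtime/sandbox/v1"
	"github.com/containerd/containerd/api/types"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// exampleShim is a hypothetical sandbox shim; only the method added by this
// patch is shown.
type exampleShim struct{}

// SandboxMetrics answers the new RPC with a stub containerd.types.Metric.
// A real shim would look up the sandbox named by the request's sandbox_id
// and populate the metric's id and typed data payload.
func (s *exampleShim) SandboxMetrics(ctx context.Context, req *api.SandboxMetricsRequest) (*api.SandboxMetricsResponse, error) {
	return &api.SandboxMetricsResponse{
		Metrics: &types.Metric{
			Timestamp: timestamppb.Now(),
		},
	}, nil
}
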
diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_grpc.pb.go b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_grpc.pb.go
index f7942498610f..aff8317c825f 100644
--- a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_grpc.pb.go
+++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_grpc.pb.go
@@ -25,14 +25,14 @@ type SandboxClient interface {
// CreateSandbox will be called right after sandbox shim instance launched.
// It is a good place to initialize sandbox environment.
CreateSandbox(ctx context.Context, in *CreateSandboxRequest, opts ...grpc.CallOption) (*CreateSandboxResponse, error)
- // StartSandbox will start previsouly created sandbox.
+ // StartSandbox will start a previously created sandbox.
StartSandbox(ctx context.Context, in *StartSandboxRequest, opts ...grpc.CallOption) (*StartSandboxResponse, error)
// Platform queries the platform the sandbox is going to run containers on.
// containerd will use this to generate a proper OCI spec.
Platform(ctx context.Context, in *PlatformRequest, opts ...grpc.CallOption) (*PlatformResponse, error)
// StopSandbox will stop existing sandbox instance
StopSandbox(ctx context.Context, in *StopSandboxRequest, opts ...grpc.CallOption) (*StopSandboxResponse, error)
- // WaitSandbox blocks until sanbox exits.
+ // WaitSandbox blocks until sandbox exits.
WaitSandbox(ctx context.Context, in *WaitSandboxRequest, opts ...grpc.CallOption) (*WaitSandboxResponse, error)
// SandboxStatus will return current status of the running sandbox instance
SandboxStatus(ctx context.Context, in *SandboxStatusRequest, opts ...grpc.CallOption) (*SandboxStatusResponse, error)
@@ -40,6 +40,8 @@ type SandboxClient interface {
PingSandbox(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error)
// ShutdownSandbox must shutdown shim instance.
ShutdownSandbox(ctx context.Context, in *ShutdownSandboxRequest, opts ...grpc.CallOption) (*ShutdownSandboxResponse, error)
+ // SandboxMetrics retrieves metrics about a sandbox instance.
+ SandboxMetrics(ctx context.Context, in *SandboxMetricsRequest, opts ...grpc.CallOption) (*SandboxMetricsResponse, error)
}
type sandboxClient struct {
@@ -122,6 +124,15 @@ func (c *sandboxClient) ShutdownSandbox(ctx context.Context, in *ShutdownSandbox
return out, nil
}
+func (c *sandboxClient) SandboxMetrics(ctx context.Context, in *SandboxMetricsRequest, opts ...grpc.CallOption) (*SandboxMetricsResponse, error) {
+ out := new(SandboxMetricsResponse)
+ err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/SandboxMetrics", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// SandboxServer is the server API for Sandbox service.
// All implementations must embed UnimplementedSandboxServer
// for forward compatibility
@@ -129,14 +140,14 @@ type SandboxServer interface {
// CreateSandbox will be called right after sandbox shim instance launched.
// It is a good place to initialize sandbox environment.
CreateSandbox(context.Context, *CreateSandboxRequest) (*CreateSandboxResponse, error)
- // StartSandbox will start previsouly created sandbox.
+ // StartSandbox will start a previously created sandbox.
StartSandbox(context.Context, *StartSandboxRequest) (*StartSandboxResponse, error)
// Platform queries the platform the sandbox is going to run containers on.
// containerd will use this to generate a proper OCI spec.
Platform(context.Context, *PlatformRequest) (*PlatformResponse, error)
// StopSandbox will stop existing sandbox instance
StopSandbox(context.Context, *StopSandboxRequest) (*StopSandboxResponse, error)
- // WaitSandbox blocks until sanbox exits.
+ // WaitSandbox blocks until sandbox exits.
WaitSandbox(context.Context, *WaitSandboxRequest) (*WaitSandboxResponse, error)
// SandboxStatus will return current status of the running sandbox instance
SandboxStatus(context.Context, *SandboxStatusRequest) (*SandboxStatusResponse, error)
@@ -144,6 +155,8 @@ type SandboxServer interface {
PingSandbox(context.Context, *PingRequest) (*PingResponse, error)
// ShutdownSandbox must shutdown shim instance.
ShutdownSandbox(context.Context, *ShutdownSandboxRequest) (*ShutdownSandboxResponse, error)
+ // SandboxMetrics retrieves metrics about a sandbox instance.
+ SandboxMetrics(context.Context, *SandboxMetricsRequest) (*SandboxMetricsResponse, error)
mustEmbedUnimplementedSandboxServer()
}
@@ -175,6 +188,9 @@ func (UnimplementedSandboxServer) PingSandbox(context.Context, *PingRequest) (*P
func (UnimplementedSandboxServer) ShutdownSandbox(context.Context, *ShutdownSandboxRequest) (*ShutdownSandboxResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ShutdownSandbox not implemented")
}
+func (UnimplementedSandboxServer) SandboxMetrics(context.Context, *SandboxMetricsRequest) (*SandboxMetricsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SandboxMetrics not implemented")
+}
func (UnimplementedSandboxServer) mustEmbedUnimplementedSandboxServer() {}
// UnsafeSandboxServer may be embedded to opt out of forward compatibility for this service.
@@ -332,6 +348,24 @@ func _Sandbox_ShutdownSandbox_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
+func _Sandbox_SandboxMetrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SandboxMetricsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SandboxServer).SandboxMetrics(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/SandboxMetrics",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SandboxServer).SandboxMetrics(ctx, req.(*SandboxMetricsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// Sandbox_ServiceDesc is the grpc.ServiceDesc for Sandbox service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -371,6 +405,10 @@ var Sandbox_ServiceDesc = grpc.ServiceDesc{
MethodName: "ShutdownSandbox",
Handler: _Sandbox_ShutdownSandbox_Handler,
},
+ {
+ MethodName: "SandboxMetrics",
+ Handler: _Sandbox_SandboxMetrics_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto",
diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_ttrpc.pb.go
index d935fe611f13..65bab2dfccef 100644
--- a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_ttrpc.pb.go
+++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_ttrpc.pb.go
@@ -16,6 +16,7 @@ type TTRPCSandboxService interface {
SandboxStatus(context.Context, *SandboxStatusRequest) (*SandboxStatusResponse, error)
PingSandbox(context.Context, *PingRequest) (*PingResponse, error)
ShutdownSandbox(context.Context, *ShutdownSandboxRequest) (*ShutdownSandboxResponse, error)
+ SandboxMetrics(context.Context, *SandboxMetricsRequest) (*SandboxMetricsResponse, error)
}
func RegisterTTRPCSandboxService(srv *ttrpc.Server, svc TTRPCSandboxService) {
@@ -77,6 +78,13 @@ func RegisterTTRPCSandboxService(srv *ttrpc.Server, svc TTRPCSandboxService) {
}
return svc.ShutdownSandbox(ctx, &req)
},
+ "SandboxMetrics": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req SandboxMetricsRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.SandboxMetrics(ctx, &req)
+ },
},
})
}
@@ -154,3 +162,11 @@ func (c *ttrpcsandboxClient) ShutdownSandbox(ctx context.Context, req *ShutdownS
}
return &resp, nil
}
+
+func (c *ttrpcsandboxClient) SandboxMetrics(ctx context.Context, req *SandboxMetricsRequest) (*SandboxMetricsResponse, error) {
+ var resp SandboxMetricsResponse
+ if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "SandboxMetrics", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
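
The ttrpc path mirrors the gRPC one; shims are typically reached over ttrpc, and the generated client issues the same method name via ttrpc's Call, as the ttrpcsandboxClient method above shows. Another sketch: the socket path is made up, and the NewTTRPCSandboxClient constructor name is inferred from the generated-client pattern visible in the new containers_ttrpc.pb.go file below.

package main

import (
	"context"
	"log"
	"net"

	api "github.com/containerd/containerd/api/runtime/sandbox/v1"
	"github.com/containerd/ttrpc"
)

func main() {
	// Hypothetical shim socket; the path is an assumption for this sketch.
	conn, err := net.Dial("unix", "/run/example-shim.sock")
	if err != nil {
		log.Fatal(err)
	}
	client := ttrpc.NewClient(conn)
	defer client.Close()

	// Resolves to the "SandboxMetrics" ttrpc method registered above.
	resp, err := api.NewTTRPCSandboxClient(client).SandboxMetrics(context.Background(), &api.SandboxMetricsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(resp.GetMetrics())
}
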
diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_ttrpc.pb.go
new file mode 100644
index 000000000000..8090011df33a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_ttrpc.pb.go
@@ -0,0 +1,174 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/containers/v1/containers.proto
+package containers
+
+import (
+ context "context"
+ ttrpc "github.com/containerd/ttrpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type TTRPCContainersService interface {
+ Get(context.Context, *GetContainerRequest) (*GetContainerResponse, error)
+ List(context.Context, *ListContainersRequest) (*ListContainersResponse, error)
+ ListStream(context.Context, *ListContainersRequest, TTRPCContainers_ListStreamServer) error
+ Create(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error)
+ Update(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error)
+ Delete(context.Context, *DeleteContainerRequest) (*emptypb.Empty, error)
+}
+
+type TTRPCContainers_ListStreamServer interface {
+ Send(*ListContainerMessage) error
+ ttrpc.StreamServer
+}
+
+type ttrpccontainersListStreamServer struct {
+ ttrpc.StreamServer
+}
+
+func (x *ttrpccontainersListStreamServer) Send(m *ListContainerMessage) error {
+ return x.StreamServer.SendMsg(m)
+}
+
+func RegisterTTRPCContainersService(srv *ttrpc.Server, svc TTRPCContainersService) {
+ srv.RegisterService("containerd.services.containers.v1.Containers", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Get": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req GetContainerRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Get(ctx, &req)
+ },
+ "List": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ListContainersRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.List(ctx, &req)
+ },
+ "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req CreateContainerRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Create(ctx, &req)
+ },
+ "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req UpdateContainerRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Update(ctx, &req)
+ },
+ "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req DeleteContainerRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Delete(ctx, &req)
+ },
+ },
+ Streams: map[string]ttrpc.Stream{
+ "ListStream": {
+ Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
+ m := new(ListContainersRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return nil, svc.ListStream(ctx, m, &ttrpccontainersListStreamServer{stream})
+ },
+ StreamingClient: false,
+ StreamingServer: true,
+ },
+ },
+ })
+}
+
+type TTRPCContainersClient interface {
+ Get(context.Context, *GetContainerRequest) (*GetContainerResponse, error)
+ List(context.Context, *ListContainersRequest) (*ListContainersResponse, error)
+ ListStream(context.Context, *ListContainersRequest) (TTRPCContainers_ListStreamClient, error)
+ Create(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error)
+ Update(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error)
+ Delete(context.Context, *DeleteContainerRequest) (*emptypb.Empty, error)
+}
+
+type ttrpccontainersClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCContainersClient(client *ttrpc.Client) TTRPCContainersClient {
+ return &ttrpccontainersClient{
+ client: client,
+ }
+}
+
+func (c *ttrpccontainersClient) Get(ctx context.Context, req *GetContainerRequest) (*GetContainerResponse, error) {
+ var resp GetContainerResponse
+ if err := c.client.Call(ctx, "containerd.services.containers.v1.Containers", "Get", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontainersClient) List(ctx context.Context, req *ListContainersRequest) (*ListContainersResponse, error) {
+ var resp ListContainersResponse
+ if err := c.client.Call(ctx, "containerd.services.containers.v1.Containers", "List", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontainersClient) ListStream(ctx context.Context, req *ListContainersRequest) (TTRPCContainers_ListStreamClient, error) {
+ stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
+ StreamingClient: false,
+ StreamingServer: true,
+ }, "containerd.services.containers.v1.Containers", "ListStream", req)
+ if err != nil {
+ return nil, err
+ }
+ x := &ttrpccontainersListStreamClient{stream}
+ return x, nil
+}
+
+type TTRPCContainers_ListStreamClient interface {
+ Recv() (*ListContainerMessage, error)
+ ttrpc.ClientStream
+}
+
+type ttrpccontainersListStreamClient struct {
+ ttrpc.ClientStream
+}
+
+func (x *ttrpccontainersListStreamClient) Recv() (*ListContainerMessage, error) {
+ m := new(ListContainerMessage)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *ttrpccontainersClient) Create(ctx context.Context, req *CreateContainerRequest) (*CreateContainerResponse, error) {
+ var resp CreateContainerResponse
+ if err := c.client.Call(ctx, "containerd.services.containers.v1.Containers", "Create", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontainersClient) Update(ctx context.Context, req *UpdateContainerRequest) (*UpdateContainerResponse, error) {
+ var resp UpdateContainerResponse
+ if err := c.client.Call(ctx, "containerd.services.containers.v1.Containers", "Update", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontainersClient) Delete(ctx context.Context, req *DeleteContainerRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.containers.v1.Containers", "Delete", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
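
ListStream is the only streaming method here; everything else maps to plain unary calls. A sketch of draining the stream, assuming the client is already wired to a live ttrpc connection:

```go
package sketch

import (
	"context"
	"errors"
	"fmt"
	"io"

	containers "github.com/containerd/containerd/api/services/containers/v1"
)

// listAll drains the server-streaming ListStream method exposed by the
// generated TTRPCContainersClient above.
func listAll(ctx context.Context, tc containers.TTRPCContainersClient) error {
	stream, err := tc.ListStream(ctx, &containers.ListContainersRequest{})
	if err != nil {
		return err
	}
	for {
		msg, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil // server closed the stream: listing complete
		}
		if err != nil {
			return err
		}
		fmt.Println(msg.Container.ID)
	}
}
```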
diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content_ttrpc.pb.go
new file mode 100644
index 000000000000..efa241a2b256
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content_ttrpc.pb.go
@@ -0,0 +1,311 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/content/v1/content.proto
+package content
+
+import (
+ context "context"
+ ttrpc "github.com/containerd/ttrpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type TTRPCContentService interface {
+ Info(context.Context, *InfoRequest) (*InfoResponse, error)
+ Update(context.Context, *UpdateRequest) (*UpdateResponse, error)
+ List(context.Context, *ListContentRequest, TTRPCContent_ListServer) error
+ Delete(context.Context, *DeleteContentRequest) (*emptypb.Empty, error)
+ Read(context.Context, *ReadContentRequest, TTRPCContent_ReadServer) error
+ Status(context.Context, *StatusRequest) (*StatusResponse, error)
+ ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error)
+ Write(context.Context, TTRPCContent_WriteServer) error
+ Abort(context.Context, *AbortRequest) (*emptypb.Empty, error)
+}
+
+type TTRPCContent_ListServer interface {
+ Send(*ListContentResponse) error
+ ttrpc.StreamServer
+}
+
+type ttrpccontentListServer struct {
+ ttrpc.StreamServer
+}
+
+func (x *ttrpccontentListServer) Send(m *ListContentResponse) error {
+ return x.StreamServer.SendMsg(m)
+}
+
+type TTRPCContent_ReadServer interface {
+ Send(*ReadContentResponse) error
+ ttrpc.StreamServer
+}
+
+type ttrpccontentReadServer struct {
+ ttrpc.StreamServer
+}
+
+func (x *ttrpccontentReadServer) Send(m *ReadContentResponse) error {
+ return x.StreamServer.SendMsg(m)
+}
+
+type TTRPCContent_WriteServer interface {
+ Send(*WriteContentResponse) error
+ Recv() (*WriteContentRequest, error)
+ ttrpc.StreamServer
+}
+
+type ttrpccontentWriteServer struct {
+ ttrpc.StreamServer
+}
+
+func (x *ttrpccontentWriteServer) Send(m *WriteContentResponse) error {
+ return x.StreamServer.SendMsg(m)
+}
+
+func (x *ttrpccontentWriteServer) Recv() (*WriteContentRequest, error) {
+ m := new(WriteContentRequest)
+ if err := x.StreamServer.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func RegisterTTRPCContentService(srv *ttrpc.Server, svc TTRPCContentService) {
+ srv.RegisterService("containerd.services.content.v1.Content", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Info": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req InfoRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Info(ctx, &req)
+ },
+ "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req UpdateRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Update(ctx, &req)
+ },
+ "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req DeleteContentRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Delete(ctx, &req)
+ },
+ "Status": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req StatusRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Status(ctx, &req)
+ },
+ "ListStatuses": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ListStatusesRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.ListStatuses(ctx, &req)
+ },
+ "Abort": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req AbortRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Abort(ctx, &req)
+ },
+ },
+ Streams: map[string]ttrpc.Stream{
+ "List": {
+ Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
+ m := new(ListContentRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return nil, svc.List(ctx, m, &ttrpccontentListServer{stream})
+ },
+ StreamingClient: false,
+ StreamingServer: true,
+ },
+ "Read": {
+ Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
+ m := new(ReadContentRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return nil, svc.Read(ctx, m, &ttrpccontentReadServer{stream})
+ },
+ StreamingClient: false,
+ StreamingServer: true,
+ },
+ "Write": {
+ Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
+ return nil, svc.Write(ctx, &ttrpccontentWriteServer{stream})
+ },
+ StreamingClient: true,
+ StreamingServer: true,
+ },
+ },
+ })
+}
+
+type TTRPCContentClient interface {
+ Info(context.Context, *InfoRequest) (*InfoResponse, error)
+ Update(context.Context, *UpdateRequest) (*UpdateResponse, error)
+ List(context.Context, *ListContentRequest) (TTRPCContent_ListClient, error)
+ Delete(context.Context, *DeleteContentRequest) (*emptypb.Empty, error)
+ Read(context.Context, *ReadContentRequest) (TTRPCContent_ReadClient, error)
+ Status(context.Context, *StatusRequest) (*StatusResponse, error)
+ ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error)
+ Write(context.Context) (TTRPCContent_WriteClient, error)
+ Abort(context.Context, *AbortRequest) (*emptypb.Empty, error)
+}
+
+type ttrpccontentClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCContentClient(client *ttrpc.Client) TTRPCContentClient {
+ return &ttrpccontentClient{
+ client: client,
+ }
+}
+
+func (c *ttrpccontentClient) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) {
+ var resp InfoResponse
+ if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Info", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontentClient) Update(ctx context.Context, req *UpdateRequest) (*UpdateResponse, error) {
+ var resp UpdateResponse
+ if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Update", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontentClient) List(ctx context.Context, req *ListContentRequest) (TTRPCContent_ListClient, error) {
+ stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
+ StreamingClient: false,
+ StreamingServer: true,
+ }, "containerd.services.content.v1.Content", "List", req)
+ if err != nil {
+ return nil, err
+ }
+ x := &ttrpccontentListClient{stream}
+ return x, nil
+}
+
+type TTRPCContent_ListClient interface {
+ Recv() (*ListContentResponse, error)
+ ttrpc.ClientStream
+}
+
+type ttrpccontentListClient struct {
+ ttrpc.ClientStream
+}
+
+func (x *ttrpccontentListClient) Recv() (*ListContentResponse, error) {
+ m := new(ListContentResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *ttrpccontentClient) Delete(ctx context.Context, req *DeleteContentRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Delete", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontentClient) Read(ctx context.Context, req *ReadContentRequest) (TTRPCContent_ReadClient, error) {
+ stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
+ StreamingClient: false,
+ StreamingServer: true,
+ }, "containerd.services.content.v1.Content", "Read", req)
+ if err != nil {
+ return nil, err
+ }
+ x := &ttrpccontentReadClient{stream}
+ return x, nil
+}
+
+type TTRPCContent_ReadClient interface {
+ Recv() (*ReadContentResponse, error)
+ ttrpc.ClientStream
+}
+
+type ttrpccontentReadClient struct {
+ ttrpc.ClientStream
+}
+
+func (x *ttrpccontentReadClient) Recv() (*ReadContentResponse, error) {
+ m := new(ReadContentResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *ttrpccontentClient) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) {
+ var resp StatusResponse
+ if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Status", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontentClient) ListStatuses(ctx context.Context, req *ListStatusesRequest) (*ListStatusesResponse, error) {
+ var resp ListStatusesResponse
+ if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "ListStatuses", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontentClient) Write(ctx context.Context) (TTRPCContent_WriteClient, error) {
+ stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
+ StreamingClient: true,
+ StreamingServer: true,
+ }, "containerd.services.content.v1.Content", "Write", nil)
+ if err != nil {
+ return nil, err
+ }
+ x := &ttrpccontentWriteClient{stream}
+ return x, nil
+}
+
+type TTRPCContent_WriteClient interface {
+ Send(*WriteContentRequest) error
+ Recv() (*WriteContentResponse, error)
+ ttrpc.ClientStream
+}
+
+type ttrpccontentWriteClient struct {
+ ttrpc.ClientStream
+}
+
+func (x *ttrpccontentWriteClient) Send(m *WriteContentRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *ttrpccontentWriteClient) Recv() (*WriteContentResponse, error) {
+ m := new(WriteContentResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *ttrpccontentClient) Abort(ctx context.Context, req *AbortRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Abort", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
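
Write is the one bidirectional method in this file. A hedged sketch of a single Send/Recv round-trip; real writers loop per chunk and finish with a COMMIT action, and the ref name here is a placeholder:

```go
package sketch

import (
	"context"
	"fmt"

	content "github.com/containerd/containerd/api/services/content/v1"
)

// writeChunk sends one WRITE action carrying data and reads back the
// acknowledged offset from the server.
func writeChunk(ctx context.Context, cc content.TTRPCContentClient, data []byte) error {
	wc, err := cc.Write(ctx)
	if err != nil {
		return err
	}
	if err := wc.Send(&content.WriteContentRequest{
		Action: content.WriteAction_WRITE, // enum name per the generated content.pb.go
		Ref:    "example-upload",          // hypothetical upload reference
		Data:   data,
	}); err != nil {
		return err
	}
	resp, err := wc.Recv()
	if err != nil {
		return err
	}
	fmt.Printf("acknowledged offset: %d\n", resp.Offset)
	return wc.CloseSend()
}
```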
diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
index 490c2dc0c98f..1c8db0b0219d 100644
--- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
@@ -181,8 +181,11 @@ type DiffRequest struct {
// Labels are the labels to apply to the generated content
// on content store commit.
Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // SourceDateEpoch specifies the timestamp used for whiteouts to provide control for reproducibility.
+ // SourceDateEpoch specifies the timestamp used to control reproducibility.
// See also https://reproducible-builds.org/docs/source-date-epoch/ .
+ //
+ // Since containerd v2.0, the whiteout timestamps are set to zero (1970-01-01),
+ // not to the source date epoch.
SourceDateEpoch *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=source_date_epoch,json=sourceDateEpoch,proto3" json:"source_date_epoch,omitempty"`
}
diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto
index c5d7e43ee478..2aa5ad8a1109 100644
--- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto
+++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto
@@ -76,8 +76,11 @@ message DiffRequest {
// on content store commit.
map<string, string> labels = 5;
- // SourceDateEpoch specifies the timestamp used for whiteouts to provide control for reproducibility.
+ // SourceDateEpoch specifies the timestamp used to control reproducibility.
// See also https://reproducible-builds.org/docs/source-date-epoch/ .
+ //
+ // Since containerd v2.0, the whiteout timestamps are set to zero (1970-01-01),
+ // not to the source date epoch.
google.protobuf.Timestamp source_date_epoch = 6;
}
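
The revised comments pin down a v2.0 behavior change: whiteout entries now always carry a zero timestamp, while SourceDateEpoch continues to govern the remaining timestamps of a generated diff for reproducibility. A sketch of a client populating the field from the conventional environment variable; the helper and ref name are illustrative:

```go
package sketch

import (
	"os"
	"strconv"
	"time"

	diffapi "github.com/containerd/containerd/api/services/diff/v1"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// buildDiffRequest copies $SOURCE_DATE_EPOCH into the request so the
// resulting layer diff is reproducible; error handling is deliberately minimal.
func buildDiffRequest() (*diffapi.DiffRequest, error) {
	req := &diffapi.DiffRequest{Ref: "example-diff"} // hypothetical ref
	if v := os.Getenv("SOURCE_DATE_EPOCH"); v != "" {
		sec, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return nil, err
		}
		req.SourceDateEpoch = timestamppb.New(time.Unix(sec, 0))
	}
	return req, nil
}
```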
diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff_ttrpc.pb.go
new file mode 100644
index 000000000000..0aa063fb3d6c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff_ttrpc.pb.go
@@ -0,0 +1,60 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/diff/v1/diff.proto
+package diff
+
+import (
+ context "context"
+ ttrpc "github.com/containerd/ttrpc"
+)
+
+type TTRPCDiffService interface {
+ Apply(context.Context, *ApplyRequest) (*ApplyResponse, error)
+ Diff(context.Context, *DiffRequest) (*DiffResponse, error)
+}
+
+func RegisterTTRPCDiffService(srv *ttrpc.Server, svc TTRPCDiffService) {
+ srv.RegisterService("containerd.services.diff.v1.Diff", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Apply": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ApplyRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Apply(ctx, &req)
+ },
+ "Diff": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req DiffRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Diff(ctx, &req)
+ },
+ },
+ })
+}
+
+type ttrpcdiffClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCDiffClient(client *ttrpc.Client) TTRPCDiffService {
+ return &ttrpcdiffClient{
+ client: client,
+ }
+}
+
+func (c *ttrpcdiffClient) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
+ var resp ApplyResponse
+ if err := c.client.Call(ctx, "containerd.services.diff.v1.Diff", "Apply", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcdiffClient) Diff(ctx context.Context, req *DiffRequest) (*DiffResponse, error) {
+ var resp DiffResponse
+ if err := c.client.Call(ctx, "containerd.services.diff.v1.Diff", "Diff", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go
index b7f86da86951..4470a663e8e2 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go
@@ -16,3 +16,8 @@
// Package events defines the event pushing and subscription service.
package events
+
+import types "github.com/containerd/containerd/api/types"
+
+// Deprecated: Use [types.Envelope].
+type Envelope = types.Envelope
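
Keeping a deprecated alias in the old package means callers that still import events/v1 keep compiling while the canonical Envelope moves to api/types. Because a type alias is the identical type, no conversion is ever needed; a minimal illustration:

```go
package main

import (
	eventsapi "github.com/containerd/containerd/api/services/events/v1"
	"github.com/containerd/containerd/api/types"
)

func main() {
	// *eventsapi.Envelope and *types.Envelope are the same type, so values
	// move between the old and new import paths without conversion.
	var e *eventsapi.Envelope = &types.Envelope{Topic: "/images/create"}
	_ = e
}
```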
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
index 054d36ee20b7..9a4f09328ac6 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
@@ -22,12 +22,11 @@
package events
import (
- _ "github.com/containerd/containerd/api/types"
+ types "github.com/containerd/containerd/api/types"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
emptypb "google.golang.org/protobuf/types/known/emptypb"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
@@ -99,7 +98,7 @@ type ForwardRequest struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"`
+ Envelope *types.Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"`
}
func (x *ForwardRequest) Reset() {
@@ -134,7 +133,7 @@ func (*ForwardRequest) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescGZIP(), []int{1}
}
-func (x *ForwardRequest) GetEnvelope() *Envelope {
+func (x *ForwardRequest) GetEnvelope() *types.Envelope {
if x != nil {
return x.Envelope
}
@@ -188,77 +187,6 @@ func (x *SubscribeRequest) GetFilters() []string {
return nil
}
-type Envelope struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
- Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"`
- Event *anypb.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"`
-}
-
-func (x *Envelope) Reset() {
- *x = Envelope{}
- if protoimpl.UnsafeEnabled {
- mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Envelope) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Envelope) ProtoMessage() {}
-
-func (x *Envelope) ProtoReflect() protoreflect.Message {
- mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Envelope.ProtoReflect.Descriptor instead.
-func (*Envelope) Descriptor() ([]byte, []int) {
- return file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *Envelope) GetTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.Timestamp
- }
- return nil
-}
-
-func (x *Envelope) GetNamespace() string {
- if x != nil {
- return x.Namespace
- }
- return ""
-}
-
-func (x *Envelope) GetTopic() string {
- if x != nil {
- return x.Topic
- }
- return ""
-}
-
-func (x *Envelope) GetEvent() *anypb.Any {
- if x != nil {
- return x.Event
- }
- return nil
-}
-
var File_github_com_containerd_containerd_api_services_events_v1_events_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDesc = []byte{
@@ -268,63 +196,48 @@ var file_github_com_containerd_containerd_api_services_events_v1_events_proto_ra
0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e,
- 0x74, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x74, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e,
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65,
- 0x73, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d,
- 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x52, 0x0a, 0x0e, 0x50, 0x75,
- 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05,
- 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70,
- 0x69, 0x63, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x55,
- 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x43, 0x0a, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e,
- 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6e, 0x76,
- 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x22, 0x2c, 0x0a, 0x10, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
- 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c,
- 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74,
- 0x65, 0x72, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65,
- 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
- 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61,
- 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e,
- 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69,
- 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x2a,
- 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x3a, 0x04, 0x80, 0xb9, 0x1f, 0x01,
- 0x32, 0x95, 0x02, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x50,
- 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
- 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65,
- 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x50, 0x0a,
- 0x07, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x12, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
- 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65,
- 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12,
- 0x67, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x2f, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62,
- 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e,
+ 0x73, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61,
+ 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x52, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x2a, 0x0a,
+ 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
+ 0x6e, 0x79, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x48, 0x0a, 0x0e, 0x46, 0x6f, 0x72,
+ 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x65,
+ 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c,
+ 0x6f, 0x70, 0x65, 0x22, 0x2c, 0x0a, 0x10, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x73, 0x32, 0x88, 0x02, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x07,
+ 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
+ 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x50,
+ 0x0a, 0x07, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x12, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
+ 0x12, 0x5a, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x2f, 0x2e,
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e,
- 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x30, 0x01, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68,
- 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
- 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69,
- 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73,
- 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x33,
+ 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75,
+ 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x30, 0x01, 0x42, 0x40, 0x5a, 0x3e,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -339,32 +252,29 @@ func file_github_com_containerd_containerd_api_services_events_v1_events_proto_r
return file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescData
}
-var file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_github_com_containerd_containerd_api_services_events_v1_events_proto_goTypes = []interface{}{
- (*PublishRequest)(nil), // 0: containerd.services.events.v1.PublishRequest
- (*ForwardRequest)(nil), // 1: containerd.services.events.v1.ForwardRequest
- (*SubscribeRequest)(nil), // 2: containerd.services.events.v1.SubscribeRequest
- (*Envelope)(nil), // 3: containerd.services.events.v1.Envelope
- (*anypb.Any)(nil), // 4: google.protobuf.Any
- (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp
- (*emptypb.Empty)(nil), // 6: google.protobuf.Empty
+ (*PublishRequest)(nil), // 0: containerd.services.events.v1.PublishRequest
+ (*ForwardRequest)(nil), // 1: containerd.services.events.v1.ForwardRequest
+ (*SubscribeRequest)(nil), // 2: containerd.services.events.v1.SubscribeRequest
+ (*anypb.Any)(nil), // 3: google.protobuf.Any
+ (*types.Envelope)(nil), // 4: containerd.types.Envelope
+ (*emptypb.Empty)(nil), // 5: google.protobuf.Empty
}
var file_github_com_containerd_containerd_api_services_events_v1_events_proto_depIdxs = []int32{
- 4, // 0: containerd.services.events.v1.PublishRequest.event:type_name -> google.protobuf.Any
- 3, // 1: containerd.services.events.v1.ForwardRequest.envelope:type_name -> containerd.services.events.v1.Envelope
- 5, // 2: containerd.services.events.v1.Envelope.timestamp:type_name -> google.protobuf.Timestamp
- 4, // 3: containerd.services.events.v1.Envelope.event:type_name -> google.protobuf.Any
- 0, // 4: containerd.services.events.v1.Events.Publish:input_type -> containerd.services.events.v1.PublishRequest
- 1, // 5: containerd.services.events.v1.Events.Forward:input_type -> containerd.services.events.v1.ForwardRequest
- 2, // 6: containerd.services.events.v1.Events.Subscribe:input_type -> containerd.services.events.v1.SubscribeRequest
- 6, // 7: containerd.services.events.v1.Events.Publish:output_type -> google.protobuf.Empty
- 6, // 8: containerd.services.events.v1.Events.Forward:output_type -> google.protobuf.Empty
- 3, // 9: containerd.services.events.v1.Events.Subscribe:output_type -> containerd.services.events.v1.Envelope
- 7, // [7:10] is the sub-list for method output_type
- 4, // [4:7] is the sub-list for method input_type
- 4, // [4:4] is the sub-list for extension type_name
- 4, // [4:4] is the sub-list for extension extendee
- 0, // [0:4] is the sub-list for field type_name
+ 3, // 0: containerd.services.events.v1.PublishRequest.event:type_name -> google.protobuf.Any
+ 4, // 1: containerd.services.events.v1.ForwardRequest.envelope:type_name -> containerd.types.Envelope
+ 0, // 2: containerd.services.events.v1.Events.Publish:input_type -> containerd.services.events.v1.PublishRequest
+ 1, // 3: containerd.services.events.v1.Events.Forward:input_type -> containerd.services.events.v1.ForwardRequest
+ 2, // 4: containerd.services.events.v1.Events.Subscribe:input_type -> containerd.services.events.v1.SubscribeRequest
+ 5, // 5: containerd.services.events.v1.Events.Publish:output_type -> google.protobuf.Empty
+ 5, // 6: containerd.services.events.v1.Events.Forward:output_type -> google.protobuf.Empty
+ 4, // 7: containerd.services.events.v1.Events.Subscribe:output_type -> containerd.types.Envelope
+ 5, // [5:8] is the sub-list for method output_type
+ 2, // [2:5] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
}
func init() { file_github_com_containerd_containerd_api_services_events_v1_events_proto_init() }
@@ -409,18 +319,6 @@ func file_github_com_containerd_containerd_api_services_events_v1_events_proto_i
return nil
}
}
- file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Envelope); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -428,7 +326,7 @@ func file_github_com_containerd_containerd_api_services_events_v1_events_proto_i
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDesc,
NumEnums: 0,
- NumMessages: 4,
+ NumMessages: 3,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto
index f22575996786..3ea43e38acd7 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto
@@ -18,10 +18,9 @@ syntax = "proto3";
package containerd.services.events.v1;
-import "github.com/containerd/containerd/api/types/fieldpath.proto";
+import "github.com/containerd/containerd/api/types/event.proto";
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
-import "google/protobuf/timestamp.proto";
option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
@@ -46,7 +45,7 @@ service Events {
// from all namespaces unless otherwise specified. If this is not desired,
// a filter can be provided in the format 'namespace==<namespace>' to
// restrict the received events.
- rpc Subscribe(SubscribeRequest) returns (stream Envelope);
+ rpc Subscribe(SubscribeRequest) returns (stream containerd.types.Envelope);
}
message PublishRequest {
@@ -55,17 +54,9 @@ message PublishRequest {
}
message ForwardRequest {
- Envelope envelope = 1;
+ containerd.types.Envelope envelope = 1;
}
message SubscribeRequest {
repeated string filters = 1;
}
-
-message Envelope {
- option (containerd.types.fieldpath) = true;
- google.protobuf.Timestamp timestamp = 1;
- string namespace = 2;
- string topic = 3;
- google.protobuf.Any event = 4;
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/events_grpc.pb.go
index 768306555cf1..acc9cdbb9344 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/events_grpc.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events_grpc.pb.go
@@ -8,6 +8,7 @@ package events
import (
context "context"
+ types "github.com/containerd/containerd/api/types"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
@@ -86,7 +87,7 @@ func (c *eventsClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts
}
type Events_SubscribeClient interface {
- Recv() (*Envelope, error)
+ Recv() (*types.Envelope, error)
grpc.ClientStream
}
@@ -94,8 +95,8 @@ type eventsSubscribeClient struct {
grpc.ClientStream
}
-func (x *eventsSubscribeClient) Recv() (*Envelope, error) {
- m := new(Envelope)
+func (x *eventsSubscribeClient) Recv() (*types.Envelope, error) {
+ m := new(types.Envelope)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
@@ -199,7 +200,7 @@ func _Events_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error
}
type Events_SubscribeServer interface {
- Send(*Envelope) error
+ Send(*types.Envelope) error
grpc.ServerStream
}
@@ -207,7 +208,7 @@ type eventsSubscribeServer struct {
grpc.ServerStream
}
-func (x *eventsSubscribeServer) Send(m *Envelope) error {
+func (x *eventsSubscribeServer) Send(m *types.Envelope) error {
return x.ServerStream.SendMsg(m)
}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/events_ttrpc.pb.go
new file mode 100644
index 000000000000..ad8e3fecb902
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events_ttrpc.pb.go
@@ -0,0 +1,124 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/events/v1/events.proto
+package events
+
+import (
+ context "context"
+ types "github.com/containerd/containerd/api/types"
+ ttrpc "github.com/containerd/ttrpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type TTRPCEventsService interface {
+ Publish(context.Context, *PublishRequest) (*emptypb.Empty, error)
+ Forward(context.Context, *ForwardRequest) (*emptypb.Empty, error)
+ Subscribe(context.Context, *SubscribeRequest, TTRPCEvents_SubscribeServer) error
+}
+
+type TTRPCEvents_SubscribeServer interface {
+ Send(*types.Envelope) error
+ ttrpc.StreamServer
+}
+
+type ttrpceventsSubscribeServer struct {
+ ttrpc.StreamServer
+}
+
+func (x *ttrpceventsSubscribeServer) Send(m *types.Envelope) error {
+ return x.StreamServer.SendMsg(m)
+}
+
+func RegisterTTRPCEventsService(srv *ttrpc.Server, svc TTRPCEventsService) {
+ srv.RegisterService("containerd.services.events.v1.Events", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Publish": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req PublishRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Publish(ctx, &req)
+ },
+ "Forward": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ForwardRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Forward(ctx, &req)
+ },
+ },
+ Streams: map[string]ttrpc.Stream{
+ "Subscribe": {
+ Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
+ m := new(SubscribeRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return nil, svc.Subscribe(ctx, m, &ttrpceventsSubscribeServer{stream})
+ },
+ StreamingClient: false,
+ StreamingServer: true,
+ },
+ },
+ })
+}
+
+type TTRPCEventsClient interface {
+ Publish(context.Context, *PublishRequest) (*emptypb.Empty, error)
+ Forward(context.Context, *ForwardRequest) (*emptypb.Empty, error)
+ Subscribe(context.Context, *SubscribeRequest) (TTRPCEvents_SubscribeClient, error)
+}
+
+type ttrpceventsClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCEventsClient(client *ttrpc.Client) TTRPCEventsClient {
+ return &ttrpceventsClient{
+ client: client,
+ }
+}
+
+func (c *ttrpceventsClient) Publish(ctx context.Context, req *PublishRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.events.v1.Events", "Publish", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpceventsClient) Forward(ctx context.Context, req *ForwardRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.events.v1.Events", "Forward", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpceventsClient) Subscribe(ctx context.Context, req *SubscribeRequest) (TTRPCEvents_SubscribeClient, error) {
+ stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
+ StreamingClient: false,
+ StreamingServer: true,
+ }, "containerd.services.events.v1.Events", "Subscribe", req)
+ if err != nil {
+ return nil, err
+ }
+ x := &ttrpceventsSubscribeClient{stream}
+ return x, nil
+}
+
+type TTRPCEvents_SubscribeClient interface {
+ Recv() (*types.Envelope, error)
+ ttrpc.ClientStream
+}
+
+type ttrpceventsSubscribeClient struct {
+ ttrpc.ClientStream
+}
+
+func (x *ttrpceventsSubscribeClient) Recv() (*types.Envelope, error) {
+ m := new(types.Envelope)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
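
A hedged sketch of consuming the new ttrpc Subscribe stream, narrowed to a single namespace using the filter syntax documented in events.proto; the client wiring is assumed:

```go
package sketch

import (
	"context"
	"fmt"

	eventsapi "github.com/containerd/containerd/api/services/events/v1"
)

// watch subscribes over ttrpc and prints each envelope until the stream ends.
func watch(ctx context.Context, ec eventsapi.TTRPCEventsClient) error {
	sub, err := ec.Subscribe(ctx, &eventsapi.SubscribeRequest{
		Filters: []string{"namespace==default"},
	})
	if err != nil {
		return err
	}
	for {
		env, err := sub.Recv()
		if err != nil {
			return err // io.EOF once the server closes the stream
		}
		fmt.Println(env.Namespace, env.Topic)
	}
}
```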
diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
index 52aff3dd2f1c..ef5b7faa2c29 100644
--- a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
@@ -555,6 +555,11 @@ type DeleteImageRequest struct {
//
// Default is false
Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"`
+ // Target is the descriptor of the image to be deleted.
+ //
+ // If the descriptor does not match the digest of the stored image,
+ // the delete operation will return a "not found" error.
+ Target *types.Descriptor `protobuf:"bytes,3,opt,name=target,proto3,oneof" json:"target,omitempty"`
}
func (x *DeleteImageRequest) Reset() {
@@ -603,6 +608,13 @@ func (x *DeleteImageRequest) GetSync() bool {
return false
}
+func (x *DeleteImageRequest) GetTarget() *types.Descriptor {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+
var File_github_com_containerd_containerd_api_services_images_v1_images_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDesc = []byte{
@@ -692,49 +704,53 @@ var file_github_com_containerd_containerd_api_services_images_v1_images_proto_ra
0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61,
0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x06, 0x69, 0x6d,
- 0x61, 0x67, 0x65, 0x73, 0x22, 0x3c, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6d,
- 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12,
- 0x0a, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x73, 0x79,
- 0x6e, 0x63, 0x32, 0x94, 0x04, 0x0a, 0x06, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x12, 0x66, 0x0a,
- 0x03, 0x47, 0x65, 0x74, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
+ 0x61, 0x67, 0x65, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49,
+ 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x12, 0x0a, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x73,
+ 0x79, 0x6e, 0x63, 0x12, 0x39, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x48, 0x00, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x09,
+ 0x0a, 0x07, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x32, 0x94, 0x04, 0x0a, 0x06, 0x49, 0x6d,
+ 0x61, 0x67, 0x65, 0x73, 0x12, 0x66, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x2e, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49,
+ 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49,
+ 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x04,
+ 0x4c, 0x69, 0x73, 0x74, 0x12, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65,
- 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
+ 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
+ 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61,
+ 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+ 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
+ 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61,
+ 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61,
+ 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65,
- 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x30, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
- 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e,
- 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x31, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e,
- 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x31, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x31,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74,
- 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
- 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70,
- 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x6d, 0x61, 0x67, 0x65,
- 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x33,
+ 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
+ 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d,
+ 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d,
+ 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x06, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+ 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67,
+ 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
+ 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
+ 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x2f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x6d, 0x61, 0x67,
+ 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -781,21 +797,22 @@ var file_github_com_containerd_containerd_api_services_images_v1_images_proto_de
12, // 10: containerd.services.images.v1.UpdateImageRequest.source_date_epoch:type_name -> google.protobuf.Timestamp
0, // 11: containerd.services.images.v1.UpdateImageResponse.image:type_name -> containerd.services.images.v1.Image
0, // 12: containerd.services.images.v1.ListImagesResponse.images:type_name -> containerd.services.images.v1.Image
- 1, // 13: containerd.services.images.v1.Images.Get:input_type -> containerd.services.images.v1.GetImageRequest
- 7, // 14: containerd.services.images.v1.Images.List:input_type -> containerd.services.images.v1.ListImagesRequest
- 3, // 15: containerd.services.images.v1.Images.Create:input_type -> containerd.services.images.v1.CreateImageRequest
- 5, // 16: containerd.services.images.v1.Images.Update:input_type -> containerd.services.images.v1.UpdateImageRequest
- 9, // 17: containerd.services.images.v1.Images.Delete:input_type -> containerd.services.images.v1.DeleteImageRequest
- 2, // 18: containerd.services.images.v1.Images.Get:output_type -> containerd.services.images.v1.GetImageResponse
- 8, // 19: containerd.services.images.v1.Images.List:output_type -> containerd.services.images.v1.ListImagesResponse
- 4, // 20: containerd.services.images.v1.Images.Create:output_type -> containerd.services.images.v1.CreateImageResponse
- 6, // 21: containerd.services.images.v1.Images.Update:output_type -> containerd.services.images.v1.UpdateImageResponse
- 14, // 22: containerd.services.images.v1.Images.Delete:output_type -> google.protobuf.Empty
- 18, // [18:23] is the sub-list for method output_type
- 13, // [13:18] is the sub-list for method input_type
- 13, // [13:13] is the sub-list for extension type_name
- 13, // [13:13] is the sub-list for extension extendee
- 0, // [0:13] is the sub-list for field type_name
+ 11, // 13: containerd.services.images.v1.DeleteImageRequest.target:type_name -> containerd.types.Descriptor
+ 1, // 14: containerd.services.images.v1.Images.Get:input_type -> containerd.services.images.v1.GetImageRequest
+ 7, // 15: containerd.services.images.v1.Images.List:input_type -> containerd.services.images.v1.ListImagesRequest
+ 3, // 16: containerd.services.images.v1.Images.Create:input_type -> containerd.services.images.v1.CreateImageRequest
+ 5, // 17: containerd.services.images.v1.Images.Update:input_type -> containerd.services.images.v1.UpdateImageRequest
+ 9, // 18: containerd.services.images.v1.Images.Delete:input_type -> containerd.services.images.v1.DeleteImageRequest
+ 2, // 19: containerd.services.images.v1.Images.Get:output_type -> containerd.services.images.v1.GetImageResponse
+ 8, // 20: containerd.services.images.v1.Images.List:output_type -> containerd.services.images.v1.ListImagesResponse
+ 4, // 21: containerd.services.images.v1.Images.Create:output_type -> containerd.services.images.v1.CreateImageResponse
+ 6, // 22: containerd.services.images.v1.Images.Update:output_type -> containerd.services.images.v1.UpdateImageResponse
+ 14, // 23: containerd.services.images.v1.Images.Delete:output_type -> google.protobuf.Empty
+ 19, // [19:24] is the sub-list for method output_type
+ 14, // [14:19] is the sub-list for method input_type
+ 14, // [14:14] is the sub-list for extension type_name
+ 14, // [14:14] is the sub-list for extension extendee
+ 0, // [0:14] is the sub-list for field type_name
}
func init() { file_github_com_containerd_containerd_api_services_images_v1_images_proto_init() }
@@ -925,6 +942,7 @@ func file_github_com_containerd_containerd_api_services_images_v1_images_proto_i
}
}
}
+ file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[9].OneofWrappers = []interface{}{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto b/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto
index b32df4b051cf..2f47ab284de9 100644
--- a/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto
+++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto
@@ -140,4 +140,10 @@ message DeleteImageRequest {
//
// Default is false
bool sync = 2;
+
+ // Target value for the image to be deleted.
+ //
+ // If the image descriptor does not match the digest of the stored image,
+ // the delete operation will return a "not found" error.
+ optional containerd.types.Descriptor target = 3;
}
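
A minimal client-side sketch of how the new optional target field might be used, assuming an existing gRPC images client; the image name, descriptor, and client wiring are illustrative assumptions, not part of this patch:

// Hypothetical usage sketch: delete an image only if it still points at
// the descriptor we resolved earlier. If the image has since been
// re-tagged to a different digest, Delete returns "not found".
package example

import (
	"context"

	imagesapi "github.com/containerd/containerd/api/services/images/v1"
	types "github.com/containerd/containerd/api/types"
)

func deleteIfUnchanged(ctx context.Context, svc imagesapi.ImagesClient, name string, desc *types.Descriptor) error {
	_, err := svc.Delete(ctx, &imagesapi.DeleteImageRequest{
		Name:   name,
		Sync:   true, // wait for referenced resources to be cleaned up
		Target: desc, // guard: only delete if the stored digest still matches
	})
	return err
}
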
diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/images/v1/images_ttrpc.pb.go
new file mode 100644
index 000000000000..442e1ac5c1a7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images_ttrpc.pb.go
@@ -0,0 +1,109 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/images/v1/images.proto
+package images
+
+import (
+ context "context"
+ ttrpc "github.com/containerd/ttrpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type TTRPCImagesService interface {
+ Get(context.Context, *GetImageRequest) (*GetImageResponse, error)
+ List(context.Context, *ListImagesRequest) (*ListImagesResponse, error)
+ Create(context.Context, *CreateImageRequest) (*CreateImageResponse, error)
+ Update(context.Context, *UpdateImageRequest) (*UpdateImageResponse, error)
+ Delete(context.Context, *DeleteImageRequest) (*emptypb.Empty, error)
+}
+
+func RegisterTTRPCImagesService(srv *ttrpc.Server, svc TTRPCImagesService) {
+ srv.RegisterService("containerd.services.images.v1.Images", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Get": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req GetImageRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Get(ctx, &req)
+ },
+ "List": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ListImagesRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.List(ctx, &req)
+ },
+ "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req CreateImageRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Create(ctx, &req)
+ },
+ "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req UpdateImageRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Update(ctx, &req)
+ },
+ "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req DeleteImageRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Delete(ctx, &req)
+ },
+ },
+ })
+}
+
+type ttrpcimagesClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCImagesClient(client *ttrpc.Client) TTRPCImagesService {
+ return &ttrpcimagesClient{
+ client: client,
+ }
+}
+
+func (c *ttrpcimagesClient) Get(ctx context.Context, req *GetImageRequest) (*GetImageResponse, error) {
+ var resp GetImageResponse
+ if err := c.client.Call(ctx, "containerd.services.images.v1.Images", "Get", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcimagesClient) List(ctx context.Context, req *ListImagesRequest) (*ListImagesResponse, error) {
+ var resp ListImagesResponse
+ if err := c.client.Call(ctx, "containerd.services.images.v1.Images", "List", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcimagesClient) Create(ctx context.Context, req *CreateImageRequest) (*CreateImageResponse, error) {
+ var resp CreateImageResponse
+ if err := c.client.Call(ctx, "containerd.services.images.v1.Images", "Create", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcimagesClient) Update(ctx context.Context, req *UpdateImageRequest) (*UpdateImageResponse, error) {
+ var resp UpdateImageResponse
+ if err := c.client.Call(ctx, "containerd.services.images.v1.Images", "Update", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcimagesClient) Delete(ctx context.Context, req *DeleteImageRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.images.v1.Images", "Delete", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
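
The generated ttrpc bindings mirror the gRPC surface. A short usage sketch, assuming a ttrpc server reachable over a unix socket; the socket path and minimal error handling are illustrative:

// Hypothetical usage sketch for the new TTRPC images client.
package example

import (
	"context"
	"net"

	imagesapi "github.com/containerd/containerd/api/services/images/v1"
	"github.com/containerd/ttrpc"
)

func listImages(ctx context.Context, socket string) (*imagesapi.ListImagesResponse, error) {
	conn, err := net.Dial("unix", socket)
	if err != nil {
		return nil, err
	}
	// NewTTRPCImagesClient wraps a generic ttrpc.Client with typed methods
	// dispatched under the containerd.services.images.v1.Images service name.
	client := imagesapi.NewTTRPCImagesClient(ttrpc.NewClient(conn))
	return client.List(ctx, &imagesapi.ListImagesRequest{})
}
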
diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/doc.go
index 4e6c545823d2..f6f65eadfd27 100644
--- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/doc.go
+++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/doc.go
@@ -15,9 +15,3 @@
*/
package introspection
-
-// Not implemented types introduced in later versions and included for API compatibility
-// Use of these types should only use not implemented errors
-
-type PluginInfoRequest struct{}
-type PluginInfoResponse struct{}
diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
index 7768f81313ec..0d2f8c62324b 100644
--- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
@@ -26,6 +26,7 @@ import (
status "google.golang.org/genproto/googleapis/rpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
emptypb "google.golang.org/protobuf/types/known/emptypb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
@@ -402,6 +403,128 @@ func (x *DeprecationWarning) GetLastOccurrence() *timestamppb.Timestamp {
return nil
}
+type PluginInfoRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+ // Options may be used to request extra dynamic information from
+ // a plugin.
+ // This object is determined by the plugin, and the plugin may return
+ // NotImplemented or InvalidArgument if it is not supported.
+ Options *anypb.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"`
+}
+
+func (x *PluginInfoRequest) Reset() {
+ *x = PluginInfoRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PluginInfoRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PluginInfoRequest) ProtoMessage() {}
+
+func (x *PluginInfoRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PluginInfoRequest.ProtoReflect.Descriptor instead.
+func (*PluginInfoRequest) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *PluginInfoRequest) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *PluginInfoRequest) GetID() string {
+ if x != nil {
+ return x.ID
+ }
+ return ""
+}
+
+func (x *PluginInfoRequest) GetOptions() *anypb.Any {
+ if x != nil {
+ return x.Options
+ }
+ return nil
+}
+
+type PluginInfoResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Plugin *Plugin `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"`
+ Extra *anypb.Any `protobuf:"bytes,2,opt,name=extra,proto3" json:"extra,omitempty"`
+}
+
+func (x *PluginInfoResponse) Reset() {
+ *x = PluginInfoResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PluginInfoResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PluginInfoResponse) ProtoMessage() {}
+
+func (x *PluginInfoResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PluginInfoResponse.ProtoReflect.Descriptor instead.
+func (*PluginInfoResponse) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *PluginInfoResponse) GetPlugin() *Plugin {
+ if x != nil {
+ return x.Plugin
+ }
+ return nil
+}
+
+func (x *PluginInfoResponse) GetExtra() *anypb.Any {
+ if x != nil {
+ return x.Extra
+ }
+ return nil
+}
+
var File_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDesc = []byte{
@@ -412,86 +535,115 @@ var file_github_com_containerd_containerd_api_services_introspection_v1_introspe
0x2f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x24, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73,
- 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x39, 0x67, 0x69, 0x74, 0x68,
- 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
- 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69,
- 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70,
- 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
- 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe6, 0x02, 0x0a,
- 0x06, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
- 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72,
- 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72,
- 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66,
- 0x6f, 0x72, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c,
- 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d,
- 0x73, 0x12, 0x53, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70,
- 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e,
- 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65,
- 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69,
- 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61,
- 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x08, 0x69, 0x6e,
- 0x69, 0x74, 0x5f, 0x65, 0x72, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x52, 0x07, 0x69, 0x6e, 0x69, 0x74, 0x45, 0x72, 0x72, 0x1a, 0x3a, 0x0a, 0x0c, 0x45, 0x78, 0x70,
- 0x6f, 0x72, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2a, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65,
- 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
- 0x73, 0x22, 0x59, 0x0a, 0x0f, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
- 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72,
- 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75,
- 0x67, 0x69, 0x6e, 0x52, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x22, 0xaa, 0x01, 0x0a,
- 0x0e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75,
- 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
- 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x69, 0x64, 0x6e, 0x73, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x70, 0x69, 0x64, 0x6e, 0x73, 0x12, 0x5c, 0x0a, 0x0c, 0x64,
- 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0c, 0x64, 0x65, 0x70,
- 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x12, 0x44, 0x65,
- 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67,
- 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64,
- 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x0f, 0x6c, 0x61,
- 0x73, 0x74, 0x5f, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
- 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x32,
- 0xdf, 0x01, 0x0a, 0x0d, 0x49, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x76, 0x0a, 0x07, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x12, 0x34, 0x2e, 0x63,
+ 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe6, 0x02, 0x0a, 0x06, 0x50, 0x6c, 0x75, 0x67,
+ 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72,
+ 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72,
+ 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+ 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72,
+ 0x6d, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x12, 0x53, 0x0a, 0x07,
+ 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x6f,
+ 0x72, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74,
+ 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65,
+ 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c,
+ 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x08, 0x69, 0x6e, 0x69, 0x74, 0x5f, 0x65, 0x72,
+ 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x07, 0x69, 0x6e, 0x69,
+ 0x74, 0x45, 0x72, 0x72, 0x1a, 0x3a, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+ 0x22, 0x2a, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x59, 0x0a, 0x0f,
+ 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x46, 0x0a, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x07,
+ 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x10,
+ 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x70, 0x69, 0x64,
+ 0x12, 0x14, 0x0a, 0x05, 0x70, 0x69, 0x64, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x05, 0x70, 0x69, 0x64, 0x6e, 0x73, 0x12, 0x5c, 0x0a, 0x0c, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x63,
0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70,
- 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x06, 0x53, 0x65, 0x72,
- 0x76, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x34, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
- 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x42, 0x4e, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61,
- 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57,
+ 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x0e, 0x0a, 0x02, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6f, 0x63,
+ 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74,
+ 0x4f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x67, 0x0a, 0x11, 0x50, 0x6c,
+ 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x02, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x01, 0x0a, 0x12, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e,
+ 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x06, 0x70, 0x6c,
+ 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76,
+ 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
+ 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x32, 0xe0, 0x02, 0x0a,
+ 0x0d, 0x49, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x76,
+ 0x0a, 0x07, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x12, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
+ 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31,
+ 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69,
+ 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
+ 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7f,
+ 0x0a, 0x0a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x37, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+ 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72,
+ 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75,
+ 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42,
+ 0x4e, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
+ 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x2f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76,
+ 0x31, 0x3b, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -506,35 +658,43 @@ func file_github_com_containerd_containerd_api_services_introspection_v1_introsp
return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescData
}
-var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_goTypes = []interface{}{
(*Plugin)(nil), // 0: containerd.services.introspection.v1.Plugin
(*PluginsRequest)(nil), // 1: containerd.services.introspection.v1.PluginsRequest
(*PluginsResponse)(nil), // 2: containerd.services.introspection.v1.PluginsResponse
(*ServerResponse)(nil), // 3: containerd.services.introspection.v1.ServerResponse
(*DeprecationWarning)(nil), // 4: containerd.services.introspection.v1.DeprecationWarning
- nil, // 5: containerd.services.introspection.v1.Plugin.ExportsEntry
- (*types.Platform)(nil), // 6: containerd.types.Platform
- (*status.Status)(nil), // 7: google.rpc.Status
- (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp
- (*emptypb.Empty)(nil), // 9: google.protobuf.Empty
+ (*PluginInfoRequest)(nil), // 5: containerd.services.introspection.v1.PluginInfoRequest
+ (*PluginInfoResponse)(nil), // 6: containerd.services.introspection.v1.PluginInfoResponse
+ nil, // 7: containerd.services.introspection.v1.Plugin.ExportsEntry
+ (*types.Platform)(nil), // 8: containerd.types.Platform
+ (*status.Status)(nil), // 9: google.rpc.Status
+ (*timestamppb.Timestamp)(nil), // 10: google.protobuf.Timestamp
+ (*anypb.Any)(nil), // 11: google.protobuf.Any
+ (*emptypb.Empty)(nil), // 12: google.protobuf.Empty
}
var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_depIdxs = []int32{
- 6, // 0: containerd.services.introspection.v1.Plugin.platforms:type_name -> containerd.types.Platform
- 5, // 1: containerd.services.introspection.v1.Plugin.exports:type_name -> containerd.services.introspection.v1.Plugin.ExportsEntry
- 7, // 2: containerd.services.introspection.v1.Plugin.init_err:type_name -> google.rpc.Status
- 0, // 3: containerd.services.introspection.v1.PluginsResponse.plugins:type_name -> containerd.services.introspection.v1.Plugin
- 4, // 4: containerd.services.introspection.v1.ServerResponse.deprecations:type_name -> containerd.services.introspection.v1.DeprecationWarning
- 8, // 5: containerd.services.introspection.v1.DeprecationWarning.last_occurrence:type_name -> google.protobuf.Timestamp
- 1, // 6: containerd.services.introspection.v1.Introspection.Plugins:input_type -> containerd.services.introspection.v1.PluginsRequest
- 9, // 7: containerd.services.introspection.v1.Introspection.Server:input_type -> google.protobuf.Empty
- 2, // 8: containerd.services.introspection.v1.Introspection.Plugins:output_type -> containerd.services.introspection.v1.PluginsResponse
- 3, // 9: containerd.services.introspection.v1.Introspection.Server:output_type -> containerd.services.introspection.v1.ServerResponse
- 8, // [8:10] is the sub-list for method output_type
- 6, // [6:8] is the sub-list for method input_type
- 6, // [6:6] is the sub-list for extension type_name
- 6, // [6:6] is the sub-list for extension extendee
- 0, // [0:6] is the sub-list for field type_name
+ 8, // 0: containerd.services.introspection.v1.Plugin.platforms:type_name -> containerd.types.Platform
+ 7, // 1: containerd.services.introspection.v1.Plugin.exports:type_name -> containerd.services.introspection.v1.Plugin.ExportsEntry
+ 9, // 2: containerd.services.introspection.v1.Plugin.init_err:type_name -> google.rpc.Status
+ 0, // 3: containerd.services.introspection.v1.PluginsResponse.plugins:type_name -> containerd.services.introspection.v1.Plugin
+ 4, // 4: containerd.services.introspection.v1.ServerResponse.deprecations:type_name -> containerd.services.introspection.v1.DeprecationWarning
+ 10, // 5: containerd.services.introspection.v1.DeprecationWarning.last_occurrence:type_name -> google.protobuf.Timestamp
+ 11, // 6: containerd.services.introspection.v1.PluginInfoRequest.options:type_name -> google.protobuf.Any
+ 0, // 7: containerd.services.introspection.v1.PluginInfoResponse.plugin:type_name -> containerd.services.introspection.v1.Plugin
+ 11, // 8: containerd.services.introspection.v1.PluginInfoResponse.extra:type_name -> google.protobuf.Any
+ 1, // 9: containerd.services.introspection.v1.Introspection.Plugins:input_type -> containerd.services.introspection.v1.PluginsRequest
+ 12, // 10: containerd.services.introspection.v1.Introspection.Server:input_type -> google.protobuf.Empty
+ 5, // 11: containerd.services.introspection.v1.Introspection.PluginInfo:input_type -> containerd.services.introspection.v1.PluginInfoRequest
+ 2, // 12: containerd.services.introspection.v1.Introspection.Plugins:output_type -> containerd.services.introspection.v1.PluginsResponse
+ 3, // 13: containerd.services.introspection.v1.Introspection.Server:output_type -> containerd.services.introspection.v1.ServerResponse
+ 6, // 14: containerd.services.introspection.v1.Introspection.PluginInfo:output_type -> containerd.services.introspection.v1.PluginInfoResponse
+ 12, // [12:15] is the sub-list for method output_type
+ 9, // [9:12] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
}
func init() {
@@ -605,6 +765,30 @@ func file_github_com_containerd_containerd_api_services_introspection_v1_introsp
return nil
}
}
+ file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PluginInfoRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PluginInfoResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -612,7 +796,7 @@ func file_github_com_containerd_containerd_api_services_introspection_v1_introsp
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDesc,
NumEnums: 0,
- NumMessages: 6,
+ NumMessages: 8,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
index f27f1912e79d..12fbcf5b96a2 100644
--- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
+++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
@@ -18,6 +18,8 @@ syntax = "proto3";
package containerd.services.introspection.v1;
+import "google/protobuf/any.proto";
+import "github.com/containerd/containerd/api/types/introspection.proto";
import "github.com/containerd/containerd/api/types/platform.proto";
import "google/rpc/status.proto";
import "google/protobuf/empty.proto";
@@ -33,6 +35,8 @@ service Introspection {
rpc Plugins(PluginsRequest) returns (PluginsResponse);
// Server returns information about the containerd server
rpc Server(google.protobuf.Empty) returns (ServerResponse);
+ // PluginInfo returns information directly from a plugin if the plugin supports it
+ rpc PluginInfo(PluginInfoRequest) returns (PluginInfoResponse);
}
message Plugin {
@@ -110,4 +114,20 @@ message DeprecationWarning {
string id = 1;
string message = 2;
google.protobuf.Timestamp last_occurrence = 3;
-}
\ No newline at end of file
+}
+
+message PluginInfoRequest {
+ string type = 1;
+ string id = 2;
+
+ // Options may be used to request extra dynamic information from
+ // a plugin.
+ // This object is determined by the plugin, and the plugin may return
+ // NotImplemented or InvalidArgument if it is not supported.
+ google.protobuf.Any options = 3;
+}
+
+message PluginInfoResponse {
+ Plugin plugin = 1;
+ google.protobuf.Any extra = 2;
+}
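
A sketch of calling the new PluginInfo RPC from the gRPC client; the plugin type and ID shown are assumptions for illustration, and the connection is assumed to exist:

// Hypothetical usage sketch: ask a single plugin for dynamic info.
package example

import (
	"context"

	api "github.com/containerd/containerd/api/services/introspection/v1"
	"google.golang.org/grpc"
)

func pluginInfo(ctx context.Context, conn *grpc.ClientConn) (*api.PluginInfoResponse, error) {
	client := api.NewIntrospectionClient(conn)
	// Servers that predate this API return codes.Unimplemented; a plugin
	// that does not support the Options payload may return NotImplemented
	// or InvalidArgument, as documented on the request message.
	return client.PluginInfo(ctx, &api.PluginInfoRequest{
		Type: "io.containerd.runtime.v2", // assumed plugin type
		ID:   "task",                     // assumed plugin ID
	})
}
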
diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_grpc.pb.go
index c2cf80765ccb..6a7606380a3f 100644
--- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_grpc.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_grpc.pb.go
@@ -30,6 +30,8 @@ type IntrospectionClient interface {
Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error)
// Server returns information about the containerd server
Server(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ServerResponse, error)
+ // PluginInfo returns information directly from a plugin if the plugin supports it
+ PluginInfo(ctx context.Context, in *PluginInfoRequest, opts ...grpc.CallOption) (*PluginInfoResponse, error)
}
type introspectionClient struct {
@@ -58,6 +60,15 @@ func (c *introspectionClient) Server(ctx context.Context, in *emptypb.Empty, opt
return out, nil
}
+func (c *introspectionClient) PluginInfo(ctx context.Context, in *PluginInfoRequest, opts ...grpc.CallOption) (*PluginInfoResponse, error) {
+ out := new(PluginInfoResponse)
+ err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/PluginInfo", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// IntrospectionServer is the server API for Introspection service.
// All implementations must embed UnimplementedIntrospectionServer
// for forward compatibility
@@ -69,6 +80,8 @@ type IntrospectionServer interface {
Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error)
// Server returns information about the containerd server
Server(context.Context, *emptypb.Empty) (*ServerResponse, error)
+ // PluginInfo returns information directly from a plugin if the plugin supports it
+ PluginInfo(context.Context, *PluginInfoRequest) (*PluginInfoResponse, error)
mustEmbedUnimplementedIntrospectionServer()
}
@@ -82,6 +95,9 @@ func (UnimplementedIntrospectionServer) Plugins(context.Context, *PluginsRequest
func (UnimplementedIntrospectionServer) Server(context.Context, *emptypb.Empty) (*ServerResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Server not implemented")
}
+func (UnimplementedIntrospectionServer) PluginInfo(context.Context, *PluginInfoRequest) (*PluginInfoResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PluginInfo not implemented")
+}
func (UnimplementedIntrospectionServer) mustEmbedUnimplementedIntrospectionServer() {}
// UnsafeIntrospectionServer may be embedded to opt out of forward compatibility for this service.
@@ -131,6 +147,24 @@ func _Introspection_Server_Handler(srv interface{}, ctx context.Context, dec fun
return interceptor(ctx, in, info, handler)
}
+func _Introspection_PluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PluginInfoRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(IntrospectionServer).PluginInfo(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/containerd.services.introspection.v1.Introspection/PluginInfo",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(IntrospectionServer).PluginInfo(ctx, req.(*PluginInfoRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// Introspection_ServiceDesc is the grpc.ServiceDesc for Introspection service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -146,6 +180,10 @@ var Introspection_ServiceDesc = grpc.ServiceDesc{
MethodName: "Server",
Handler: _Introspection_Server_Handler,
},
+ {
+ MethodName: "PluginInfo",
+ Handler: _Introspection_PluginInfo_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto",
diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_ttrpc.pb.go
new file mode 100644
index 000000000000..d8027d5f943f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_ttrpc.pb.go
@@ -0,0 +1,77 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
+package introspection
+
+import (
+ context "context"
+ ttrpc "github.com/containerd/ttrpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type TTRPCIntrospectionService interface {
+ Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error)
+ Server(context.Context, *emptypb.Empty) (*ServerResponse, error)
+ PluginInfo(context.Context, *PluginInfoRequest) (*PluginInfoResponse, error)
+}
+
+func RegisterTTRPCIntrospectionService(srv *ttrpc.Server, svc TTRPCIntrospectionService) {
+ srv.RegisterService("containerd.services.introspection.v1.Introspection", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Plugins": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req PluginsRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Plugins(ctx, &req)
+ },
+ "Server": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req emptypb.Empty
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Server(ctx, &req)
+ },
+ "PluginInfo": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req PluginInfoRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.PluginInfo(ctx, &req)
+ },
+ },
+ })
+}
+
+type ttrpcintrospectionClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCIntrospectionClient(client *ttrpc.Client) TTRPCIntrospectionService {
+ return &ttrpcintrospectionClient{
+ client: client,
+ }
+}
+
+func (c *ttrpcintrospectionClient) Plugins(ctx context.Context, req *PluginsRequest) (*PluginsResponse, error) {
+ var resp PluginsResponse
+ if err := c.client.Call(ctx, "containerd.services.introspection.v1.Introspection", "Plugins", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcintrospectionClient) Server(ctx context.Context, req *emptypb.Empty) (*ServerResponse, error) {
+ var resp ServerResponse
+ if err := c.client.Call(ctx, "containerd.services.introspection.v1.Introspection", "Server", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcintrospectionClient) PluginInfo(ctx context.Context, req *PluginInfoRequest) (*PluginInfoResponse, error) {
+ var resp PluginInfoResponse
+ if err := c.client.Call(ctx, "containerd.services.introspection.v1.Introspection", "PluginInfo", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases_ttrpc.pb.go
new file mode 100644
index 000000000000..af63dc0907bc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases_ttrpc.pb.go
@@ -0,0 +1,125 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/leases/v1/leases.proto
+package leases
+
+import (
+ context "context"
+ ttrpc "github.com/containerd/ttrpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type TTRPCLeasesService interface {
+ Create(context.Context, *CreateRequest) (*CreateResponse, error)
+ Delete(context.Context, *DeleteRequest) (*emptypb.Empty, error)
+ List(context.Context, *ListRequest) (*ListResponse, error)
+ AddResource(context.Context, *AddResourceRequest) (*emptypb.Empty, error)
+ DeleteResource(context.Context, *DeleteResourceRequest) (*emptypb.Empty, error)
+ ListResources(context.Context, *ListResourcesRequest) (*ListResourcesResponse, error)
+}
+
+func RegisterTTRPCLeasesService(srv *ttrpc.Server, svc TTRPCLeasesService) {
+ srv.RegisterService("containerd.services.leases.v1.Leases", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req CreateRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Create(ctx, &req)
+ },
+ "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req DeleteRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Delete(ctx, &req)
+ },
+ "List": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ListRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.List(ctx, &req)
+ },
+ "AddResource": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req AddResourceRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.AddResource(ctx, &req)
+ },
+ "DeleteResource": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req DeleteResourceRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.DeleteResource(ctx, &req)
+ },
+ "ListResources": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ListResourcesRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.ListResources(ctx, &req)
+ },
+ },
+ })
+}
+
+type ttrpcleasesClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCLeasesClient(client *ttrpc.Client) TTRPCLeasesService {
+ return &ttrpcleasesClient{
+ client: client,
+ }
+}
+
+func (c *ttrpcleasesClient) Create(ctx context.Context, req *CreateRequest) (*CreateResponse, error) {
+ var resp CreateResponse
+ if err := c.client.Call(ctx, "containerd.services.leases.v1.Leases", "Create", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcleasesClient) Delete(ctx context.Context, req *DeleteRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.leases.v1.Leases", "Delete", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcleasesClient) List(ctx context.Context, req *ListRequest) (*ListResponse, error) {
+ var resp ListResponse
+ if err := c.client.Call(ctx, "containerd.services.leases.v1.Leases", "List", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcleasesClient) AddResource(ctx context.Context, req *AddResourceRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.leases.v1.Leases", "AddResource", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcleasesClient) DeleteResource(ctx context.Context, req *DeleteResourceRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.leases.v1.Leases", "DeleteResource", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcleasesClient) ListResources(ctx context.Context, req *ListResourcesRequest) (*ListResourcesResponse, error) {
+ var resp ListResourcesResponse
+ if err := c.client.Call(ctx, "containerd.services.leases.v1.Leases", "ListResources", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace_ttrpc.pb.go
new file mode 100644
index 000000000000..09f94c1d1f15
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace_ttrpc.pb.go
@@ -0,0 +1,109 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
+package namespaces
+
+import (
+ context "context"
+ ttrpc "github.com/containerd/ttrpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type TTRPCNamespacesService interface {
+ Get(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error)
+ List(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error)
+ Create(context.Context, *CreateNamespaceRequest) (*CreateNamespaceResponse, error)
+ Update(context.Context, *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error)
+ Delete(context.Context, *DeleteNamespaceRequest) (*emptypb.Empty, error)
+}
+
+func RegisterTTRPCNamespacesService(srv *ttrpc.Server, svc TTRPCNamespacesService) {
+ srv.RegisterService("containerd.services.namespaces.v1.Namespaces", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Get": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req GetNamespaceRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Get(ctx, &req)
+ },
+ "List": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ListNamespacesRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.List(ctx, &req)
+ },
+ "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req CreateNamespaceRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Create(ctx, &req)
+ },
+ "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req UpdateNamespaceRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Update(ctx, &req)
+ },
+ "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req DeleteNamespaceRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Delete(ctx, &req)
+ },
+ },
+ })
+}
+
+type ttrpcnamespacesClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCNamespacesClient(client *ttrpc.Client) TTRPCNamespacesService {
+ return &ttrpcnamespacesClient{
+ client: client,
+ }
+}
+
+func (c *ttrpcnamespacesClient) Get(ctx context.Context, req *GetNamespaceRequest) (*GetNamespaceResponse, error) {
+ var resp GetNamespaceResponse
+ if err := c.client.Call(ctx, "containerd.services.namespaces.v1.Namespaces", "Get", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcnamespacesClient) List(ctx context.Context, req *ListNamespacesRequest) (*ListNamespacesResponse, error) {
+ var resp ListNamespacesResponse
+ if err := c.client.Call(ctx, "containerd.services.namespaces.v1.Namespaces", "List", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcnamespacesClient) Create(ctx context.Context, req *CreateNamespaceRequest) (*CreateNamespaceResponse, error) {
+ var resp CreateNamespaceResponse
+ if err := c.client.Call(ctx, "containerd.services.namespaces.v1.Namespaces", "Create", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcnamespacesClient) Update(ctx context.Context, req *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error) {
+ var resp UpdateNamespaceResponse
+ if err := c.client.Call(ctx, "containerd.services.namespaces.v1.Namespaces", "Update", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcnamespacesClient) Delete(ctx context.Context, req *DeleteNamespaceRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.namespaces.v1.Namespaces", "Delete", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.pb.go b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.pb.go
index 49081aa8acb5..235081b7a8f7 100644
--- a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.pb.go
@@ -520,10 +520,13 @@ type ControllerCreateRequest struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"`
- Rootfs []*types.Mount `protobuf:"bytes,2,rep,name=rootfs,proto3" json:"rootfs,omitempty"`
- Options *anypb.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"`
- NetnsPath string `protobuf:"bytes,4,opt,name=netns_path,json=netnsPath,proto3" json:"netns_path,omitempty"`
+ SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"`
+ Rootfs []*types.Mount `protobuf:"bytes,2,rep,name=rootfs,proto3" json:"rootfs,omitempty"`
+ Options *anypb.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"`
+ NetnsPath string `protobuf:"bytes,4,opt,name=netns_path,json=netnsPath,proto3" json:"netns_path,omitempty"`
+ Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Sandbox *types.Sandbox `protobuf:"bytes,6,opt,name=sandbox,proto3" json:"sandbox,omitempty"`
+ Sandboxer string `protobuf:"bytes,10,opt,name=sandboxer,proto3" json:"sandboxer,omitempty"`
}
func (x *ControllerCreateRequest) Reset() {
@@ -586,6 +589,27 @@ func (x *ControllerCreateRequest) GetNetnsPath() string {
return ""
}
+func (x *ControllerCreateRequest) GetAnnotations() map[string]string {
+ if x != nil {
+ return x.Annotations
+ }
+ return nil
+}
+
+func (x *ControllerCreateRequest) GetSandbox() *types.Sandbox {
+ if x != nil {
+ return x.Sandbox
+ }
+ return nil
+}
+
+func (x *ControllerCreateRequest) GetSandboxer() string {
+ if x != nil {
+ return x.Sandboxer
+ }
+ return ""
+}
+
type ControllerCreateResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -639,6 +663,7 @@ type ControllerStartRequest struct {
unknownFields protoimpl.UnknownFields
SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"`
+ Sandboxer string `protobuf:"bytes,10,opt,name=sandboxer,proto3" json:"sandboxer,omitempty"`
}
func (x *ControllerStartRequest) Reset() {
@@ -680,6 +705,13 @@ func (x *ControllerStartRequest) GetSandboxID() string {
return ""
}
+func (x *ControllerStartRequest) GetSandboxer() string {
+ if x != nil {
+ return x.Sandboxer
+ }
+ return ""
+}
+
type ControllerStartResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -689,6 +721,11 @@ type ControllerStartResponse struct {
Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Address of the sandbox for containerd to connect,
+ // for calling Task or other APIs serving in the sandbox.
+ // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<cid>:<port>.
+ Address string `protobuf:"bytes,5,opt,name=address,proto3" json:"address,omitempty"`
+ Version uint32 `protobuf:"varint,6,opt,name=version,proto3" json:"version,omitempty"`
}
func (x *ControllerStartResponse) Reset() {
@@ -751,12 +788,27 @@ func (x *ControllerStartResponse) GetLabels() map[string]string {
return nil
}
+func (x *ControllerStartResponse) GetAddress() string {
+ if x != nil {
+ return x.Address
+ }
+ return ""
+}
+
+func (x *ControllerStartResponse) GetVersion() uint32 {
+ if x != nil {
+ return x.Version
+ }
+ return 0
+}
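+
+// Consumer-side sketch (illustrative, not generated code): Address encodes
+// "<protocol>+<endpoint>", so a caller might split it before dialing:
+//
+//	proto, endpoint, _ := strings.Cut(resp.GetAddress(), "+")
+//	// e.g. proto == "ttrpc", endpoint == "unix://path/to/uds"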
+
type ControllerPlatformRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"`
+ Sandboxer string `protobuf:"bytes,10,opt,name=sandboxer,proto3" json:"sandboxer,omitempty"`
}
func (x *ControllerPlatformRequest) Reset() {
@@ -798,6 +850,13 @@ func (x *ControllerPlatformRequest) GetSandboxID() string {
return ""
}
+func (x *ControllerPlatformRequest) GetSandboxer() string {
+ if x != nil {
+ return x.Sandboxer
+ }
+ return ""
+}
+
type ControllerPlatformResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -852,6 +911,7 @@ type ControllerStopRequest struct {
SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"`
TimeoutSecs uint32 `protobuf:"varint,2,opt,name=timeout_secs,json=timeoutSecs,proto3" json:"timeout_secs,omitempty"`
+ Sandboxer string `protobuf:"bytes,10,opt,name=sandboxer,proto3" json:"sandboxer,omitempty"`
}
func (x *ControllerStopRequest) Reset() {
@@ -900,6 +960,13 @@ func (x *ControllerStopRequest) GetTimeoutSecs() uint32 {
return 0
}
+func (x *ControllerStopRequest) GetSandboxer() string {
+ if x != nil {
+ return x.Sandboxer
+ }
+ return ""
+}
+
type ControllerStopResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -944,6 +1011,7 @@ type ControllerWaitRequest struct {
unknownFields protoimpl.UnknownFields
SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"`
+ Sandboxer string `protobuf:"bytes,10,opt,name=sandboxer,proto3" json:"sandboxer,omitempty"`
}
func (x *ControllerWaitRequest) Reset() {
@@ -985,6 +1053,13 @@ func (x *ControllerWaitRequest) GetSandboxID() string {
return ""
}
+func (x *ControllerWaitRequest) GetSandboxer() string {
+ if x != nil {
+ return x.Sandboxer
+ }
+ return ""
+}
+
type ControllerWaitResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1047,6 +1122,7 @@ type ControllerStatusRequest struct {
SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"`
Verbose bool `protobuf:"varint,2,opt,name=verbose,proto3" json:"verbose,omitempty"`
+ Sandboxer string `protobuf:"bytes,10,opt,name=sandboxer,proto3" json:"sandboxer,omitempty"`
}
func (x *ControllerStatusRequest) Reset() {
@@ -1095,6 +1171,13 @@ func (x *ControllerStatusRequest) GetVerbose() bool {
return false
}
+func (x *ControllerStatusRequest) GetSandboxer() string {
+ if x != nil {
+ return x.Sandboxer
+ }
+ return ""
+}
+
type ControllerStatusResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1107,6 +1190,11 @@ type ControllerStatusResponse struct {
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
ExitedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"`
Extra *anypb.Any `protobuf:"bytes,7,opt,name=extra,proto3" json:"extra,omitempty"`
+ // Address of the sandbox for containerd to connect,
+ // for calling Task or other APIs serving in the sandbox.
+ // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<cid>:<port>.
+ Address string `protobuf:"bytes,8,opt,name=address,proto3" json:"address,omitempty"`
+ Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"`
}
func (x *ControllerStatusResponse) Reset() {
@@ -1190,12 +1278,27 @@ func (x *ControllerStatusResponse) GetExtra() *anypb.Any {
return nil
}
+func (x *ControllerStatusResponse) GetAddress() string {
+ if x != nil {
+ return x.Address
+ }
+ return ""
+}
+
+func (x *ControllerStatusResponse) GetVersion() uint32 {
+ if x != nil {
+ return x.Version
+ }
+ return 0
+}
+
type ControllerShutdownRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"`
+ Sandboxer string `protobuf:"bytes,10,opt,name=sandboxer,proto3" json:"sandboxer,omitempty"`
}
func (x *ControllerShutdownRequest) Reset() {
@@ -1237,6 +1340,13 @@ func (x *ControllerShutdownRequest) GetSandboxID() string {
return ""
}
+func (x *ControllerShutdownRequest) GetSandboxer() string {
+ if x != nil {
+ return x.Sandboxer
+ }
+ return ""
+}
+
type ControllerShutdownResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1275,6 +1385,217 @@ func (*ControllerShutdownResponse) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{23}
}
+type ControllerMetricsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"`
+ Sandboxer string `protobuf:"bytes,10,opt,name=sandboxer,proto3" json:"sandboxer,omitempty"`
+}
+
+func (x *ControllerMetricsRequest) Reset() {
+ *x = ControllerMetricsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ControllerMetricsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ControllerMetricsRequest) ProtoMessage() {}
+
+func (x *ControllerMetricsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ControllerMetricsRequest.ProtoReflect.Descriptor instead.
+func (*ControllerMetricsRequest) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *ControllerMetricsRequest) GetSandboxID() string {
+ if x != nil {
+ return x.SandboxID
+ }
+ return ""
+}
+
+func (x *ControllerMetricsRequest) GetSandboxer() string {
+ if x != nil {
+ return x.Sandboxer
+ }
+ return ""
+}
+
+type ControllerMetricsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Metrics *types.Metric `protobuf:"bytes,1,opt,name=metrics,proto3" json:"metrics,omitempty"`
+}
+
+func (x *ControllerMetricsResponse) Reset() {
+ *x = ControllerMetricsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ControllerMetricsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ControllerMetricsResponse) ProtoMessage() {}
+
+func (x *ControllerMetricsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ControllerMetricsResponse.ProtoReflect.Descriptor instead.
+func (*ControllerMetricsResponse) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{25}
+}
+
+func (x *ControllerMetricsResponse) GetMetrics() *types.Metric {
+ if x != nil {
+ return x.Metrics
+ }
+ return nil
+}
+
+type ControllerUpdateRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"`
+ Sandboxer string `protobuf:"bytes,2,opt,name=sandboxer,proto3" json:"sandboxer,omitempty"`
+ Sandbox *types.Sandbox `protobuf:"bytes,3,opt,name=sandbox,proto3" json:"sandbox,omitempty"`
+ Fields []string `protobuf:"bytes,4,rep,name=fields,proto3" json:"fields,omitempty"`
+}
+
+func (x *ControllerUpdateRequest) Reset() {
+ *x = ControllerUpdateRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ControllerUpdateRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ControllerUpdateRequest) ProtoMessage() {}
+
+func (x *ControllerUpdateRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ControllerUpdateRequest.ProtoReflect.Descriptor instead.
+func (*ControllerUpdateRequest) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *ControllerUpdateRequest) GetSandboxID() string {
+ if x != nil {
+ return x.SandboxID
+ }
+ return ""
+}
+
+func (x *ControllerUpdateRequest) GetSandboxer() string {
+ if x != nil {
+ return x.Sandboxer
+ }
+ return ""
+}
+
+func (x *ControllerUpdateRequest) GetSandbox() *types.Sandbox {
+ if x != nil {
+ return x.Sandbox
+ }
+ return nil
+}
+
+func (x *ControllerUpdateRequest) GetFields() []string {
+ if x != nil {
+ return x.Fields
+ }
+ return nil
+}
+
+type ControllerUpdateResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *ControllerUpdateResponse) Reset() {
+ *x = ControllerUpdateResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ControllerUpdateResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ControllerUpdateResponse) ProtoMessage() {}
+
+func (x *ControllerUpdateResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ControllerUpdateResponse.ProtoReflect.Descriptor instead.
+func (*ControllerUpdateResponse) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{27}
+}
+
var File_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDesc = []byte{
@@ -1299,243 +1620,319 @@ var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_
0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f,
0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70,
0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x22, 0x49, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62,
- 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
- 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64,
- 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x22, 0x4a, 0x0a, 0x13,
- 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
- 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52,
- 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x22, 0x61, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72,
- 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33,
- 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70,
- 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64,
- 0x62, 0x6f, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x4a, 0x0a, 0x13, 0x53,
- 0x74, 0x6f, 0x72, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
- 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07,
- 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x22, 0x33, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, 0x65,
- 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
- 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13,
- 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65,
- 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
- 0x73, 0x22, 0x42, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
- 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52,
- 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x47, 0x65,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64,
- 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61,
- 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x47, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x72, 0x65,
- 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x73,
- 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e,
- 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
- 0x22, 0xb8, 0x01, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43,
- 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a,
- 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x06, 0x72,
- 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d,
- 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x12, 0x2e, 0x0a, 0x07,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a,
- 0x6e, 0x65, 0x74, 0x6e, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x09, 0x6e, 0x65, 0x74, 0x6e, 0x73, 0x50, 0x61, 0x74, 0x68, 0x22, 0x39, 0x0a, 0x18, 0x43,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62,
+ 0x6f, 0x1a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
+ 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x49, 0x0a, 0x12, 0x53,
+ 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x33, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73,
+ 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x22, 0x4a, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a,
+ 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62,
+ 0x6f, 0x78, 0x22, 0x61, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64,
+ 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e,
+ 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x16, 0x0a,
+ 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x66,
+ 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x4a, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07,
+ 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x22, 0x33, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62,
0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e,
- 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x37, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x0a,
+ 0x10, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x42, 0x0a, 0x11, 0x53,
+ 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x2d, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22,
+ 0x30, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49,
+ 0x64, 0x22, 0x47, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
+ 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x22, 0xb7, 0x03, 0x0a, 0x17, 0x43,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64,
+ 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+ 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06,
+ 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x6e, 0x73, 0x5f,
+ 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x6e,
+ 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x6a, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x33, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73,
+ 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62,
+ 0x6f, 0x78, 0x65, 0x72, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0x39, 0x0a, 0x18, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22,
- 0x9d, 0x02, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74,
- 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73,
- 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69,
- 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x0a,
- 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72,
- 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x5b, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c,
- 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
- 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61,
- 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61,
- 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e,
- 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
- 0x3a, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x6c, 0x61,
- 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a,
- 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x54, 0x0a, 0x1a, 0x43,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72,
- 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x6c, 0x61,
- 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50,
- 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72,
- 0x6d, 0x22, 0x59, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53,
- 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61,
- 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
- 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d,
- 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x73, 0x22, 0x18, 0x0a, 0x16,
- 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x72,
- 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, 0x74,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74,
- 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65,
- 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69,
- 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64,
- 0x41, 0x74, 0x22, 0x52, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
- 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07,
- 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76,
- 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x22, 0x92, 0x03, 0x0a, 0x18, 0x43, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69,
+ 0x55, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61,
+ 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e,
+ 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73,
+ 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x61, 0x6e, 0x64,
+ 0x62, 0x6f, 0x78, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e,
+ 0x64, 0x62, 0x6f, 0x78, 0x65, 0x72, 0x22, 0xd1, 0x02, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49,
+ 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03,
+ 0x70, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61,
+ 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x5b,
+ 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61,
+ 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a,
+ 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x19, 0x43, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62,
+ 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e,
+ 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62,
+ 0x6f, 0x78, 0x65, 0x72, 0x22, 0x54, 0x0a, 0x1a, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x72, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
+ 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d,
+ 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x22, 0x77, 0x0a, 0x15, 0x43, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
- 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x03, 0x70, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x56, 0x0a, 0x04, 0x69, 0x6e,
- 0x66, 0x6f, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
- 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73,
- 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x69, 0x6e,
- 0x66, 0x6f, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x37, 0x0a,
- 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78,
- 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, 0x78, 0x74,
- 0x72, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
- 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
- 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, 0x0a, 0x19, 0x43,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77,
- 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64,
- 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61,
- 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x43, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xb7, 0x04, 0x0a, 0x05, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12,
- 0x71, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
- 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65,
- 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e,
+ 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65,
+ 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75,
+ 0x74, 0x53, 0x65, 0x63, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
+ 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x65, 0x72, 0x22, 0x18, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x72, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x54, 0x0a,
+ 0x15, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64,
+ 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
+ 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x65, 0x72, 0x22, 0x72, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x72, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37,
+ 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65,
+ 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x70, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49,
+ 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x07, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73,
+ 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x72, 0x22, 0xc6, 0x03, 0x0a, 0x18, 0x43, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64,
+ 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x56, 0x0a,
+ 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
+ 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74,
+ 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
+ 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x78, 0x74,
+ 0x72, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05,
+ 0x65, 0x78, 0x74, 0x72, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12,
+ 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x37, 0x0a, 0x09, 0x49, 0x6e, 0x66,
+ 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+ 0x38, 0x01, 0x22, 0x58, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72,
+ 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x1c,
+ 0x0a, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x72, 0x22, 0x1c, 0x0a, 0x1a,
+ 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f,
+ 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x57, 0x0a, 0x18, 0x43, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64,
+ 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
+ 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x65, 0x72, 0x22, 0x4f, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x72, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x07, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x73, 0x22, 0xa3, 0x01, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x6c, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12,
+ 0x1c, 0x0a, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x72, 0x12, 0x33, 0x0a,
+ 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62,
+ 0x6f, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x43, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xb7, 0x04, 0x0a, 0x05, 0x53, 0x74, 0x6f, 0x72, 0x65,
+ 0x12, 0x71, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72,
+ 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e,
+ 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x32, 0x2e,
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53,
- 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x71, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x32, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74,
- 0x6f, 0x72, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65,
+ 0x74, 0x6f, 0x72, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e,
+ 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76,
- 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12,
- 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31,
- 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+ 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
+ 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62,
+ 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x04, 0x4c, 0x69, 0x73,
+ 0x74, 0x12, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e,
+ 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
- 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74,
- 0x12, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76,
- 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
- 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x68, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x2f, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74,
- 0x6f, 0x72, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e,
+ 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x68, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x2f, 0x2e,
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53,
- 0x74, 0x6f, 0x72, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32,
- 0xf6, 0x06, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x7b,
- 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
- 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73,
- 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x6c, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e,
- 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x05, 0x53,
- 0x74, 0x61, 0x72, 0x74, 0x12, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
- 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62,
- 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72,
- 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f,
- 0x72, 0x6d, 0x12, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x74, 0x6f, 0x72, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e,
+ 0x53, 0x74, 0x6f, 0x72, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x32, 0xf3, 0x08, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12,
+ 0x7b, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
+ 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
- 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x6c,
- 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e,
+ 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x05,
+ 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+ 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64,
+ 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e,
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72,
- 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x04, 0x53, 0x74, 0x6f,
- 0x70, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e,
- 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x6f,
- 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
- 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73,
- 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x75, 0x0a, 0x04, 0x57, 0x61, 0x69, 0x74, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
- 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73,
- 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31,
- 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, 0x74, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x12, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e,
- 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
- 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77,
- 0x6e, 0x12, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e,
- 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, 0x75,
- 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x08, 0x50, 0x6c, 0x61, 0x74, 0x66,
+ 0x6f, 0x72, 0x6d, 0x12, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+ 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50,
+ 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f,
+ 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x04, 0x53, 0x74,
+ 0x6f, 0x70, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
+ 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74,
+ 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
+ 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x75, 0x0a, 0x04, 0x57, 0x61, 0x69, 0x74, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
+ 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76,
+ 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x12, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
+ 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f,
+ 0x77, 0x6e, 0x12, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78,
+ 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68,
+ 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77,
+ 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7e, 0x0a, 0x07, 0x4d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x73, 0x12, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
+ 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62,
+ 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72,
+ 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x06, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x12, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+ 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
+ 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x55,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x63,
0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68,
- 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
- 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69,
- 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
- 0x78, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2f,
+ 0x76, 0x31, 0x3b, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
}
var (
@@ -1550,7 +1947,7 @@ func file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto
return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescData
}
-var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes = make([]protoimpl.MessageInfo, 26)
+var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes = make([]protoimpl.MessageInfo, 31)
var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_goTypes = []interface{}{
(*StoreCreateRequest)(nil), // 0: containerd.services.sandbox.v1.StoreCreateRequest
(*StoreCreateResponse)(nil), // 1: containerd.services.sandbox.v1.StoreCreateResponse
@@ -1576,60 +1973,74 @@ var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_
(*ControllerStatusResponse)(nil), // 21: containerd.services.sandbox.v1.ControllerStatusResponse
(*ControllerShutdownRequest)(nil), // 22: containerd.services.sandbox.v1.ControllerShutdownRequest
(*ControllerShutdownResponse)(nil), // 23: containerd.services.sandbox.v1.ControllerShutdownResponse
- nil, // 24: containerd.services.sandbox.v1.ControllerStartResponse.LabelsEntry
- nil, // 25: containerd.services.sandbox.v1.ControllerStatusResponse.InfoEntry
- (*types.Sandbox)(nil), // 26: containerd.types.Sandbox
- (*types.Mount)(nil), // 27: containerd.types.Mount
- (*anypb.Any)(nil), // 28: google.protobuf.Any
- (*timestamppb.Timestamp)(nil), // 29: google.protobuf.Timestamp
- (*types.Platform)(nil), // 30: containerd.types.Platform
+ (*ControllerMetricsRequest)(nil), // 24: containerd.services.sandbox.v1.ControllerMetricsRequest
+ (*ControllerMetricsResponse)(nil), // 25: containerd.services.sandbox.v1.ControllerMetricsResponse
+ (*ControllerUpdateRequest)(nil), // 26: containerd.services.sandbox.v1.ControllerUpdateRequest
+ (*ControllerUpdateResponse)(nil), // 27: containerd.services.sandbox.v1.ControllerUpdateResponse
+ nil, // 28: containerd.services.sandbox.v1.ControllerCreateRequest.AnnotationsEntry
+ nil, // 29: containerd.services.sandbox.v1.ControllerStartResponse.LabelsEntry
+ nil, // 30: containerd.services.sandbox.v1.ControllerStatusResponse.InfoEntry
+ (*types.Sandbox)(nil), // 31: containerd.types.Sandbox
+ (*types.Mount)(nil), // 32: containerd.types.Mount
+ (*anypb.Any)(nil), // 33: google.protobuf.Any
+ (*timestamppb.Timestamp)(nil), // 34: google.protobuf.Timestamp
+ (*types.Platform)(nil), // 35: containerd.types.Platform
+ (*types.Metric)(nil), // 36: containerd.types.Metric
}
var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_depIdxs = []int32{
- 26, // 0: containerd.services.sandbox.v1.StoreCreateRequest.sandbox:type_name -> containerd.types.Sandbox
- 26, // 1: containerd.services.sandbox.v1.StoreCreateResponse.sandbox:type_name -> containerd.types.Sandbox
- 26, // 2: containerd.services.sandbox.v1.StoreUpdateRequest.sandbox:type_name -> containerd.types.Sandbox
- 26, // 3: containerd.services.sandbox.v1.StoreUpdateResponse.sandbox:type_name -> containerd.types.Sandbox
- 26, // 4: containerd.services.sandbox.v1.StoreListResponse.list:type_name -> containerd.types.Sandbox
- 26, // 5: containerd.services.sandbox.v1.StoreGetResponse.sandbox:type_name -> containerd.types.Sandbox
- 27, // 6: containerd.services.sandbox.v1.ControllerCreateRequest.rootfs:type_name -> containerd.types.Mount
- 28, // 7: containerd.services.sandbox.v1.ControllerCreateRequest.options:type_name -> google.protobuf.Any
- 29, // 8: containerd.services.sandbox.v1.ControllerStartResponse.created_at:type_name -> google.protobuf.Timestamp
- 24, // 9: containerd.services.sandbox.v1.ControllerStartResponse.labels:type_name -> containerd.services.sandbox.v1.ControllerStartResponse.LabelsEntry
- 30, // 10: containerd.services.sandbox.v1.ControllerPlatformResponse.platform:type_name -> containerd.types.Platform
- 29, // 11: containerd.services.sandbox.v1.ControllerWaitResponse.exited_at:type_name -> google.protobuf.Timestamp
- 25, // 12: containerd.services.sandbox.v1.ControllerStatusResponse.info:type_name -> containerd.services.sandbox.v1.ControllerStatusResponse.InfoEntry
- 29, // 13: containerd.services.sandbox.v1.ControllerStatusResponse.created_at:type_name -> google.protobuf.Timestamp
- 29, // 14: containerd.services.sandbox.v1.ControllerStatusResponse.exited_at:type_name -> google.protobuf.Timestamp
- 28, // 15: containerd.services.sandbox.v1.ControllerStatusResponse.extra:type_name -> google.protobuf.Any
- 0, // 16: containerd.services.sandbox.v1.Store.Create:input_type -> containerd.services.sandbox.v1.StoreCreateRequest
- 2, // 17: containerd.services.sandbox.v1.Store.Update:input_type -> containerd.services.sandbox.v1.StoreUpdateRequest
- 4, // 18: containerd.services.sandbox.v1.Store.Delete:input_type -> containerd.services.sandbox.v1.StoreDeleteRequest
- 6, // 19: containerd.services.sandbox.v1.Store.List:input_type -> containerd.services.sandbox.v1.StoreListRequest
- 8, // 20: containerd.services.sandbox.v1.Store.Get:input_type -> containerd.services.sandbox.v1.StoreGetRequest
- 10, // 21: containerd.services.sandbox.v1.Controller.Create:input_type -> containerd.services.sandbox.v1.ControllerCreateRequest
- 12, // 22: containerd.services.sandbox.v1.Controller.Start:input_type -> containerd.services.sandbox.v1.ControllerStartRequest
- 14, // 23: containerd.services.sandbox.v1.Controller.Platform:input_type -> containerd.services.sandbox.v1.ControllerPlatformRequest
- 16, // 24: containerd.services.sandbox.v1.Controller.Stop:input_type -> containerd.services.sandbox.v1.ControllerStopRequest
- 18, // 25: containerd.services.sandbox.v1.Controller.Wait:input_type -> containerd.services.sandbox.v1.ControllerWaitRequest
- 20, // 26: containerd.services.sandbox.v1.Controller.Status:input_type -> containerd.services.sandbox.v1.ControllerStatusRequest
- 22, // 27: containerd.services.sandbox.v1.Controller.Shutdown:input_type -> containerd.services.sandbox.v1.ControllerShutdownRequest
- 1, // 28: containerd.services.sandbox.v1.Store.Create:output_type -> containerd.services.sandbox.v1.StoreCreateResponse
- 3, // 29: containerd.services.sandbox.v1.Store.Update:output_type -> containerd.services.sandbox.v1.StoreUpdateResponse
- 5, // 30: containerd.services.sandbox.v1.Store.Delete:output_type -> containerd.services.sandbox.v1.StoreDeleteResponse
- 7, // 31: containerd.services.sandbox.v1.Store.List:output_type -> containerd.services.sandbox.v1.StoreListResponse
- 9, // 32: containerd.services.sandbox.v1.Store.Get:output_type -> containerd.services.sandbox.v1.StoreGetResponse
- 11, // 33: containerd.services.sandbox.v1.Controller.Create:output_type -> containerd.services.sandbox.v1.ControllerCreateResponse
- 13, // 34: containerd.services.sandbox.v1.Controller.Start:output_type -> containerd.services.sandbox.v1.ControllerStartResponse
- 15, // 35: containerd.services.sandbox.v1.Controller.Platform:output_type -> containerd.services.sandbox.v1.ControllerPlatformResponse
- 17, // 36: containerd.services.sandbox.v1.Controller.Stop:output_type -> containerd.services.sandbox.v1.ControllerStopResponse
- 19, // 37: containerd.services.sandbox.v1.Controller.Wait:output_type -> containerd.services.sandbox.v1.ControllerWaitResponse
- 21, // 38: containerd.services.sandbox.v1.Controller.Status:output_type -> containerd.services.sandbox.v1.ControllerStatusResponse
- 23, // 39: containerd.services.sandbox.v1.Controller.Shutdown:output_type -> containerd.services.sandbox.v1.ControllerShutdownResponse
- 28, // [28:40] is the sub-list for method output_type
- 16, // [16:28] is the sub-list for method input_type
- 16, // [16:16] is the sub-list for extension type_name
- 16, // [16:16] is the sub-list for extension extendee
- 0, // [0:16] is the sub-list for field type_name
+ 31, // 0: containerd.services.sandbox.v1.StoreCreateRequest.sandbox:type_name -> containerd.types.Sandbox
+ 31, // 1: containerd.services.sandbox.v1.StoreCreateResponse.sandbox:type_name -> containerd.types.Sandbox
+ 31, // 2: containerd.services.sandbox.v1.StoreUpdateRequest.sandbox:type_name -> containerd.types.Sandbox
+ 31, // 3: containerd.services.sandbox.v1.StoreUpdateResponse.sandbox:type_name -> containerd.types.Sandbox
+ 31, // 4: containerd.services.sandbox.v1.StoreListResponse.list:type_name -> containerd.types.Sandbox
+ 31, // 5: containerd.services.sandbox.v1.StoreGetResponse.sandbox:type_name -> containerd.types.Sandbox
+ 32, // 6: containerd.services.sandbox.v1.ControllerCreateRequest.rootfs:type_name -> containerd.types.Mount
+ 33, // 7: containerd.services.sandbox.v1.ControllerCreateRequest.options:type_name -> google.protobuf.Any
+ 28, // 8: containerd.services.sandbox.v1.ControllerCreateRequest.annotations:type_name -> containerd.services.sandbox.v1.ControllerCreateRequest.AnnotationsEntry
+ 31, // 9: containerd.services.sandbox.v1.ControllerCreateRequest.sandbox:type_name -> containerd.types.Sandbox
+ 34, // 10: containerd.services.sandbox.v1.ControllerStartResponse.created_at:type_name -> google.protobuf.Timestamp
+ 29, // 11: containerd.services.sandbox.v1.ControllerStartResponse.labels:type_name -> containerd.services.sandbox.v1.ControllerStartResponse.LabelsEntry
+ 35, // 12: containerd.services.sandbox.v1.ControllerPlatformResponse.platform:type_name -> containerd.types.Platform
+ 34, // 13: containerd.services.sandbox.v1.ControllerWaitResponse.exited_at:type_name -> google.protobuf.Timestamp
+ 30, // 14: containerd.services.sandbox.v1.ControllerStatusResponse.info:type_name -> containerd.services.sandbox.v1.ControllerStatusResponse.InfoEntry
+ 34, // 15: containerd.services.sandbox.v1.ControllerStatusResponse.created_at:type_name -> google.protobuf.Timestamp
+ 34, // 16: containerd.services.sandbox.v1.ControllerStatusResponse.exited_at:type_name -> google.protobuf.Timestamp
+ 33, // 17: containerd.services.sandbox.v1.ControllerStatusResponse.extra:type_name -> google.protobuf.Any
+ 36, // 18: containerd.services.sandbox.v1.ControllerMetricsResponse.metrics:type_name -> containerd.types.Metric
+ 31, // 19: containerd.services.sandbox.v1.ControllerUpdateRequest.sandbox:type_name -> containerd.types.Sandbox
+ 0, // 20: containerd.services.sandbox.v1.Store.Create:input_type -> containerd.services.sandbox.v1.StoreCreateRequest
+ 2, // 21: containerd.services.sandbox.v1.Store.Update:input_type -> containerd.services.sandbox.v1.StoreUpdateRequest
+ 4, // 22: containerd.services.sandbox.v1.Store.Delete:input_type -> containerd.services.sandbox.v1.StoreDeleteRequest
+ 6, // 23: containerd.services.sandbox.v1.Store.List:input_type -> containerd.services.sandbox.v1.StoreListRequest
+ 8, // 24: containerd.services.sandbox.v1.Store.Get:input_type -> containerd.services.sandbox.v1.StoreGetRequest
+ 10, // 25: containerd.services.sandbox.v1.Controller.Create:input_type -> containerd.services.sandbox.v1.ControllerCreateRequest
+ 12, // 26: containerd.services.sandbox.v1.Controller.Start:input_type -> containerd.services.sandbox.v1.ControllerStartRequest
+ 14, // 27: containerd.services.sandbox.v1.Controller.Platform:input_type -> containerd.services.sandbox.v1.ControllerPlatformRequest
+ 16, // 28: containerd.services.sandbox.v1.Controller.Stop:input_type -> containerd.services.sandbox.v1.ControllerStopRequest
+ 18, // 29: containerd.services.sandbox.v1.Controller.Wait:input_type -> containerd.services.sandbox.v1.ControllerWaitRequest
+ 20, // 30: containerd.services.sandbox.v1.Controller.Status:input_type -> containerd.services.sandbox.v1.ControllerStatusRequest
+ 22, // 31: containerd.services.sandbox.v1.Controller.Shutdown:input_type -> containerd.services.sandbox.v1.ControllerShutdownRequest
+ 24, // 32: containerd.services.sandbox.v1.Controller.Metrics:input_type -> containerd.services.sandbox.v1.ControllerMetricsRequest
+ 26, // 33: containerd.services.sandbox.v1.Controller.Update:input_type -> containerd.services.sandbox.v1.ControllerUpdateRequest
+ 1, // 34: containerd.services.sandbox.v1.Store.Create:output_type -> containerd.services.sandbox.v1.StoreCreateResponse
+ 3, // 35: containerd.services.sandbox.v1.Store.Update:output_type -> containerd.services.sandbox.v1.StoreUpdateResponse
+ 5, // 36: containerd.services.sandbox.v1.Store.Delete:output_type -> containerd.services.sandbox.v1.StoreDeleteResponse
+ 7, // 37: containerd.services.sandbox.v1.Store.List:output_type -> containerd.services.sandbox.v1.StoreListResponse
+ 9, // 38: containerd.services.sandbox.v1.Store.Get:output_type -> containerd.services.sandbox.v1.StoreGetResponse
+ 11, // 39: containerd.services.sandbox.v1.Controller.Create:output_type -> containerd.services.sandbox.v1.ControllerCreateResponse
+ 13, // 40: containerd.services.sandbox.v1.Controller.Start:output_type -> containerd.services.sandbox.v1.ControllerStartResponse
+ 15, // 41: containerd.services.sandbox.v1.Controller.Platform:output_type -> containerd.services.sandbox.v1.ControllerPlatformResponse
+ 17, // 42: containerd.services.sandbox.v1.Controller.Stop:output_type -> containerd.services.sandbox.v1.ControllerStopResponse
+ 19, // 43: containerd.services.sandbox.v1.Controller.Wait:output_type -> containerd.services.sandbox.v1.ControllerWaitResponse
+ 21, // 44: containerd.services.sandbox.v1.Controller.Status:output_type -> containerd.services.sandbox.v1.ControllerStatusResponse
+ 23, // 45: containerd.services.sandbox.v1.Controller.Shutdown:output_type -> containerd.services.sandbox.v1.ControllerShutdownResponse
+ 25, // 46: containerd.services.sandbox.v1.Controller.Metrics:output_type -> containerd.services.sandbox.v1.ControllerMetricsResponse
+ 27, // 47: containerd.services.sandbox.v1.Controller.Update:output_type -> containerd.services.sandbox.v1.ControllerUpdateResponse
+ 34, // [34:48] is the sub-list for method output_type
+ 20, // [20:34] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
}
func init() { file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_init() }
@@ -1926,6 +2337,54 @@ func file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto
return nil
}
}
+ file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ControllerMetricsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ControllerMetricsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ControllerUpdateRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ControllerUpdateResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -1933,7 +2392,7 @@ func file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDesc,
NumEnums: 0,
- NumMessages: 26,
+ NumMessages: 31,
NumExtensions: 0,
NumServices: 2,
},
diff --git a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto
index 40a2db24f663..87e6ec3a34bd 100644
--- a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto
+++ b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto
@@ -31,6 +31,7 @@ import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/sandbox.proto";
import "github.com/containerd/containerd/api/types/mount.proto";
import "github.com/containerd/containerd/api/types/platform.proto";
+import "github.com/containerd/containerd/api/types/metrics.proto";
option go_package = "github.com/containerd/containerd/api/services/sandbox/v1;sandbox";
@@ -93,6 +94,8 @@ service Controller {
rpc Wait(ControllerWaitRequest) returns (ControllerWaitResponse);
rpc Status(ControllerStatusRequest) returns (ControllerStatusResponse);
rpc Shutdown(ControllerShutdownRequest) returns (ControllerShutdownResponse);
+ rpc Metrics(ControllerMetricsRequest) returns (ControllerMetricsResponse);
+ rpc Update(ControllerUpdateRequest) returns (ControllerUpdateResponse);
}
message ControllerCreateRequest {
@@ -100,6 +103,9 @@ message ControllerCreateRequest {
repeated containerd.types.Mount rootfs = 2;
google.protobuf.Any options = 3;
string netns_path = 4;
+ map<string, string> annotations = 5;
+ containerd.types.Sandbox sandbox = 6;
+ string sandboxer = 10;
}
message ControllerCreateResponse {
@@ -108,6 +114,7 @@ message ControllerCreateResponse {
message ControllerStartRequest {
string sandbox_id = 1;
+ string sandboxer = 10;
}
message ControllerStartResponse {
@@ -115,10 +122,16 @@ message ControllerStartResponse {
uint32 pid = 2;
google.protobuf.Timestamp created_at = 3;
map<string, string> labels = 4;
+ // Address of the sandbox for containerd to connect,
+ // for calling Task or other APIs serving in the sandbox.
+ // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<cid>:<port>.
+ string address = 5;
+ uint32 version = 6;
}
message ControllerPlatformRequest {
string sandbox_id = 1;
+ string sandboxer = 10;
}
message ControllerPlatformResponse {
@@ -128,12 +141,14 @@ message ControllerPlatformResponse {
message ControllerStopRequest {
string sandbox_id = 1;
uint32 timeout_secs = 2;
+ string sandboxer = 10;
}
message ControllerStopResponse {}
message ControllerWaitRequest {
string sandbox_id = 1;
+ string sandboxer = 10;
}
message ControllerWaitResponse {
@@ -144,6 +159,7 @@ message ControllerWaitResponse {
message ControllerStatusRequest {
string sandbox_id = 1;
bool verbose = 2;
+ string sandboxer = 10;
}
message ControllerStatusResponse {
@@ -154,10 +170,35 @@ message ControllerStatusResponse {
google.protobuf.Timestamp created_at = 5;
google.protobuf.Timestamp exited_at = 6;
google.protobuf.Any extra = 7;
+ // Address of the sandbox for containerd to connect,
+ // for calling Task or other APIs serving in the sandbox.
+ // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<cid>:<port>.
+ string address = 8;
+ uint32 version = 9;
}
message ControllerShutdownRequest {
string sandbox_id = 1;
+ string sandboxer = 10;
}
message ControllerShutdownResponse {}
+
+message ControllerMetricsRequest {
+ string sandbox_id = 1;
+ string sandboxer = 10;
+}
+
+message ControllerMetricsResponse {
+ types.Metric metrics = 1;
+}
+
+message ControllerUpdateRequest {
+ string sandbox_id = 1;
+ string sandboxer = 2;
+ containerd.types.Sandbox sandbox = 3;
+ repeated string fields = 4;
+}
+
+message ControllerUpdateResponse {
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_grpc.pb.go
index 16d746d75cf7..4815d31e3343 100644
--- a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_grpc.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_grpc.pb.go
@@ -259,6 +259,8 @@ type ControllerClient interface {
Wait(ctx context.Context, in *ControllerWaitRequest, opts ...grpc.CallOption) (*ControllerWaitResponse, error)
Status(ctx context.Context, in *ControllerStatusRequest, opts ...grpc.CallOption) (*ControllerStatusResponse, error)
Shutdown(ctx context.Context, in *ControllerShutdownRequest, opts ...grpc.CallOption) (*ControllerShutdownResponse, error)
+ Metrics(ctx context.Context, in *ControllerMetricsRequest, opts ...grpc.CallOption) (*ControllerMetricsResponse, error)
+ Update(ctx context.Context, in *ControllerUpdateRequest, opts ...grpc.CallOption) (*ControllerUpdateResponse, error)
}
type controllerClient struct {
@@ -332,6 +334,24 @@ func (c *controllerClient) Shutdown(ctx context.Context, in *ControllerShutdownR
return out, nil
}
+func (c *controllerClient) Metrics(ctx context.Context, in *ControllerMetricsRequest, opts ...grpc.CallOption) (*ControllerMetricsResponse, error) {
+ out := new(ControllerMetricsResponse)
+ err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Metrics", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controllerClient) Update(ctx context.Context, in *ControllerUpdateRequest, opts ...grpc.CallOption) (*ControllerUpdateResponse, error) {
+ out := new(ControllerUpdateResponse)
+ err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Update", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// ControllerServer is the server API for Controller service.
// All implementations must embed UnimplementedControllerServer
// for forward compatibility
@@ -343,6 +363,8 @@ type ControllerServer interface {
Wait(context.Context, *ControllerWaitRequest) (*ControllerWaitResponse, error)
Status(context.Context, *ControllerStatusRequest) (*ControllerStatusResponse, error)
Shutdown(context.Context, *ControllerShutdownRequest) (*ControllerShutdownResponse, error)
+ Metrics(context.Context, *ControllerMetricsRequest) (*ControllerMetricsResponse, error)
+ Update(context.Context, *ControllerUpdateRequest) (*ControllerUpdateResponse, error)
mustEmbedUnimplementedControllerServer()
}
@@ -371,6 +393,12 @@ func (UnimplementedControllerServer) Status(context.Context, *ControllerStatusRe
func (UnimplementedControllerServer) Shutdown(context.Context, *ControllerShutdownRequest) (*ControllerShutdownResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented")
}
+func (UnimplementedControllerServer) Metrics(context.Context, *ControllerMetricsRequest) (*ControllerMetricsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Metrics not implemented")
+}
+func (UnimplementedControllerServer) Update(context.Context, *ControllerUpdateRequest) (*ControllerUpdateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Update not implemented")
+}
func (UnimplementedControllerServer) mustEmbedUnimplementedControllerServer() {}
// UnsafeControllerServer may be embedded to opt out of forward compatibility for this service.
@@ -510,6 +538,42 @@ func _Controller_Shutdown_Handler(srv interface{}, ctx context.Context, dec func
return interceptor(ctx, in, info, handler)
}
+func _Controller_Metrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ControllerMetricsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControllerServer).Metrics(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/containerd.services.sandbox.v1.Controller/Metrics",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControllerServer).Metrics(ctx, req.(*ControllerMetricsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ControllerUpdateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControllerServer).Update(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/containerd.services.sandbox.v1.Controller/Update",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControllerServer).Update(ctx, req.(*ControllerUpdateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// Controller_ServiceDesc is the grpc.ServiceDesc for Controller service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -545,6 +609,14 @@ var Controller_ServiceDesc = grpc.ServiceDesc{
MethodName: "Shutdown",
Handler: _Controller_Shutdown_Handler,
},
+ {
+ MethodName: "Metrics",
+ Handler: _Controller_Metrics_Handler,
+ },
+ {
+ MethodName: "Update",
+ Handler: _Controller_Update_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto",
diff --git a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_ttrpc.pb.go
new file mode 100644
index 000000000000..924a54f6e71f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_ttrpc.pb.go
@@ -0,0 +1,272 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto
+package sandbox
+
+import (
+ context "context"
+ ttrpc "github.com/containerd/ttrpc"
+)
+
+type TTRPCStoreService interface {
+ Create(context.Context, *StoreCreateRequest) (*StoreCreateResponse, error)
+ Update(context.Context, *StoreUpdateRequest) (*StoreUpdateResponse, error)
+ Delete(context.Context, *StoreDeleteRequest) (*StoreDeleteResponse, error)
+ List(context.Context, *StoreListRequest) (*StoreListResponse, error)
+ Get(context.Context, *StoreGetRequest) (*StoreGetResponse, error)
+}
+
+func RegisterTTRPCStoreService(srv *ttrpc.Server, svc TTRPCStoreService) {
+ srv.RegisterService("containerd.services.sandbox.v1.Store", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req StoreCreateRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Create(ctx, &req)
+ },
+ "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req StoreUpdateRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Update(ctx, &req)
+ },
+ "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req StoreDeleteRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Delete(ctx, &req)
+ },
+ "List": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req StoreListRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.List(ctx, &req)
+ },
+ "Get": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req StoreGetRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Get(ctx, &req)
+ },
+ },
+ })
+}
+
+type ttrpcstoreClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCStoreClient(client *ttrpc.Client) TTRPCStoreService {
+ return &ttrpcstoreClient{
+ client: client,
+ }
+}
+
+func (c *ttrpcstoreClient) Create(ctx context.Context, req *StoreCreateRequest) (*StoreCreateResponse, error) {
+ var resp StoreCreateResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Store", "Create", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcstoreClient) Update(ctx context.Context, req *StoreUpdateRequest) (*StoreUpdateResponse, error) {
+ var resp StoreUpdateResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Store", "Update", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcstoreClient) Delete(ctx context.Context, req *StoreDeleteRequest) (*StoreDeleteResponse, error) {
+ var resp StoreDeleteResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Store", "Delete", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcstoreClient) List(ctx context.Context, req *StoreListRequest) (*StoreListResponse, error) {
+ var resp StoreListResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Store", "List", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcstoreClient) Get(ctx context.Context, req *StoreGetRequest) (*StoreGetResponse, error) {
+ var resp StoreGetResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Store", "Get", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+type TTRPCControllerService interface {
+ Create(context.Context, *ControllerCreateRequest) (*ControllerCreateResponse, error)
+ Start(context.Context, *ControllerStartRequest) (*ControllerStartResponse, error)
+ Platform(context.Context, *ControllerPlatformRequest) (*ControllerPlatformResponse, error)
+ Stop(context.Context, *ControllerStopRequest) (*ControllerStopResponse, error)
+ Wait(context.Context, *ControllerWaitRequest) (*ControllerWaitResponse, error)
+ Status(context.Context, *ControllerStatusRequest) (*ControllerStatusResponse, error)
+ Shutdown(context.Context, *ControllerShutdownRequest) (*ControllerShutdownResponse, error)
+ Metrics(context.Context, *ControllerMetricsRequest) (*ControllerMetricsResponse, error)
+ Update(context.Context, *ControllerUpdateRequest) (*ControllerUpdateResponse, error)
+}
+
+func RegisterTTRPCControllerService(srv *ttrpc.Server, svc TTRPCControllerService) {
+ srv.RegisterService("containerd.services.sandbox.v1.Controller", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ControllerCreateRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Create(ctx, &req)
+ },
+ "Start": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ControllerStartRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Start(ctx, &req)
+ },
+ "Platform": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ControllerPlatformRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Platform(ctx, &req)
+ },
+ "Stop": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ControllerStopRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Stop(ctx, &req)
+ },
+ "Wait": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ControllerWaitRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Wait(ctx, &req)
+ },
+ "Status": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ControllerStatusRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Status(ctx, &req)
+ },
+ "Shutdown": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ControllerShutdownRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Shutdown(ctx, &req)
+ },
+ "Metrics": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ControllerMetricsRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Metrics(ctx, &req)
+ },
+ "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ControllerUpdateRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Update(ctx, &req)
+ },
+ },
+ })
+}
+
+type ttrpccontrollerClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCControllerClient(client *ttrpc.Client) TTRPCControllerService {
+ return &ttrpccontrollerClient{
+ client: client,
+ }
+}
+
+func (c *ttrpccontrollerClient) Create(ctx context.Context, req *ControllerCreateRequest) (*ControllerCreateResponse, error) {
+ var resp ControllerCreateResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Controller", "Create", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontrollerClient) Start(ctx context.Context, req *ControllerStartRequest) (*ControllerStartResponse, error) {
+ var resp ControllerStartResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Controller", "Start", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontrollerClient) Platform(ctx context.Context, req *ControllerPlatformRequest) (*ControllerPlatformResponse, error) {
+ var resp ControllerPlatformResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Controller", "Platform", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontrollerClient) Stop(ctx context.Context, req *ControllerStopRequest) (*ControllerStopResponse, error) {
+ var resp ControllerStopResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Controller", "Stop", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontrollerClient) Wait(ctx context.Context, req *ControllerWaitRequest) (*ControllerWaitResponse, error) {
+ var resp ControllerWaitResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Controller", "Wait", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontrollerClient) Status(ctx context.Context, req *ControllerStatusRequest) (*ControllerStatusResponse, error) {
+ var resp ControllerStatusResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Controller", "Status", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontrollerClient) Shutdown(ctx context.Context, req *ControllerShutdownRequest) (*ControllerShutdownResponse, error) {
+ var resp ControllerShutdownResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Controller", "Shutdown", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontrollerClient) Metrics(ctx context.Context, req *ControllerMetricsRequest) (*ControllerMetricsResponse, error) {
+ var resp ControllerMetricsResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Controller", "Metrics", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpccontrollerClient) Update(ctx context.Context, req *ControllerUpdateRequest) (*ControllerUpdateResponse, error) {
+ var resp ControllerUpdateResponse
+ if err := c.client.Call(ctx, "containerd.services.sandbox.v1.Controller", "Update", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots_ttrpc.pb.go
new file mode 100644
index 000000000000..8ab278cb4e03
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots_ttrpc.pb.go
@@ -0,0 +1,242 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
+package snapshots
+
+import (
+ context "context"
+ ttrpc "github.com/containerd/ttrpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type TTRPCSnapshotsService interface {
+ Prepare(context.Context, *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error)
+ View(context.Context, *ViewSnapshotRequest) (*ViewSnapshotResponse, error)
+ Mounts(context.Context, *MountsRequest) (*MountsResponse, error)
+ Commit(context.Context, *CommitSnapshotRequest) (*emptypb.Empty, error)
+ Remove(context.Context, *RemoveSnapshotRequest) (*emptypb.Empty, error)
+ Stat(context.Context, *StatSnapshotRequest) (*StatSnapshotResponse, error)
+ Update(context.Context, *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error)
+ List(context.Context, *ListSnapshotsRequest, TTRPCSnapshots_ListServer) error
+ Usage(context.Context, *UsageRequest) (*UsageResponse, error)
+ Cleanup(context.Context, *CleanupRequest) (*emptypb.Empty, error)
+}
+
+type TTRPCSnapshots_ListServer interface {
+ Send(*ListSnapshotsResponse) error
+ ttrpc.StreamServer
+}
+
+type ttrpcsnapshotsListServer struct {
+ ttrpc.StreamServer
+}
+
+func (x *ttrpcsnapshotsListServer) Send(m *ListSnapshotsResponse) error {
+ return x.StreamServer.SendMsg(m)
+}
+
+func RegisterTTRPCSnapshotsService(srv *ttrpc.Server, svc TTRPCSnapshotsService) {
+ srv.RegisterService("containerd.services.snapshots.v1.Snapshots", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Prepare": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req PrepareSnapshotRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Prepare(ctx, &req)
+ },
+ "View": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ViewSnapshotRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.View(ctx, &req)
+ },
+ "Mounts": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req MountsRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Mounts(ctx, &req)
+ },
+ "Commit": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req CommitSnapshotRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Commit(ctx, &req)
+ },
+ "Remove": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req RemoveSnapshotRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Remove(ctx, &req)
+ },
+ "Stat": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req StatSnapshotRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Stat(ctx, &req)
+ },
+ "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req UpdateSnapshotRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Update(ctx, &req)
+ },
+ "Usage": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req UsageRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Usage(ctx, &req)
+ },
+ "Cleanup": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req CleanupRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Cleanup(ctx, &req)
+ },
+ },
+ Streams: map[string]ttrpc.Stream{
+ "List": {
+ Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
+ m := new(ListSnapshotsRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return nil, svc.List(ctx, m, &ttrpcsnapshotsListServer{stream})
+ },
+ StreamingClient: false,
+ StreamingServer: true,
+ },
+ },
+ })
+}
+
+type TTRPCSnapshotsClient interface {
+ Prepare(context.Context, *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error)
+ View(context.Context, *ViewSnapshotRequest) (*ViewSnapshotResponse, error)
+ Mounts(context.Context, *MountsRequest) (*MountsResponse, error)
+ Commit(context.Context, *CommitSnapshotRequest) (*emptypb.Empty, error)
+ Remove(context.Context, *RemoveSnapshotRequest) (*emptypb.Empty, error)
+ Stat(context.Context, *StatSnapshotRequest) (*StatSnapshotResponse, error)
+ Update(context.Context, *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error)
+ List(context.Context, *ListSnapshotsRequest) (TTRPCSnapshots_ListClient, error)
+ Usage(context.Context, *UsageRequest) (*UsageResponse, error)
+ Cleanup(context.Context, *CleanupRequest) (*emptypb.Empty, error)
+}
+
+type ttrpcsnapshotsClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCSnapshotsClient(client *ttrpc.Client) TTRPCSnapshotsClient {
+ return &ttrpcsnapshotsClient{
+ client: client,
+ }
+}
+
+func (c *ttrpcsnapshotsClient) Prepare(ctx context.Context, req *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error) {
+ var resp PrepareSnapshotResponse
+ if err := c.client.Call(ctx, "containerd.services.snapshots.v1.Snapshots", "Prepare", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcsnapshotsClient) View(ctx context.Context, req *ViewSnapshotRequest) (*ViewSnapshotResponse, error) {
+ var resp ViewSnapshotResponse
+ if err := c.client.Call(ctx, "containerd.services.snapshots.v1.Snapshots", "View", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcsnapshotsClient) Mounts(ctx context.Context, req *MountsRequest) (*MountsResponse, error) {
+ var resp MountsResponse
+ if err := c.client.Call(ctx, "containerd.services.snapshots.v1.Snapshots", "Mounts", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcsnapshotsClient) Commit(ctx context.Context, req *CommitSnapshotRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.snapshots.v1.Snapshots", "Commit", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcsnapshotsClient) Remove(ctx context.Context, req *RemoveSnapshotRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.snapshots.v1.Snapshots", "Remove", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcsnapshotsClient) Stat(ctx context.Context, req *StatSnapshotRequest) (*StatSnapshotResponse, error) {
+ var resp StatSnapshotResponse
+ if err := c.client.Call(ctx, "containerd.services.snapshots.v1.Snapshots", "Stat", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcsnapshotsClient) Update(ctx context.Context, req *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error) {
+ var resp UpdateSnapshotResponse
+ if err := c.client.Call(ctx, "containerd.services.snapshots.v1.Snapshots", "Update", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcsnapshotsClient) List(ctx context.Context, req *ListSnapshotsRequest) (TTRPCSnapshots_ListClient, error) {
+ stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
+ StreamingClient: false,
+ StreamingServer: true,
+ }, "containerd.services.snapshots.v1.Snapshots", "List", req)
+ if err != nil {
+ return nil, err
+ }
+ x := &ttrpcsnapshotsListClient{stream}
+ return x, nil
+}
+
+type TTRPCSnapshots_ListClient interface {
+ Recv() (*ListSnapshotsResponse, error)
+ ttrpc.ClientStream
+}
+
+type ttrpcsnapshotsListClient struct {
+ ttrpc.ClientStream
+}
+
+func (x *ttrpcsnapshotsListClient) Recv() (*ListSnapshotsResponse, error) {
+ m := new(ListSnapshotsResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *ttrpcsnapshotsClient) Usage(ctx context.Context, req *UsageRequest) (*UsageResponse, error) {
+ var resp UsageResponse
+ if err := c.client.Call(ctx, "containerd.services.snapshots.v1.Snapshots", "Usage", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpcsnapshotsClient) Cleanup(ctx context.Context, req *CleanupRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.snapshots.v1.Snapshots", "Cleanup", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming_ttrpc.pb.go
new file mode 100644
index 000000000000..9e30233d4311
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming_ttrpc.pb.go
@@ -0,0 +1,97 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/streaming/v1/streaming.proto
+package streaming
+
+import (
+ context "context"
+ ttrpc "github.com/containerd/ttrpc"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+type TTRPCStreamingService interface {
+ Stream(context.Context, TTRPCStreaming_StreamServer) error
+}
+
+type TTRPCStreaming_StreamServer interface {
+ Send(*anypb.Any) error
+ Recv() (*anypb.Any, error)
+ ttrpc.StreamServer
+}
+
+type ttrpcstreamingStreamServer struct {
+ ttrpc.StreamServer
+}
+
+func (x *ttrpcstreamingStreamServer) Send(m *anypb.Any) error {
+ return x.StreamServer.SendMsg(m)
+}
+
+func (x *ttrpcstreamingStreamServer) Recv() (*anypb.Any, error) {
+ m := new(anypb.Any)
+ if err := x.StreamServer.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func RegisterTTRPCStreamingService(srv *ttrpc.Server, svc TTRPCStreamingService) {
+ srv.RegisterService("containerd.services.streaming.v1.Streaming", &ttrpc.ServiceDesc{
+ Streams: map[string]ttrpc.Stream{
+ "Stream": {
+ Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
+ return nil, svc.Stream(ctx, &ttrpcstreamingStreamServer{stream})
+ },
+ StreamingClient: true,
+ StreamingServer: true,
+ },
+ },
+ })
+}
+
+type TTRPCStreamingClient interface {
+ Stream(context.Context) (TTRPCStreaming_StreamClient, error)
+}
+
+type ttrpcstreamingClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCStreamingClient(client *ttrpc.Client) TTRPCStreamingClient {
+ return &ttrpcstreamingClient{
+ client: client,
+ }
+}
+
+func (c *ttrpcstreamingClient) Stream(ctx context.Context) (TTRPCStreaming_StreamClient, error) {
+ stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
+ StreamingClient: true,
+ StreamingServer: true,
+ }, "containerd.services.streaming.v1.Streaming", "Stream", nil)
+ if err != nil {
+ return nil, err
+ }
+ x := &ttrpcstreamingStreamClient{stream}
+ return x, nil
+}
+
+type TTRPCStreaming_StreamClient interface {
+ Send(*anypb.Any) error
+ Recv() (*anypb.Any, error)
+ ttrpc.ClientStream
+}
+
+type ttrpcstreamingStreamClient struct {
+ ttrpc.ClientStream
+}
+
+func (x *ttrpcstreamingStreamClient) Send(m *anypb.Any) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *ttrpcstreamingStreamClient) Recv() (*anypb.Any, error) {
+ m := new(anypb.Any)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_ttrpc.pb.go
new file mode 100644
index 000000000000..859eec58e0b1
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_ttrpc.pb.go
@@ -0,0 +1,301 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
+package tasks
+
+import (
+ context "context"
+ ttrpc "github.com/containerd/ttrpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type TTRPCTasksService interface {
+ Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error)
+ Start(context.Context, *StartRequest) (*StartResponse, error)
+ Delete(context.Context, *DeleteTaskRequest) (*DeleteResponse, error)
+ DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error)
+ Get(context.Context, *GetRequest) (*GetResponse, error)
+ List(context.Context, *ListTasksRequest) (*ListTasksResponse, error)
+ Kill(context.Context, *KillRequest) (*emptypb.Empty, error)
+ Exec(context.Context, *ExecProcessRequest) (*emptypb.Empty, error)
+ ResizePty(context.Context, *ResizePtyRequest) (*emptypb.Empty, error)
+ CloseIO(context.Context, *CloseIORequest) (*emptypb.Empty, error)
+ Pause(context.Context, *PauseTaskRequest) (*emptypb.Empty, error)
+ Resume(context.Context, *ResumeTaskRequest) (*emptypb.Empty, error)
+ ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error)
+ Checkpoint(context.Context, *CheckpointTaskRequest) (*CheckpointTaskResponse, error)
+ Update(context.Context, *UpdateTaskRequest) (*emptypb.Empty, error)
+ Metrics(context.Context, *MetricsRequest) (*MetricsResponse, error)
+ Wait(context.Context, *WaitRequest) (*WaitResponse, error)
+}
+
+func RegisterTTRPCTasksService(srv *ttrpc.Server, svc TTRPCTasksService) {
+ srv.RegisterService("containerd.services.tasks.v1.Tasks", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req CreateTaskRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Create(ctx, &req)
+ },
+ "Start": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req StartRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Start(ctx, &req)
+ },
+ "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req DeleteTaskRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Delete(ctx, &req)
+ },
+ "DeleteProcess": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req DeleteProcessRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.DeleteProcess(ctx, &req)
+ },
+ "Get": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req GetRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Get(ctx, &req)
+ },
+ "List": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ListTasksRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.List(ctx, &req)
+ },
+ "Kill": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req KillRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Kill(ctx, &req)
+ },
+ "Exec": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ExecProcessRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Exec(ctx, &req)
+ },
+ "ResizePty": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ResizePtyRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.ResizePty(ctx, &req)
+ },
+ "CloseIO": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req CloseIORequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.CloseIO(ctx, &req)
+ },
+ "Pause": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req PauseTaskRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Pause(ctx, &req)
+ },
+ "Resume": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ResumeTaskRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Resume(ctx, &req)
+ },
+ "ListPids": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req ListPidsRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.ListPids(ctx, &req)
+ },
+ "Checkpoint": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req CheckpointTaskRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Checkpoint(ctx, &req)
+ },
+ "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req UpdateTaskRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Update(ctx, &req)
+ },
+ "Metrics": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req MetricsRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Metrics(ctx, &req)
+ },
+ "Wait": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req WaitRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Wait(ctx, &req)
+ },
+ },
+ })
+}
+
+type ttrpctasksClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCTasksClient(client *ttrpc.Client) TTRPCTasksService {
+ return &ttrpctasksClient{
+ client: client,
+ }
+}
+
+func (c *ttrpctasksClient) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) {
+ var resp CreateTaskResponse
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Create", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) {
+ var resp StartResponse
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Start", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) Delete(ctx context.Context, req *DeleteTaskRequest) (*DeleteResponse, error) {
+ var resp DeleteResponse
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Delete", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) DeleteProcess(ctx context.Context, req *DeleteProcessRequest) (*DeleteResponse, error) {
+ var resp DeleteResponse
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "DeleteProcess", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) {
+ var resp GetResponse
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Get", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) List(ctx context.Context, req *ListTasksRequest) (*ListTasksResponse, error) {
+ var resp ListTasksResponse
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "List", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) Kill(ctx context.Context, req *KillRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Kill", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) Exec(ctx context.Context, req *ExecProcessRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Exec", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "ResizePty", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) CloseIO(ctx context.Context, req *CloseIORequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "CloseIO", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) Pause(ctx context.Context, req *PauseTaskRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Pause", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) Resume(ctx context.Context, req *ResumeTaskRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Resume", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) ListPids(ctx context.Context, req *ListPidsRequest) (*ListPidsResponse, error) {
+ var resp ListPidsResponse
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "ListPids", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*CheckpointTaskResponse, error) {
+ var resp CheckpointTaskResponse
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Checkpoint", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) Update(ctx context.Context, req *UpdateTaskRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Update", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) Metrics(ctx context.Context, req *MetricsRequest) (*MetricsResponse, error) {
+ var resp MetricsResponse
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Metrics", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+func (c *ttrpctasksClient) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) {
+ var resp WaitResponse
+ if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Wait", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
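
The generated tasks client is used the same way as any ttrpc stub. A sketch, assuming a hypothetical socket and an illustrative container ID; whether the namespace set on the context crosses the wire depends on the endpoint's interceptors, so it is shown here only for completeness.

package main

import (
	"context"
	"log"
	"net"

	tasksapi "github.com/containerd/containerd/api/services/tasks/v1"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/ttrpc"
)

func main() {
	conn, err := net.Dial("unix", "/run/example/tasks.sock") // hypothetical endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	tasks := tasksapi.NewTTRPCTasksClient(ttrpc.NewClient(conn))
	ctx := namespaces.WithNamespace(context.Background(), "default")

	resp, err := tasks.Get(ctx, &tasksapi.GetRequest{ContainerID: "example"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("task %s: pid %d, status %s",
		resp.Process.ID, resp.Process.Pid, resp.Process.Status)
}
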
diff --git a/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer_ttrpc.pb.go
new file mode 100644
index 000000000000..848eb1933fa0
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer_ttrpc.pb.go
@@ -0,0 +1,45 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/transfer/v1/transfer.proto
+package transfer
+
+import (
+ context "context"
+ ttrpc "github.com/containerd/ttrpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type TTRPCTransferService interface {
+ Transfer(context.Context, *TransferRequest) (*emptypb.Empty, error)
+}
+
+func RegisterTTRPCTransferService(srv *ttrpc.Server, svc TTRPCTransferService) {
+ srv.RegisterService("containerd.services.transfer.v1.Transfer", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Transfer": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req TransferRequest
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Transfer(ctx, &req)
+ },
+ },
+ })
+}
+
+type ttrpctransferClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCTransferClient(client *ttrpc.Client) TTRPCTransferService {
+ return &ttrpctransferClient{
+ client: client,
+ }
+}
+
+func (c *ttrpctransferClient) Transfer(ctx context.Context, req *TransferRequest) (*emptypb.Empty, error) {
+ var resp emptypb.Empty
+ if err := c.client.Call(ctx, "containerd.services.transfer.v1.Transfer", "Transfer", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
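
The server side mirrors this shape: implement the service interface and register it on a ttrpc server. A minimal sketch with a no-op Transfer handler and a hypothetical socket path, not a real transfer implementation.

package main

import (
	"context"
	"log"
	"net"

	transferapi "github.com/containerd/containerd/api/services/transfer/v1"
	"github.com/containerd/ttrpc"
	"google.golang.org/protobuf/types/known/emptypb"
)

type noopTransfer struct{}

// Transfer satisfies TTRPCTransferService; a real implementation would
// resolve req.Source and req.Destination and move content between them.
func (noopTransfer) Transfer(ctx context.Context, req *transferapi.TransferRequest) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}

func main() {
	srv, err := ttrpc.NewServer()
	if err != nil {
		log.Fatal(err)
	}
	transferapi.RegisterTTRPCTransferService(srv, noopTransfer{})

	l, err := net.Listen("unix", "/run/example/transfer.sock") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(context.Background(), l))
}
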
diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go
index d3d9839d639a..b374dde261bb 100644
--- a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go
+++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go
@@ -16,3 +16,8 @@
// Package events defines the ttrpc event service.
package events
+
+import types "github.com/containerd/containerd/api/types"
+
+// Deprecated: Use [types.Envelope].
+type Envelope = types.Envelope
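
Because this is a type alias rather than a new type, existing callers that name events.Envelope keep compiling against the relocated definition unchanged. A compile-time check makes the interchangeability explicit:

package main

import (
	eventsapi "github.com/containerd/containerd/api/services/ttrpc/events/v1"
	"github.com/containerd/containerd/api/types"
)

// The alias makes the two names identical: a *types.Envelope is a
// *eventsapi.Envelope and vice versa, with no conversion needed.
var _ *types.Envelope = (*eventsapi.Envelope)(nil)

func main() {}
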
diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go
index 68481a84b39d..d7e6bdfd45e3 100644
--- a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go
@@ -22,12 +22,10 @@
package events
import (
- _ "github.com/containerd/containerd/api/types"
+ types "github.com/containerd/containerd/api/types"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- anypb "google.golang.org/protobuf/types/known/anypb"
emptypb "google.golang.org/protobuf/types/known/emptypb"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
@@ -44,7 +42,7 @@ type ForwardRequest struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"`
+ Envelope *types.Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"`
}
func (x *ForwardRequest) Reset() {
@@ -79,84 +77,13 @@ func (*ForwardRequest) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescGZIP(), []int{0}
}
-func (x *ForwardRequest) GetEnvelope() *Envelope {
+func (x *ForwardRequest) GetEnvelope() *types.Envelope {
if x != nil {
return x.Envelope
}
return nil
}
-type Envelope struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
- Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"`
- Event *anypb.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"`
-}
-
-func (x *Envelope) Reset() {
- *x = Envelope{}
- if protoimpl.UnsafeEnabled {
- mi := &file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Envelope) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Envelope) ProtoMessage() {}
-
-func (x *Envelope) ProtoReflect() protoreflect.Message {
- mi := &file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Envelope.ProtoReflect.Descriptor instead.
-func (*Envelope) Descriptor() ([]byte, []int) {
- return file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *Envelope) GetTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.Timestamp
- }
- return nil
-}
-
-func (x *Envelope) GetNamespace() string {
- if x != nil {
- return x.Namespace
- }
- return ""
-}
-
-func (x *Envelope) GetTopic() string {
- if x != nil {
- return x.Topic
- }
- return ""
-}
-
-func (x *Envelope) GetEvent() *anypb.Any {
- if x != nil {
- return x.Event
- }
- return nil
-}
-
var File_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDesc = []byte{
@@ -167,32 +94,16 @@ var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_pr
0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x23, 0x63, 0x6f,
0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x76,
- 0x31, 0x1a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f,
+ 0x31, 0x1a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f,
0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
- 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61,
- 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5b, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72,
- 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x08, 0x65, 0x6e, 0x76, 0x65,
- 0x6c, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
- 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x76, 0x31,
- 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c,
- 0x6f, 0x70, 0x65, 0x22, 0xaa, 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65,
- 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
- 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61,
- 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e,
- 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69,
- 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x2a,
- 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x3a, 0x04, 0x80, 0xb9, 0x1f, 0x01,
+ 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x48, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x65, 0x6e, 0x76, 0x65,
+ 0x6c, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e,
+ 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65,
0x32, 0x60, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x56, 0x0a, 0x07, 0x46, 0x6f,
0x72, 0x77, 0x61, 0x72, 0x64, 0x12, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e,
@@ -219,25 +130,21 @@ func file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_p
return file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescData
}
-var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_goTypes = []interface{}{
- (*ForwardRequest)(nil), // 0: containerd.services.events.ttrpc.v1.ForwardRequest
- (*Envelope)(nil), // 1: containerd.services.events.ttrpc.v1.Envelope
- (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp
- (*anypb.Any)(nil), // 3: google.protobuf.Any
- (*emptypb.Empty)(nil), // 4: google.protobuf.Empty
+ (*ForwardRequest)(nil), // 0: containerd.services.events.ttrpc.v1.ForwardRequest
+ (*types.Envelope)(nil), // 1: containerd.types.Envelope
+ (*emptypb.Empty)(nil), // 2: google.protobuf.Empty
}
var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_depIdxs = []int32{
- 1, // 0: containerd.services.events.ttrpc.v1.ForwardRequest.envelope:type_name -> containerd.services.events.ttrpc.v1.Envelope
- 2, // 1: containerd.services.events.ttrpc.v1.Envelope.timestamp:type_name -> google.protobuf.Timestamp
- 3, // 2: containerd.services.events.ttrpc.v1.Envelope.event:type_name -> google.protobuf.Any
- 0, // 3: containerd.services.events.ttrpc.v1.Events.Forward:input_type -> containerd.services.events.ttrpc.v1.ForwardRequest
- 4, // 4: containerd.services.events.ttrpc.v1.Events.Forward:output_type -> google.protobuf.Empty
- 4, // [4:5] is the sub-list for method output_type
- 3, // [3:4] is the sub-list for method input_type
- 3, // [3:3] is the sub-list for extension type_name
- 3, // [3:3] is the sub-list for extension extendee
- 0, // [0:3] is the sub-list for field type_name
+ 1, // 0: containerd.services.events.ttrpc.v1.ForwardRequest.envelope:type_name -> containerd.types.Envelope
+ 0, // 1: containerd.services.events.ttrpc.v1.Events.Forward:input_type -> containerd.services.events.ttrpc.v1.ForwardRequest
+ 2, // 2: containerd.services.events.ttrpc.v1.Events.Forward:output_type -> google.protobuf.Empty
+ 2, // [2:3] is the sub-list for method output_type
+ 1, // [1:2] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
}
func init() { file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_init() }
@@ -258,18 +165,6 @@ func file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_p
return nil
}
}
- file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Envelope); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -277,7 +172,7 @@ func file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_p
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDesc,
NumEnums: 0,
- NumMessages: 2,
+ NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto
index 690e258ff86f..e20e66601899 100644
--- a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto
+++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto
@@ -18,10 +18,8 @@ syntax = "proto3";
package containerd.services.events.ttrpc.v1;
-import "github.com/containerd/containerd/api/types/fieldpath.proto";
-import "google/protobuf/any.proto";
+import "github.com/containerd/containerd/api/types/event.proto";
import "google/protobuf/empty.proto";
-import "google/protobuf/timestamp.proto";
option go_package = "github.com/containerd/containerd/api/services/ttrpc/events/v1;events";
@@ -35,13 +33,5 @@ service Events {
}
message ForwardRequest {
- Envelope envelope = 1;
-}
-
-message Envelope {
- option (containerd.types.fieldpath) = true;
- google.protobuf.Timestamp timestamp = 1;
- string namespace = 2;
- string topic = 3;
- google.protobuf.Any event = 4;
+ containerd.types.Envelope envelope = 1;
}
diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_fieldpath.pb.go
deleted file mode 100644
index ad48127be919..000000000000
--- a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_fieldpath.pb.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT.
-// source: github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto
-package events
-
-import (
- v2 "github.com/containerd/typeurl/v2"
-)
-
-// Field returns the value for the given fieldpath as a string, if defined.
-// If the value is not defined, the second value will be false.
-func (m *ForwardRequest) Field(fieldpath []string) (string, bool) {
- if len(fieldpath) == 0 {
- return "", false
- }
- switch fieldpath[0] {
- case "envelope":
- // NOTE(stevvooe): This is probably not correct in many cases.
- // We assume that the target message also implements the Field
- // method, which isn't likely true in a lot of cases.
- //
- // If you have a broken build and have found this comment,
- // you may be closer to a solution.
- if m.Envelope == nil {
- return "", false
- }
- return m.Envelope.Field(fieldpath[1:])
- }
- return "", false
-}
-
-// Field returns the value for the given fieldpath as a string, if defined.
-// If the value is not defined, the second value will be false.
-func (m *Envelope) Field(fieldpath []string) (string, bool) {
- if len(fieldpath) == 0 {
- return "", false
- }
- switch fieldpath[0] {
- // unhandled: timestamp
- case "namespace":
- return string(m.Namespace), len(m.Namespace) > 0
- case "topic":
- return string(m.Topic), len(m.Topic) > 0
- case "event":
- decoded, err := v2.UnmarshalAny(m.Event)
- if err != nil {
- return "", false
- }
- adaptor, ok := decoded.(interface{ Field([]string) (string, bool) })
- if !ok {
- return "", false
- }
- return adaptor.Field(fieldpath[1:])
- }
- return "", false
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/version/v1/version_ttrpc.pb.go
new file mode 100644
index 000000000000..c284f14e72d8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version_ttrpc.pb.go
@@ -0,0 +1,45 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/version/v1/version.proto
+package version
+
+import (
+ context "context"
+ ttrpc "github.com/containerd/ttrpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type TTRPCVersionService interface {
+ Version(context.Context, *emptypb.Empty) (*VersionResponse, error)
+}
+
+func RegisterTTRPCVersionService(srv *ttrpc.Server, svc TTRPCVersionService) {
+ srv.RegisterService("containerd.services.version.v1.Version", &ttrpc.ServiceDesc{
+ Methods: map[string]ttrpc.Method{
+ "Version": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+ var req emptypb.Empty
+ if err := unmarshal(&req); err != nil {
+ return nil, err
+ }
+ return svc.Version(ctx, &req)
+ },
+ },
+ })
+}
+
+type ttrpcversionClient struct {
+ client *ttrpc.Client
+}
+
+func NewTTRPCVersionClient(client *ttrpc.Client) TTRPCVersionService {
+ return &ttrpcversionClient{
+ client: client,
+ }
+}
+
+func (c *ttrpcversionClient) Version(ctx context.Context, req *emptypb.Empty) (*VersionResponse, error) {
+ var resp VersionResponse
+ if err := c.client.Call(ctx, "containerd.services.version.v1.Version", "Version", req, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
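
A round trip through this client is the simplest smoke test for a ttrpc endpoint, since the request is just an empty message. A sketch, assuming a served Version service on a hypothetical socket:

package main

import (
	"context"
	"log"
	"net"

	versionapi "github.com/containerd/containerd/api/services/version/v1"
	"github.com/containerd/ttrpc"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	conn, err := net.Dial("unix", "/run/example/version.sock") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := versionapi.NewTTRPCVersionClient(ttrpc.NewClient(conn))
	resp, err := client.Version(context.Background(), &emptypb.Empty{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("version=%s revision=%s", resp.Version, resp.Revision)
}
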
diff --git a/vendor/github.com/containerd/containerd/api/types/event.pb.go b/vendor/github.com/containerd/containerd/api/types/event.pb.go
new file mode 100644
index 000000000000..6ebe1e26ddb2
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/event.pb.go
@@ -0,0 +1,209 @@
+//
+//Copyright The containerd Authors.
+//
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.20.1
+// source: github.com/containerd/containerd/api/types/event.proto
+
+package types
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Envelope struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"`
+ Event *anypb.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"`
+}
+
+func (x *Envelope) Reset() {
+ *x = Envelope{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_containerd_api_types_event_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Envelope) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Envelope) ProtoMessage() {}
+
+func (x *Envelope) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_containerd_api_types_event_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Envelope.ProtoReflect.Descriptor instead.
+func (*Envelope) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_containerd_api_types_event_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Envelope) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
+ }
+ return nil
+}
+
+func (x *Envelope) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *Envelope) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+func (x *Envelope) GetEvent() *anypb.Any {
+ if x != nil {
+ return x.Event
+ }
+ return nil
+}
+
+var File_github_com_containerd_containerd_api_types_event_proto protoreflect.FileDescriptor
+
+var file_github_com_containerd_containerd_api_types_event_proto_rawDesc = []byte{
+ 0x0a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+ 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x76, 0x65,
+ 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
+ 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x3a, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
+ 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69,
+ 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x22, 0xaa, 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12,
+ 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09,
+ 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d,
+ 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61,
+ 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x2a, 0x0a,
+ 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
+ 0x6e, 0x79, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x3a, 0x04, 0x80, 0xb9, 0x1f, 0x01, 0x42,
+ 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
+ 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_github_com_containerd_containerd_api_types_event_proto_rawDescOnce sync.Once
+ file_github_com_containerd_containerd_api_types_event_proto_rawDescData = file_github_com_containerd_containerd_api_types_event_proto_rawDesc
+)
+
+func file_github_com_containerd_containerd_api_types_event_proto_rawDescGZIP() []byte {
+ file_github_com_containerd_containerd_api_types_event_proto_rawDescOnce.Do(func() {
+ file_github_com_containerd_containerd_api_types_event_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_event_proto_rawDescData)
+ })
+ return file_github_com_containerd_containerd_api_types_event_proto_rawDescData
+}
+
+var file_github_com_containerd_containerd_api_types_event_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_github_com_containerd_containerd_api_types_event_proto_goTypes = []interface{}{
+ (*Envelope)(nil), // 0: containerd.types.Envelope
+ (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp
+ (*anypb.Any)(nil), // 2: google.protobuf.Any
+}
+var file_github_com_containerd_containerd_api_types_event_proto_depIdxs = []int32{
+ 1, // 0: containerd.types.Envelope.timestamp:type_name -> google.protobuf.Timestamp
+ 2, // 1: containerd.types.Envelope.event:type_name -> google.protobuf.Any
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_github_com_containerd_containerd_api_types_event_proto_init() }
+func file_github_com_containerd_containerd_api_types_event_proto_init() {
+ if File_github_com_containerd_containerd_api_types_event_proto != nil {
+ return
+ }
+ file_github_com_containerd_containerd_api_types_fieldpath_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_github_com_containerd_containerd_api_types_event_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Envelope); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_containerd_containerd_api_types_event_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_containerd_containerd_api_types_event_proto_goTypes,
+ DependencyIndexes: file_github_com_containerd_containerd_api_types_event_proto_depIdxs,
+ MessageInfos: file_github_com_containerd_containerd_api_types_event_proto_msgTypes,
+ }.Build()
+ File_github_com_containerd_containerd_api_types_event_proto = out.File
+ file_github_com_containerd_containerd_api_types_event_proto_rawDesc = nil
+ file_github_com_containerd_containerd_api_types_event_proto_goTypes = nil
+ file_github_com_containerd_containerd_api_types_event_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/types/event.proto b/vendor/github.com/containerd/containerd/api/types/event.proto
new file mode 100644
index 000000000000..a73bc9d45077
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/event.proto
@@ -0,0 +1,33 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+syntax = "proto3";
+
+package containerd.types;
+
+import "github.com/containerd/containerd/api/types/fieldpath.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/containerd/containerd/api/types;types";
+
+message Envelope {
+ option (containerd.types.fieldpath) = true;
+ google.protobuf.Timestamp timestamp = 1;
+ string namespace = 2;
+ string topic = 3;
+ google.protobuf.Any event = 4;
+}
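
Constructing one of the relocated envelopes from Go is straightforward. A sketch with a placeholder payload; real producers wrap a concrete event message in the Any, and the namespace and topic values below are illustrative.

package main

import (
	"log"

	"github.com/containerd/containerd/api/types"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/emptypb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// emptypb.Empty stands in for a concrete event type; any proto
	// message can be wrapped in the Any.
	payload, err := anypb.New(&emptypb.Empty{})
	if err != nil {
		log.Fatal(err)
	}
	env := &types.Envelope{
		Timestamp: timestamppb.Now(),
		Namespace: "default",
		Topic:     "/tasks/create",
		Event:     payload,
	}
	log.Printf("envelope: ns=%s topic=%s", env.Namespace, env.Topic)
}
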
diff --git a/vendor/github.com/containerd/containerd/api/types/introspection.pb.go b/vendor/github.com/containerd/containerd/api/types/introspection.pb.go
new file mode 100644
index 000000000000..2f9c2ac44957
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/introspection.pb.go
@@ -0,0 +1,375 @@
+//
+//Copyright The containerd Authors.
+//
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.20.1
+// source: github.com/containerd/containerd/api/types/introspection.proto
+
+package types
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type RuntimeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RuntimePath string `protobuf:"bytes,1,opt,name=runtime_path,json=runtimePath,proto3" json:"runtime_path,omitempty"`
+ // Options correspond to CreateTaskRequest.options.
+ // This is needed to pass the runc binary path, etc.
+ Options *anypb.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
+}
+
+func (x *RuntimeRequest) Reset() {
+ *x = RuntimeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeRequest) ProtoMessage() {}
+
+func (x *RuntimeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeRequest.ProtoReflect.Descriptor instead.
+func (*RuntimeRequest) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_containerd_api_types_introspection_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *RuntimeRequest) GetRuntimePath() string {
+ if x != nil {
+ return x.RuntimePath
+ }
+ return ""
+}
+
+func (x *RuntimeRequest) GetOptions() *anypb.Any {
+ if x != nil {
+ return x.Options
+ }
+ return nil
+}
+
+type RuntimeVersion struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+ Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
+}
+
+func (x *RuntimeVersion) Reset() {
+ *x = RuntimeVersion{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeVersion) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeVersion) ProtoMessage() {}
+
+func (x *RuntimeVersion) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeVersion.ProtoReflect.Descriptor instead.
+func (*RuntimeVersion) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_containerd_api_types_introspection_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *RuntimeVersion) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *RuntimeVersion) GetRevision() string {
+ if x != nil {
+ return x.Revision
+ }
+ return ""
+}
+
+type RuntimeInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version *RuntimeVersion `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ // Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.)
+ Options *anypb.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"`
+ // OCI-compatible runtimes should use /~https://github.com/opencontainers/runtime-spec/blob/main/features.md
+ Features *anypb.Any `protobuf:"bytes,4,opt,name=features,proto3" json:"features,omitempty"`
+ // Annotations of the shim. Irrelevant to features.Annotations.
+ Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *RuntimeInfo) Reset() {
+ *x = RuntimeInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeInfo) ProtoMessage() {}
+
+func (x *RuntimeInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeInfo.ProtoReflect.Descriptor instead.
+func (*RuntimeInfo) Descriptor() ([]byte, []int) {
+ return file_github_com_containerd_containerd_api_types_introspection_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *RuntimeInfo) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *RuntimeInfo) GetVersion() *RuntimeVersion {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+func (x *RuntimeInfo) GetOptions() *anypb.Any {
+ if x != nil {
+ return x.Options
+ }
+ return nil
+}
+
+func (x *RuntimeInfo) GetFeatures() *anypb.Any {
+ if x != nil {
+ return x.Features
+ }
+ return nil
+}
+
+func (x *RuntimeInfo) GetAnnotations() map[string]string {
+ if x != nil {
+ return x.Annotations
+ }
+ return nil
+}
+
+var File_github_com_containerd_containerd_api_types_introspection_proto protoreflect.FileDescriptor
+
+var file_github_com_containerd_containerd_api_types_introspection_proto_rawDesc = []byte{
+ 0x0a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+ 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x69, 0x6e, 0x74,
+ 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x73, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a,
+ 0x0e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x61,
+ 0x74, 0x68, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x22, 0x46, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a,
+ 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x52,
+ 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3a,
+ 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x73, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e,
+ 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x66, 0x65,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
+ 0x6e, 0x79, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x50, 0x0a, 0x0b,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f,
+ 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e,
+ 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32,
+ 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+ 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70,
+ 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_github_com_containerd_containerd_api_types_introspection_proto_rawDescOnce sync.Once
+ file_github_com_containerd_containerd_api_types_introspection_proto_rawDescData = file_github_com_containerd_containerd_api_types_introspection_proto_rawDesc
+)
+
+func file_github_com_containerd_containerd_api_types_introspection_proto_rawDescGZIP() []byte {
+ file_github_com_containerd_containerd_api_types_introspection_proto_rawDescOnce.Do(func() {
+ file_github_com_containerd_containerd_api_types_introspection_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_introspection_proto_rawDescData)
+ })
+ return file_github_com_containerd_containerd_api_types_introspection_proto_rawDescData
+}
+
+var file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_github_com_containerd_containerd_api_types_introspection_proto_goTypes = []interface{}{
+ (*RuntimeRequest)(nil), // 0: containerd.types.RuntimeRequest
+ (*RuntimeVersion)(nil), // 1: containerd.types.RuntimeVersion
+ (*RuntimeInfo)(nil), // 2: containerd.types.RuntimeInfo
+ nil, // 3: containerd.types.RuntimeInfo.AnnotationsEntry
+ (*anypb.Any)(nil), // 4: google.protobuf.Any
+}
+var file_github_com_containerd_containerd_api_types_introspection_proto_depIdxs = []int32{
+ 4, // 0: containerd.types.RuntimeRequest.options:type_name -> google.protobuf.Any
+ 1, // 1: containerd.types.RuntimeInfo.version:type_name -> containerd.types.RuntimeVersion
+ 4, // 2: containerd.types.RuntimeInfo.options:type_name -> google.protobuf.Any
+ 4, // 3: containerd.types.RuntimeInfo.features:type_name -> google.protobuf.Any
+ 3, // 4: containerd.types.RuntimeInfo.annotations:type_name -> containerd.types.RuntimeInfo.AnnotationsEntry
+ 5, // [5:5] is the sub-list for method output_type
+ 5, // [5:5] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_github_com_containerd_containerd_api_types_introspection_proto_init() }
+func file_github_com_containerd_containerd_api_types_introspection_proto_init() {
+ if File_github_com_containerd_containerd_api_types_introspection_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeVersion); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_containerd_containerd_api_types_introspection_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_containerd_containerd_api_types_introspection_proto_goTypes,
+ DependencyIndexes: file_github_com_containerd_containerd_api_types_introspection_proto_depIdxs,
+ MessageInfos: file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes,
+ }.Build()
+ File_github_com_containerd_containerd_api_types_introspection_proto = out.File
+ file_github_com_containerd_containerd_api_types_introspection_proto_rawDesc = nil
+ file_github_com_containerd_containerd_api_types_introspection_proto_goTypes = nil
+ file_github_com_containerd_containerd_api_types_introspection_proto_depIdxs = nil
+}

diff --git a/vendor/github.com/containerd/containerd/api/types/introspection.proto b/vendor/github.com/containerd/containerd/api/types/introspection.proto
new file mode 100644
index 000000000000..8f3fcb5a4827
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/introspection.proto
@@ -0,0 +1,46 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+syntax = "proto3";
+
+package containerd.types;
+
+import "google/protobuf/any.proto";
+
+option go_package = "github.com/containerd/containerd/api/types;types";
+
+message RuntimeRequest {
+ string runtime_path = 1;
+ // Options correspond to CreateTaskRequest.options.
+ // This is needed to pass the runc binary path, etc.
+ google.protobuf.Any options = 2;
+}
+
+message RuntimeVersion {
+ string version = 1;
+ string revision = 2;
+}
+
+message RuntimeInfo {
+ string name = 1;
+ RuntimeVersion version = 2;
+ // Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.)
+ google.protobuf.Any options = 3;
+ // OCI-compatible runtimes should use /~https://github.com/opencontainers/runtime-spec/blob/main/features.md
+ google.protobuf.Any features = 4;
+ // Annotations of the shim. Irrelevant to features.Annotations.
+ map<string, string> annotations = 5;
+}
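
Editor's note: the RuntimeRequest/RuntimeInfo messages above back the new runtime introspection RPC. A minimal client-side sketch of building a request follows; the runtime name and binary path are illustrative placeholders, and the runc options type is the one whose plugin is registered elsewhere in this diff.

package main

import (
	"fmt"

	types "github.com/containerd/containerd/api/types"
	runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	// Wrap runtime-specific options; per the comment on RuntimeRequest,
	// this is how the runc binary path is passed. The path is a placeholder.
	opts, err := anypb.New(&runcoptions.Options{BinaryName: "/usr/bin/runc"})
	if err != nil {
		panic(err)
	}

	req := &types.RuntimeRequest{
		RuntimePath: "io.containerd.runc.v2",
		Options:     opts,
	}
	fmt.Println(req.RuntimePath, req.Options.TypeUrl)
}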
diff --git a/vendor/github.com/containerd/containerd/api/types/platform.pb.go b/vendor/github.com/containerd/containerd/api/types/platform.pb.go
index 3e206cbafbda..daa62b834e12 100644
--- a/vendor/github.com/containerd/containerd/api/types/platform.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/platform.pb.go
@@ -45,6 +45,7 @@ type Platform struct {
OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"`
Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"`
Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"`
+ OSVersion string `protobuf:"bytes,4,opt,name=os_version,json=osVersion,proto3" json:"os_version,omitempty"`
}
func (x *Platform) Reset() {
@@ -100,6 +101,13 @@ func (x *Platform) GetVariant() string {
return ""
}
+func (x *Platform) GetOsVersion() string {
+ if x != nil {
+ return x.OSVersion
+ }
+ return ""
+}
+
var File_github_com_containerd_containerd_api_types_platform_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_types_platform_proto_rawDesc = []byte{
@@ -107,17 +115,19 @@ var file_github_com_containerd_containerd_api_types_platform_proto_rawDesc = []b
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61,
0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e,
- 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x58, 0x0a,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x77, 0x0a,
0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x73, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x72, 0x63,
0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x18, 0x0a,
0x07, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
- 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75,
- 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
- 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f,
- 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x33,
+ 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x73, 0x5f, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x73, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
}
var (
diff --git a/vendor/github.com/containerd/containerd/api/types/platform.proto b/vendor/github.com/containerd/containerd/api/types/platform.proto
index b6088251f007..0b9180016de4 100644
--- a/vendor/github.com/containerd/containerd/api/types/platform.proto
+++ b/vendor/github.com/containerd/containerd/api/types/platform.proto
@@ -26,4 +26,5 @@ message Platform {
string os = 1;
string architecture = 2;
string variant = 3;
+ string os_version = 4;
}
diff --git a/vendor/github.com/containerd/containerd/api/types/platform_helpers.go b/vendor/github.com/containerd/containerd/api/types/platform_helpers.go
new file mode 100644
index 000000000000..d8c1a6877052
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/platform_helpers.go
@@ -0,0 +1,49 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package types
+
+import oci "github.com/opencontainers/image-spec/specs-go/v1"
+
+// OCIPlatformToProto converts from a slice of OCI [specs.Platform] to a
+// slice of the protobuf definition [Platform].
+func OCIPlatformToProto(platforms []oci.Platform) []*Platform {
+ ap := make([]*Platform, len(platforms))
+ for i := range platforms {
+ ap[i] = &Platform{
+ OS: platforms[i].OS,
+ OSVersion: platforms[i].OSVersion,
+ Architecture: platforms[i].Architecture,
+ Variant: platforms[i].Variant,
+ }
+ }
+ return ap
+}
+
+// OCIPlatformFromProto converts a slice of the protobuf definition [Platform]
+// to a slice of OCI [specs.Platform].
+func OCIPlatformFromProto(platforms []*Platform) []oci.Platform {
+ op := make([]oci.Platform, len(platforms))
+ for i := range platforms {
+ op[i] = oci.Platform{
+ OS: platforms[i].OS,
+ OSVersion: platforms[i].OSVersion,
+ Architecture: platforms[i].Architecture,
+ Variant: platforms[i].Variant,
+ }
+ }
+ return op
+}
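
Editor's note: OCIPlatformToProto and OCIPlatformFromProto now carry the new OSVersion field through both directions of the conversion. A small round-trip sketch, with placeholder values:

package main

import (
	"fmt"

	types "github.com/containerd/containerd/api/types"
	oci "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	in := []oci.Platform{{
		OS:           "windows",
		OSVersion:    "10.0.20348", // carried by the new os_version field
		Architecture: "amd64",
	}}

	// Round-trip through the protobuf representation.
	out := types.OCIPlatformFromProto(types.OCIPlatformToProto(in))
	fmt.Println(out[0].OS, out[0].OSVersion, out[0].Architecture)
}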
diff --git a/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go b/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go
index 67594f416c81..77888bf33208 100644
--- a/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go
@@ -59,6 +59,8 @@ type Sandbox struct {
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
// Extensions allow clients to provide optional blobs that can be handled by runtime.
Extensions map[string]*anypb.Any `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Sandboxer is the name of the sandbox controller that manages the sandbox.
+ Sandboxer string `protobuf:"bytes,10,opt,name=sandboxer,proto3" json:"sandboxer,omitempty"`
}
func (x *Sandbox) Reset() {
@@ -142,6 +144,13 @@ func (x *Sandbox) GetExtensions() map[string]*anypb.Any {
return nil
}
+func (x *Sandbox) GetSandboxer() string {
+ if x != nil {
+ return x.Sandboxer
+ }
+ return ""
+}
+
type Sandbox_Runtime struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -211,7 +220,7 @@ var file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc = []by
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e,
0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x04, 0x0a, 0x07, 0x53, 0x61, 0x6e,
+ 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8c, 0x05, 0x0a, 0x07, 0x53, 0x61, 0x6e,
0x64, 0x62, 0x6f, 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f,
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
0x78, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02,
@@ -236,25 +245,27 @@ var file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc = []by
0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x45,
0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a,
- 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x4d, 0x0a, 0x07, 0x52, 0x75,
- 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79,
- 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62,
- 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x53, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
- 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74,
- 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
- 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70,
- 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x61,
+ 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73,
+ 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x72, 0x1a, 0x4d, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x74,
+ 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+ 0x38, 0x01, 0x1a, 0x53, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+ 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f,
+ 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/github.com/containerd/containerd/api/types/sandbox.proto b/vendor/github.com/containerd/containerd/api/types/sandbox.proto
index b607706194b2..b0bf233b954b 100644
--- a/vendor/github.com/containerd/containerd/api/types/sandbox.proto
+++ b/vendor/github.com/containerd/containerd/api/types/sandbox.proto
@@ -41,11 +41,14 @@ message Sandbox {
// bundle directory (similarly to OCI spec).
google.protobuf.Any spec = 3;
// Labels provides an area to include arbitrary data on containers.
- map<string, string> labels  = 4;
+ map<string, string> labels = 4;
// CreatedAt is the time the container was first created.
google.protobuf.Timestamp created_at = 5;
// UpdatedAt is the last time the container was mutated.
google.protobuf.Timestamp updated_at = 6;
// Extensions allow clients to provide optional blobs that can be handled by runtime.
map<string, google.protobuf.Any> extensions = 7;
+ // Sandboxer is the name of the sandbox controller that manages the sandbox.
+ string sandboxer = 10;
+
}
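
Editor's note: the new sandboxer field records which sandbox controller owns a sandbox. A hedged sketch of reading it with a fallback for records written before the field existed; the "podsandbox" default is an assumption, not taken from this diff.

package main

import (
	"fmt"

	types "github.com/containerd/containerd/api/types"
)

// controllerFor is a hypothetical helper: older records may predate the
// sandboxer field, so fall back to an assumed default controller name.
func controllerFor(sb *types.Sandbox) string {
	if name := sb.GetSandboxer(); name != "" {
		return name
	}
	return "podsandbox" // assumed default
}

func main() {
	fmt.Println(controllerFor(&types.Sandbox{Sandboxer: "shim"}))
	fmt.Println(controllerFor(&types.Sandbox{}))
}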
diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/progress.pb.go b/vendor/github.com/containerd/containerd/api/types/transfer/progress.pb.go
index 62d12b34db37..d1fb1625dff7 100644
--- a/vendor/github.com/containerd/containerd/api/types/transfer/progress.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/transfer/progress.pb.go
@@ -22,6 +22,7 @@
package transfer
import (
+ types "github.com/containerd/containerd/api/types"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@@ -40,11 +41,12 @@ type Progress struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Event string `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"`
- Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
- Parents []string `protobuf:"bytes,3,rep,name=parents,proto3" json:"parents,omitempty"`
- Progress int64 `protobuf:"varint,4,opt,name=progress,proto3" json:"progress,omitempty"`
- Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"`
+ Event string `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"`
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ Parents []string `protobuf:"bytes,3,rep,name=parents,proto3" json:"parents,omitempty"`
+ Progress int64 `protobuf:"varint,4,opt,name=progress,proto3" json:"progress,omitempty"`
+ Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"`
+ Desc *types.Descriptor `protobuf:"bytes,6,opt,name=desc,proto3" json:"desc,omitempty"`
}
func (x *Progress) Reset() {
@@ -114,6 +116,13 @@ func (x *Progress) GetTotal() int64 {
return 0
}
+func (x *Progress) GetDesc() *types.Descriptor {
+ if x != nil {
+ return x.Desc
+ }
+ return nil
+}
+
var File_github_com_containerd_containerd_api_types_transfer_progress_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDesc = []byte{
@@ -122,20 +131,26 @@ var file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawD
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61,
0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
- 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x22,
- 0x80, 0x01, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05,
- 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x76, 0x65,
- 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
- 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73,
- 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x03, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05,
- 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74,
- 0x61, 0x6c, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74,
- 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73,
- 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x1a,
+ 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74,
+ 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
+ 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb2, 0x01, 0x0a,
+ 0x08, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x76, 0x65,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1a, 0x0a,
+ 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74,
+ 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12,
+ 0x30, 0x0a, 0x04, 0x64, 0x65, 0x73, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x64, 0x65, 0x73,
+ 0x63, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f,
+ 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -152,14 +167,16 @@ func file_github_com_containerd_containerd_api_types_transfer_progress_proto_raw
var file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_github_com_containerd_containerd_api_types_transfer_progress_proto_goTypes = []interface{}{
- (*Progress)(nil), // 0: containerd.types.transfer.Progress
+ (*Progress)(nil), // 0: containerd.types.transfer.Progress
+ (*types.Descriptor)(nil), // 1: containerd.types.Descriptor
}
var file_github_com_containerd_containerd_api_types_transfer_progress_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
+ 1, // 0: containerd.types.transfer.Progress.desc:type_name -> containerd.types.Descriptor
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
}
func init() { file_github_com_containerd_containerd_api_types_transfer_progress_proto_init() }
diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto b/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto
index 3059bcbb12a4..094c363b2132 100644
--- a/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto
+++ b/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto
@@ -18,6 +18,8 @@ syntax = "proto3";
package containerd.types.transfer;
+import "github.com/containerd/containerd/api/types/descriptor.proto";
+
option go_package = "github.com/containerd/containerd/api/types/transfer";
message Progress {
@@ -26,4 +28,5 @@ message Progress {
repeated string parents = 3;
int64 progress = 4;
int64 total = 5;
+ containerd.types.Descriptor desc = 6;
}
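
Editor's note: Progress events can now reference the concrete blob being transferred via desc. A sketch of a progress callback that prints it when present; the helper name and sample values are illustrative.

package main

import (
	"fmt"

	transfer "github.com/containerd/containerd/api/types/transfer"
)

// printProgress is a sketch of a progress callback; desc is only set
// when the event refers to a concrete blob.
func printProgress(p *transfer.Progress) {
	line := fmt.Sprintf("%s %s %d/%d", p.Event, p.Name, p.Progress, p.Total)
	if d := p.GetDesc(); d != nil {
		line += fmt.Sprintf(" (%s %s)", d.MediaType, d.Digest)
	}
	fmt.Println(line)
}

func main() {
	printProgress(&transfer.Progress{Event: "downloading", Name: "layer-0", Progress: 512, Total: 1024})
}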
diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/registry.pb.go b/vendor/github.com/containerd/containerd/api/types/transfer/registry.pb.go
index 57bb80bae9e5..1c67b7591dbf 100644
--- a/vendor/github.com/containerd/containerd/api/types/transfer/registry.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/transfer/registry.pb.go
@@ -159,7 +159,9 @@ type RegistryResolver struct {
// made on.
AuthStream string `protobuf:"bytes,1,opt,name=auth_stream,json=authStream,proto3" json:"auth_stream,omitempty"`
// Headers
- Headers map[string]string `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Headers map[string]string `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ HostDir string `protobuf:"bytes,3,opt,name=host_dir,json=hostDir,proto3" json:"host_dir,omitempty"`
+ DefaultScheme string `protobuf:"bytes,4,opt,name=default_scheme,json=defaultScheme,proto3" json:"default_scheme,omitempty"`
}
func (x *RegistryResolver) Reset() {
@@ -208,6 +210,20 @@ func (x *RegistryResolver) GetHeaders() map[string]string {
return nil
}
+func (x *RegistryResolver) GetHostDir() string {
+ if x != nil {
+ return x.HostDir
+ }
+ return ""
+}
+
+func (x *RegistryResolver) GetDefaultScheme() string {
+ if x != nil {
+ return x.DefaultScheme
+ }
+ return ""
+}
+
// AuthRequest is sent as a callback on a stream
type AuthRequest struct {
state protoimpl.MessageState
@@ -364,7 +380,7 @@ var file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawD
0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70,
0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x69,
0x73, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x52, 0x08, 0x72, 0x65,
- 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x22, 0xc3, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73,
+ 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x22, 0x85, 0x02, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73,
0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x61,
0x75, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x52, 0x0a, 0x07,
@@ -373,37 +389,41 @@ var file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawD
0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65,
0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
- 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
- 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
- 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x69, 0x0a, 0x0b,
- 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68,
- 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12,
- 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x28, 0x0a,
- 0x0f, 0x77, 0x77, 0x77, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65,
- 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x77, 0x77, 0x61, 0x75, 0x74, 0x68, 0x65,
- 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x68,
- 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72,
- 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x52,
- 0x08, 0x61, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x63,
- 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65,
- 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a,
- 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78,
- 0x70, 0x69, 0x72, 0x65, 0x41, 0x74, 0x2a, 0x3e, 0x0a, 0x08, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b,
- 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a,
- 0x07, 0x52, 0x45, 0x46, 0x52, 0x45, 0x53, 0x48, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x45,
- 0x41, 0x44, 0x45, 0x52, 0x10, 0x03, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f,
- 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74,
- 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x12, 0x19, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x44, 0x69, 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x64,
+ 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x65, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x69,
+ 0x0a, 0x0b, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a,
+ 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73,
+ 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12,
+ 0x28, 0x0a, 0x0f, 0x77, 0x77, 0x77, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x77, 0x77, 0x61, 0x75, 0x74,
+ 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x0c, 0x41, 0x75,
+ 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x08, 0x61, 0x75,
+ 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e,
+ 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70,
+ 0x65, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73,
+ 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x63,
+ 0x72, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x37, 0x0a, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08,
+ 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x74, 0x2a, 0x3e, 0x0a, 0x08, 0x41, 0x75, 0x74, 0x68,
+ 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f,
+ 0x0a, 0x0b, 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, 0x53, 0x10, 0x01, 0x12,
+ 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x46, 0x52, 0x45, 0x53, 0x48, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06,
+ 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x03, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
+ 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69,
+ 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto b/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto
index 0b3ce68b4c4b..3331ecb7820e 100644
--- a/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto
+++ b/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto
@@ -35,9 +35,10 @@ message RegistryResolver {
// Headers
map<string, string> headers = 2;
- // Allow custom hosts dir?
+ string host_dir = 3;
+
+ string default_scheme = 4;
// Force skip verify
- // Force HTTP
// CA callback? Client TLS callback?
}
diff --git a/vendor/github.com/containerd/containerd/v2/LICENSE b/vendor/github.com/containerd/containerd/v2/LICENSE
new file mode 100644
index 000000000000..584149b6ee28
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright The containerd Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/containerd/v2/NOTICE b/vendor/github.com/containerd/containerd/v2/NOTICE
new file mode 100644
index 000000000000..8915f02773f5
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/NOTICE
@@ -0,0 +1,16 @@
+Docker
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/containerd/containerd/v2/core/content/adaptor.go b/vendor/github.com/containerd/containerd/v2/core/content/adaptor.go
new file mode 100644
index 000000000000..4ba6cc74d3ce
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/content/adaptor.go
@@ -0,0 +1,52 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package content
+
+import (
+ "strings"
+
+ "github.com/containerd/containerd/v2/pkg/filters"
+)
+
+// AdaptInfo returns `filters.Adaptor` that handles `content.Info`.
+func AdaptInfo(info Info) filters.Adaptor {
+ return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
+ if len(fieldpath) == 0 {
+ return "", false
+ }
+
+ switch fieldpath[0] {
+ case "digest":
+ return info.Digest.String(), true
+ case "size":
+ // TODO: support size based filtering
+ case "labels":
+ return checkMap(fieldpath[1:], info.Labels)
+ }
+
+ return "", false
+ })
+}
+
+func checkMap(fieldpath []string, m map[string]string) (string, bool) {
+ if len(m) == 0 {
+ return "", false
+ }
+
+ value, ok := m[strings.Join(fieldpath, ".")]
+ return value, ok
+}
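
Editor's note: AdaptInfo plugs content.Info into containerd's generic filter machinery, so label filters can be evaluated against blob metadata. A usage sketch, assuming the filter grammar from pkg/filters and a made-up label:

package main

import (
	"fmt"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/pkg/filters"
	"github.com/opencontainers/go-digest"
)

func main() {
	info := content.Info{
		Digest: digest.FromString("hello"),
		Labels: map[string]string{"foo": "bar"},
	}

	// "labels.foo==bar" resolves through AdaptInfo's "labels" fieldpath.
	f, err := filters.Parse("labels.foo==bar")
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Match(content.AdaptInfo(info)))
}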
diff --git a/vendor/github.com/containerd/containerd/v2/core/content/content.go b/vendor/github.com/containerd/containerd/v2/core/content/content.go
new file mode 100644
index 000000000000..66b42a9cb9a7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/content/content.go
@@ -0,0 +1,212 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package content
+
+import (
+ "context"
+ "io"
+ "time"
+
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Store combines the methods of content-oriented interfaces into a set that
+// are commonly provided by complete implementations.
+//
+// Overall content lifecycle:
+// - Ingester is used to initiate a write operation (aka ingestion)
+// - IngestManager is used to manage (e.g. list, abort) active ingestions
+// - Once an ingestion is complete (see Writer.Commit), Provider is used to
+// query a single piece of content by its digest
+// - Manager is used to manage (e.g. list, delete) previously committed content
+//
+// Note that until ingestion is complete, its content is not visible through
+// Provider or Manager. Once ingestion is complete, it is no longer exposed
+// through IngestManager.
+type Store interface {
+ Manager
+ Provider
+ IngestManager
+ Ingester
+}
+
+// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer
+type ReaderAt interface {
+ io.ReaderAt
+ io.Closer
+ Size() int64
+}
+
+// Provider provides a reader interface for specific content
+type Provider interface {
+ // ReaderAt only requires desc.Digest to be set.
+ // Other fields in the descriptor may be used internally for resolving
+ // the location of the actual data.
+ ReaderAt(ctx context.Context, desc ocispec.Descriptor) (ReaderAt, error)
+}
+
+// Ingester writes content
+type Ingester interface {
+ // Writer initiates a writing operation (aka ingestion). A single ingestion
+ // is uniquely identified by its ref, provided using a WithRef option.
+ // Writer can be called multiple times with the same ref to access the same
+ // ingestion.
+ // Once all the data is written, use Writer.Commit to complete the ingestion.
+ Writer(ctx context.Context, opts ...WriterOpt) (Writer, error)
+}
+
+// IngestManager provides methods for managing ingestions. An ingestion is a
+// not-yet-complete writing operation initiated using Ingester and identified
+// by a ref string.
+type IngestManager interface {
+ // Status returns the status of the provided ref.
+ Status(ctx context.Context, ref string) (Status, error)
+
+ // ListStatuses returns the status of any active ingestions whose ref match
+ // the provided regular expression. If empty, all active ingestions will be
+ // returned.
+ ListStatuses(ctx context.Context, filters ...string) ([]Status, error)
+
+ // Abort completely cancels the ingest operation targeted by ref.
+ Abort(ctx context.Context, ref string) error
+}
+
+// Info holds content specific information
+type Info struct {
+ Digest digest.Digest
+ Size int64
+ CreatedAt time.Time
+ UpdatedAt time.Time
+ Labels map[string]string
+}
+
+// Status of a content operation (i.e. an ingestion)
+type Status struct {
+ Ref string
+ Offset int64
+ Total int64
+ Expected digest.Digest
+ StartedAt time.Time
+ UpdatedAt time.Time
+}
+
+// WalkFunc defines the callback for a blob walk.
+type WalkFunc func(Info) error
+
+// InfoReaderProvider provides both info and reader for the specific content.
+type InfoReaderProvider interface {
+ InfoProvider
+ Provider
+}
+
+// InfoProvider provides info for content inspection.
+type InfoProvider interface {
+ // Info will return metadata about content available in the content store.
+ //
+ // If the content is not present, ErrNotFound will be returned.
+ Info(ctx context.Context, dgst digest.Digest) (Info, error)
+}
+
+// Manager provides methods for inspecting, listing and removing content.
+type Manager interface {
+ InfoProvider
+
+ // Update updates mutable information related to content.
+ // If one or more fieldpaths are provided, only those
+ // fields will be updated.
+ // Mutable fields:
+ // labels.*
+ Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error)
+
+ // Walk will call fn for each item in the content store which
+ // match the provided filters. If no filters are given all
+ // items will be walked.
+ Walk(ctx context.Context, fn WalkFunc, filters ...string) error
+
+ // Delete removes the content from the store.
+ Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// Writer handles writing of content into a content store
+type Writer interface {
+ // Close closes the writer, if the writer has not been
+ // committed this allows resuming or aborting.
+ // Calling Close on a closed writer will not error.
+ io.WriteCloser
+
+ // Digest may return empty digest or panics until committed.
+ Digest() digest.Digest
+
+ // Commit commits the blob (but no roll-back is guaranteed on an error).
+ // size and expected can be zero-value when unknown.
+ // Commit always closes the writer, even on error.
+ // ErrAlreadyExists aborts the writer.
+ Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error
+
+ // Status returns the current state of write
+ Status() (Status, error)
+
+ // Truncate updates the size of the target blob
+ Truncate(size int64) error
+}
+
+type Syncer interface {
+ // Sync flushes the in-flight writes to the disk (when applicable)
+ Sync() error
+}
+
+// Opt is used to alter the mutable properties of content
+type Opt func(*Info) error
+
+// WithLabels allows labels to be set on content
+func WithLabels(labels map[string]string) Opt {
+ return func(info *Info) error {
+ info.Labels = labels
+ return nil
+ }
+}
+
+// WriterOpts is internally used by WriterOpt.
+type WriterOpts struct {
+ Ref string
+ Desc ocispec.Descriptor
+}
+
+// WriterOpt is used for passing options to Ingester.Writer.
+type WriterOpt func(*WriterOpts) error
+
+// WithDescriptor specifies an OCI descriptor.
+// Writer may optionally use the descriptor internally for resolving
+// the location of the actual data.
+// Write does not require any field of desc to be set.
+// If the data size is unknown, desc.Size should be set to 0.
+// Some implementations may also accept negative values as "unknown".
+func WithDescriptor(desc ocispec.Descriptor) WriterOpt {
+ return func(opts *WriterOpts) error {
+ opts.Desc = desc
+ return nil
+ }
+}
+
+// WithRef specifies a ref string.
+func WithRef(ref string) WriterOpt {
+ return func(opts *WriterOpts) error {
+ opts.Ref = ref
+ return nil
+ }
+}
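
Editor's note: the lifecycle documented on Store above (Writer, then write, then Commit, after which the blob becomes visible via Provider/Manager) looks roughly like this against any Store implementation. This is a sketch; it omits the ErrAlreadyExists and retry handling that the helpers in the next file provide.

package example

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/opencontainers/go-digest"
)

// ingest follows the documented lifecycle: open a ref-named writer,
// stream the bytes, then commit against the expected size and digest.
func ingest(ctx context.Context, store content.Store, data []byte) error {
	expected := digest.FromBytes(data)

	w, err := store.Writer(ctx, content.WithRef("example-ingest"))
	if err != nil {
		return err
	}
	defer w.Close()

	if _, err := w.Write(data); err != nil {
		return err
	}
	// Commit makes the blob visible through Provider/Manager and ends
	// the ingestion tracked by IngestManager.
	if err := w.Commit(ctx, int64(len(data)), expected); err != nil {
		return err
	}
	fmt.Println("committed", expected)
	return nil
}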
diff --git a/vendor/github.com/containerd/containerd/v2/core/content/helpers.go b/vendor/github.com/containerd/containerd/v2/core/content/helpers.go
new file mode 100644
index 000000000000..74cb566b02b9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/content/helpers.go
@@ -0,0 +1,353 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package content
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/containerd/containerd/v2/internal/randutil"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var ErrReset = errors.New("writer has been reset")
+
+var bufPool = sync.Pool{
+ New: func() interface{} {
+ buffer := make([]byte, 1<<20)
+ return &buffer
+ },
+}
+
+type reader interface {
+ Reader() io.Reader
+}
+
+// NewReader returns a io.Reader from a ReaderAt
+func NewReader(ra ReaderAt) io.Reader {
+ if rd, ok := ra.(reader); ok {
+ return rd.Reader()
+ }
+ return io.NewSectionReader(ra, 0, ra.Size())
+}
+
+type nopCloserBytesReader struct {
+ *bytes.Reader
+}
+
+func (*nopCloserBytesReader) Close() error { return nil }
+
+type nopCloserSectionReader struct {
+ *io.SectionReader
+}
+
+func (*nopCloserSectionReader) Close() error { return nil }
+
+// BlobReadSeeker returns a read seeker for the blob from the provider.
+func BlobReadSeeker(ctx context.Context, provider Provider, desc ocispec.Descriptor) (io.ReadSeekCloser, error) {
+ if int64(len(desc.Data)) == desc.Size && digest.FromBytes(desc.Data) == desc.Digest {
+ return &nopCloserBytesReader{bytes.NewReader(desc.Data)}, nil
+ }
+
+ ra, err := provider.ReaderAt(ctx, desc)
+ if err != nil {
+ return nil, err
+ }
+ return &nopCloserSectionReader{io.NewSectionReader(ra, 0, ra.Size())}, nil
+}
+
+// ReadBlob retrieves the entire contents of the blob from the provider.
+//
+// Avoid using this for large blobs, such as layers.
+func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) {
+ if int64(len(desc.Data)) == desc.Size && digest.FromBytes(desc.Data) == desc.Digest {
+ return desc.Data, nil
+ }
+
+ ra, err := provider.ReaderAt(ctx, desc)
+ if err != nil {
+ return nil, err
+ }
+ defer ra.Close()
+
+ p := make([]byte, ra.Size())
+
+ n, err := ra.ReadAt(p, 0)
+ if err == io.EOF {
+ if int64(n) != ra.Size() {
+ err = io.ErrUnexpectedEOF
+ } else {
+ err = nil
+ }
+ }
+ return p, err
+}
+
+// WriteBlob writes data with the expected digest into the content store. If
+// expected already exists, the method returns immediately and the reader will
+// not be consumed.
+//
+// This is useful when the digest and size are known beforehand.
+//
+// Copy is buffered, so no need to wrap reader in buffered io.
+func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc ocispec.Descriptor, opts ...Opt) error {
+ cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc))
+ if err != nil {
+ if !errdefs.IsAlreadyExists(err) {
+ return fmt.Errorf("failed to open writer: %w", err)
+ }
+
+ return nil // already present
+ }
+ defer cw.Close()
+
+ return Copy(ctx, cw, r, desc.Size, desc.Digest, opts...)
+}
+
+// OpenWriter opens a new writer for the given reference, retrying if the writer
+// is locked until the reference is available or returns an error.
+func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, error) {
+ var (
+ cw Writer
+ err error
+ retry = 16
+ )
+ for {
+ cw, err = cs.Writer(ctx, opts...)
+ if err != nil {
+ if !errdefs.IsUnavailable(err) {
+ return nil, err
+ }
+
+ // TODO: Check status to determine if the writer is active,
+ // continue waiting while active, otherwise return lock
+ // error or abort. Requires asserting for an ingest manager
+
+ select {
+ case <-time.After(time.Millisecond * time.Duration(randutil.Intn(retry))):
+ if retry < 2048 {
+ retry = retry << 1
+ }
+ continue
+ case <-ctx.Done():
+ // Propagate lock error
+ return nil, err
+ }
+
+ }
+ break
+ }
+
+ return cw, err
+}
+
+// Copy copies data with the expected digest from the reader into the
+// provided content store writer. This copy commits the writer.
+//
+// This is useful when the digest and size are known beforehand. When
+// the size or digest is unknown, these values may be empty.
+//
+// Copy is buffered, so no need to wrap reader in buffered io.
+func Copy(ctx context.Context, cw Writer, or io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
+ r := or
+ for i := 0; ; i++ {
+ if i >= 1 {
+ log.G(ctx).WithField("digest", expected).Debugf("retrying copy due to reset")
+ }
+
+ ws, err := cw.Status()
+ if err != nil {
+ return fmt.Errorf("failed to get status: %w", err)
+ }
+ // Reset the original reader if
+ // 1. there is an offset, or
+ // 2. this is a retry due to Reset error
+ if ws.Offset > 0 || i > 0 {
+ r, err = seekReader(or, ws.Offset, size)
+ if err != nil {
+ return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
+ }
+ }
+
+ copied, err := copyWithBuffer(cw, r)
+ if errors.Is(err, ErrReset) {
+ continue
+ }
+ if err != nil {
+ return fmt.Errorf("failed to copy: %w", err)
+ }
+ if size != 0 && copied < size-ws.Offset {
+ // Short writes would return its own error, this indicates a read failure
+ return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
+ }
+ if err := cw.Commit(ctx, size, expected, opts...); err != nil {
+ if errors.Is(err, ErrReset) {
+ continue
+ }
+ if !errdefs.IsAlreadyExists(err) {
+ return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err)
+ }
+ }
+ return nil
+ }
+}
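+
+// A minimal usage sketch (hypothetical caller code): the usual pairing of
+// OpenWriter and Copy, which resumes from any offset already ingested for
+// the same ref.
+//
+//	cw, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
+//	if err != nil {
+//		return err // may be ErrAlreadyExists if the content is complete
+//	}
+//	defer cw.Close()
+//	if err := content.Copy(ctx, cw, r, size, expected); err != nil {
+//		return err
+//	}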
+
+// CopyReaderAt copies to a writer from a given reader at for the given
+// number of bytes. This copy does not commit the writer.
+func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
+ ws, err := cw.Status()
+ if err != nil {
+ return err
+ }
+
+ copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n))
+ if err != nil {
+ return fmt.Errorf("failed to copy: %w", err)
+ }
+ if copied < n {
+ // A short write returns its own error; reading fewer bytes than expected indicates a read failure
+ return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
+ }
+ return nil
+}
+
+// CopyReader copies to a writer from a given reader, returning
+// the number of bytes copied.
+// Note: if the writer has a non-zero offset, the total number
+// of bytes read may be greater than those copied if the reader
+// is not an io.Seeker.
+// This copy does not commit the writer.
+func CopyReader(cw Writer, r io.Reader) (int64, error) {
+ ws, err := cw.Status()
+ if err != nil {
+ return 0, fmt.Errorf("failed to get status: %w", err)
+ }
+
+ if ws.Offset > 0 {
+ r, err = seekReader(r, ws.Offset, 0)
+ if err != nil {
+ return 0, fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
+ }
+ }
+
+ return copyWithBuffer(cw, r)
+}
+
+// seekReader attempts to seek the reader to the given offset, either by
+// resolving `io.Seeker`, by detecting `io.ReaderAt`, or by discarding
+// bytes up to the given offset.
+func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
+ // attempt to resolve r as a seeker and setup the offset.
+ seeker, ok := r.(io.Seeker)
+ if ok {
+ nn, err := seeker.Seek(offset, io.SeekStart)
+ if nn != offset {
+ if err == nil {
+ err = fmt.Errorf("unexpected seek location without seek error")
+ }
+ return nil, fmt.Errorf("failed to seek to offset %v: %w", offset, err)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return r, nil
+ }
+
+ // ok, let's try io.ReaderAt!
+ readerAt, ok := r.(io.ReaderAt)
+ if ok && size > offset {
+ sr := io.NewSectionReader(readerAt, offset, size)
+ return sr, nil
+ }
+
+ // well then, let's just discard up to the offset
+ n, err := copyWithBuffer(io.Discard, io.LimitReader(r, offset))
+ if err != nil {
+ return nil, fmt.Errorf("failed to discard to offset: %w", err)
+ }
+ if n != offset {
+ return nil, errors.New("unable to discard to offset")
+ }
+
+ return r, nil
+}
+
+// copyWithBuffer is very similar to io.CopyBuffer https://golang.org/pkg/io/#CopyBuffer
+// but instead of using Read to read from the src, we use ReadAtLeast to make sure we have
+// a full buffer before we do a write operation to dst, reducing the overhead associated
+// with writing many small buffers.
+func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
+ // If the reader has a WriteTo method, use it to do the copy.
+ // Avoids an allocation and a copy.
+ if wt, ok := src.(io.WriterTo); ok {
+ return wt.WriteTo(dst)
+ }
+ // Similarly, if the writer has a ReadFrom method, use it to do the copy.
+ if rt, ok := dst.(io.ReaderFrom); ok {
+ return rt.ReadFrom(src)
+ }
+ bufRef := bufPool.Get().(*[]byte)
+ defer bufPool.Put(bufRef)
+ buf := *bufRef
+ for {
+ nr, er := io.ReadAtLeast(src, buf, len(buf))
+ if nr > 0 {
+ nw, ew := dst.Write(buf[0:nr])
+ if nw > 0 {
+ written += int64(nw)
+ }
+ if ew != nil {
+ err = ew
+ break
+ }
+ if nr != nw {
+ err = io.ErrShortWrite
+ break
+ }
+ }
+ if er != nil {
+ // If an EOF happens after reading fewer than the requested bytes,
+ // ReadAtLeast returns ErrUnexpectedEOF.
+ if er != io.EOF && er != io.ErrUnexpectedEOF {
+ err = er
+ }
+ break
+ }
+ }
+ return
+}
+
+// Exists reports whether the content can be accessed without an ErrNotFound
+// error. Any other error encountered is returned as-is.
+func Exists(ctx context.Context, provider InfoProvider, desc ocispec.Descriptor) (bool, error) {
+ _, err := provider.Info(ctx, desc.Digest)
+ if errdefs.IsNotFound(err) {
+ return false, nil
+ }
+ return err == nil, err
+}
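+
+// A minimal usage sketch (hypothetical caller code): probing for presence
+// before scheduling a fetch; only ErrNotFound is translated to (false, nil).
+//
+//	ok, err := content.Exists(ctx, store, desc)
+//	if err != nil {
+//		return err
+//	}
+//	if !ok {
+//		// schedule a fetch of desc
+//	}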
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/annotations.go b/vendor/github.com/containerd/containerd/v2/core/images/annotations.go
new file mode 100644
index 000000000000..47d92104cddc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/annotations.go
@@ -0,0 +1,23 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package images
+
+const (
+ // AnnotationImageName is an annotation on a Descriptor in an index.json
+ // containing the `Name` value as used by an `Image` struct
+ AnnotationImageName = "io.containerd.image.name"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/archive/exporter.go b/vendor/github.com/containerd/containerd/v2/core/images/archive/exporter.go
new file mode 100644
index 000000000000..3d98daea1df9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/archive/exporter.go
@@ -0,0 +1,589 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package archive
+
+import (
+ "archive/tar"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "path"
+ "sort"
+ "strings"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/pkg/labels"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+ "github.com/containerd/platforms"
+ digest "github.com/opencontainers/go-digest"
+ ocispecs "github.com/opencontainers/image-spec/specs-go"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type exportOptions struct {
+ manifests []ocispec.Descriptor
+ platform platforms.MatchComparer
+ allPlatforms bool
+ skipDockerManifest bool
+ blobRecordOptions blobRecordOptions
+}
+
+// ExportOpt defines options for configuring exported descriptors
+type ExportOpt func(context.Context, *exportOptions) error
+
+// WithPlatform defines the platform that manifest lists are required to match
+// when not exporting all platforms.
+// Additionally, the platform is used to resolve image configs for
+// Docker v1.1 and v1.2 format compatibility.
+func WithPlatform(p platforms.MatchComparer) ExportOpt {
+ return func(ctx context.Context, o *exportOptions) error {
+ o.platform = p
+ return nil
+ }
+}
+
+// WithAllPlatforms exports all manifests from a manifest list.
+// Missing content will fail the export.
+func WithAllPlatforms() ExportOpt {
+ return func(ctx context.Context, o *exportOptions) error {
+ o.allPlatforms = true
+ return nil
+ }
+}
+
+// WithSkipDockerManifest skips creation of the Docker compatible
+// manifest.json file.
+func WithSkipDockerManifest() ExportOpt {
+ return func(ctx context.Context, o *exportOptions) error {
+ o.skipDockerManifest = true
+ return nil
+ }
+}
+
+// WithImage adds the provided images to the exported archive.
+func WithImage(is images.Store, name string) ExportOpt {
+ return func(ctx context.Context, o *exportOptions) error {
+ img, err := is.Get(ctx, name)
+ if err != nil {
+ return err
+ }
+
+ img.Target.Annotations = addNameAnnotation(name, img.Target.Annotations)
+ o.manifests = append(o.manifests, img.Target)
+
+ return nil
+ }
+}
+
+// WithImages adds multiple images to the exported archive.
+func WithImages(imgs []images.Image) ExportOpt {
+ return func(ctx context.Context, o *exportOptions) error {
+ for _, img := range imgs {
+ img.Target.Annotations = addNameAnnotation(img.Name, img.Target.Annotations)
+ o.manifests = append(o.manifests, img.Target)
+ }
+
+ return nil
+ }
+}
+
+// WithManifest adds a manifest to the exported archive.
+// When names are given, they will be set on the manifest in the
+// exported archive, creating an index record for each name.
+// When no names are provided, it is up to the caller to put a name
+// annotation on the manifest descriptor if needed.
+func WithManifest(manifest ocispec.Descriptor, names ...string) ExportOpt {
+ return func(ctx context.Context, o *exportOptions) error {
+ if len(names) == 0 {
+ o.manifests = append(o.manifests, manifest)
+ }
+ for _, name := range names {
+ mc := manifest
+ mc.Annotations = addNameAnnotation(name, manifest.Annotations)
+ o.manifests = append(o.manifests, mc)
+ }
+
+ return nil
+ }
+}
+
+// BlobFilter returns false if the blob should not be included in the archive.
+type BlobFilter func(ocispec.Descriptor) bool
+
+// WithBlobFilter specifies BlobFilter.
+func WithBlobFilter(f BlobFilter) ExportOpt {
+ return func(ctx context.Context, o *exportOptions) error {
+ o.blobRecordOptions.blobFilter = f
+ return nil
+ }
+}
+
+// WithSkipNonDistributableBlobs excludes non-distributable blobs such as Windows base layers.
+func WithSkipNonDistributableBlobs() ExportOpt {
+ f := func(desc ocispec.Descriptor) bool {
+ return !images.IsNonDistributable(desc.MediaType)
+ }
+ return WithBlobFilter(f)
+}
+
+// WithSkipMissing excludes blobs referenced by manifests if not all blobs
+// would be included in the archive.
+// The manifest itself is excluded only if it is not present locally.
+// This allows exporting multi-platform images even when not all platforms are
+// present, while still persisting the multi-platform index.
+func WithSkipMissing(store content.InfoReaderProvider) ExportOpt {
+ return func(ctx context.Context, o *exportOptions) error {
+ o.blobRecordOptions.childrenHandler = images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
+ children, err := images.Children(ctx, store, desc)
+ if !images.IsManifestType(desc.MediaType) {
+ return children, err
+ }
+
+ if err != nil {
+ // If manifest itself is missing, skip it from export.
+ if errdefs.IsNotFound(err) {
+ return nil, images.ErrSkipDesc
+ }
+ return nil, err
+ }
+
+ // Don't export manifest descendants if any of them doesn't exist.
+ for _, child := range children {
+ exists, err := content.Exists(ctx, store, child)
+ if err != nil {
+ return nil, err
+ }
+
+ // If any child is missing, only export the manifest, but don't export its descendants.
+ if !exists {
+ return nil, nil
+ }
+ }
+ return children, nil
+ })
+ return nil
+ }
+}
+
+func addNameAnnotation(name string, base map[string]string) map[string]string {
+ annotations := map[string]string{}
+ for k, v := range base {
+ annotations[k] = v
+ }
+
+ annotations[images.AnnotationImageName] = name
+ annotations[ocispec.AnnotationRefName] = ociReferenceName(name)
+
+ return annotations
+}
+
+func copySourceLabels(ctx context.Context, infoProvider content.InfoProvider, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
+ info, err := infoProvider.Info(ctx, desc.Digest)
+ if err != nil {
+ return desc, err
+ }
+ for k, v := range info.Labels {
+ if strings.HasPrefix(k, labels.LabelDistributionSource) {
+ if desc.Annotations == nil {
+ desc.Annotations = map[string]string{k: v}
+ } else {
+ desc.Annotations[k] = v
+ }
+ }
+ }
+ return desc, nil
+}
+
+// Export implements Exporter.
+func Export(ctx context.Context, store content.InfoReaderProvider, writer io.Writer, opts ...ExportOpt) error {
+ var eo exportOptions
+ for _, opt := range opts {
+ if err := opt(ctx, &eo); err != nil {
+ return err
+ }
+ }
+
+ records := []tarRecord{
+ ociLayoutFile(""),
+ }
+
+ manifests := make([]ocispec.Descriptor, 0, len(eo.manifests))
+ for _, desc := range eo.manifests {
+ d, err := copySourceLabels(ctx, store, desc)
+ if err != nil {
+ log.G(ctx).WithError(err).WithField("desc", desc).Warn("failed to copy distribution.source labels")
+ continue
+ }
+ manifests = append(manifests, d)
+ }
+
+ algorithms := map[string]struct{}{}
+ dManifests := map[digest.Digest]*exportManifest{}
+ resolvedIndex := map[digest.Digest]digest.Digest{}
+ for _, desc := range manifests {
+ if images.IsManifestType(desc.MediaType) {
+ mt, ok := dManifests[desc.Digest]
+ if !ok {
+ // TODO(containerd): Skip if already added
+ r, err := getRecords(ctx, store, desc, algorithms, &eo.blobRecordOptions)
+ if err != nil {
+ return err
+ }
+ records = append(records, r...)
+
+ mt = &exportManifest{
+ manifest: desc,
+ }
+ dManifests[desc.Digest] = mt
+ }
+
+ name := desc.Annotations[images.AnnotationImageName]
+ if name != "" {
+ mt.names = append(mt.names, name)
+ }
+ } else if images.IsIndexType(desc.MediaType) {
+ d, ok := resolvedIndex[desc.Digest]
+ if !ok {
+ if err := desc.Digest.Validate(); err != nil {
+ return err
+ }
+ records = append(records, blobRecord(store, desc, &eo.blobRecordOptions))
+
+ p, err := content.ReadBlob(ctx, store, desc)
+ if err != nil {
+ return err
+ }
+
+ var index ocispec.Index
+ if err := json.Unmarshal(p, &index); err != nil {
+ return err
+ }
+
+ var manifests []ocispec.Descriptor
+ for _, m := range index.Manifests {
+ if eo.platform != nil {
+ if m.Platform == nil || eo.platform.Match(*m.Platform) {
+ manifests = append(manifests, m)
+ } else if !eo.allPlatforms {
+ continue
+ }
+ }
+
+ r, err := getRecords(ctx, store, m, algorithms, &eo.blobRecordOptions)
+ if err != nil {
+ return err
+ }
+
+ records = append(records, r...)
+ }
+
+ if len(manifests) >= 1 {
+ if len(manifests) > 1 {
+ sort.SliceStable(manifests, func(i, j int) bool {
+ if manifests[i].Platform == nil {
+ return false
+ }
+ if manifests[j].Platform == nil {
+ return true
+ }
+ return eo.platform.Less(*manifests[i].Platform, *manifests[j].Platform)
+ })
+ }
+ d = manifests[0].Digest
+ dManifests[d] = &exportManifest{
+ manifest: manifests[0],
+ }
+ } else if eo.platform != nil {
+ return fmt.Errorf("no manifest found for platform: %w", errdefs.ErrNotFound)
+ }
+ resolvedIndex[desc.Digest] = d
+ }
+ if d != "" {
+ if name := desc.Annotations[images.AnnotationImageName]; name != "" {
+ mt := dManifests[d]
+ mt.names = append(mt.names, name)
+ }
+ }
+ } else {
+ return fmt.Errorf("only manifests may be exported: %w", errdefs.ErrInvalidArgument)
+ }
+ }
+
+ records = append(records, ociIndexRecord(manifests))
+
+ if !eo.skipDockerManifest && len(dManifests) > 0 {
+ tr, err := manifestsRecord(ctx, store, dManifests)
+ if err != nil {
+ return fmt.Errorf("unable to create manifests file: %w", err)
+ }
+
+ records = append(records, tr)
+ }
+
+ if len(algorithms) > 0 {
+ records = append(records, directoryRecord("blobs/", 0755))
+ for alg := range algorithms {
+ records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
+ }
+ }
+
+ tw := tar.NewWriter(writer)
+ defer tw.Close()
+ return writeTar(ctx, tw, records)
+}
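+
+// A minimal usage sketch (hypothetical caller code): exporting a named image
+// for the host platform into a tar stream, where is and cs are the caller's
+// image and content stores and w is an io.Writer.
+//
+//	err := archive.Export(ctx, cs, w,
+//		archive.WithImage(is, "docker.io/library/alpine:latest"),
+//		archive.WithPlatform(platforms.DefaultStrict()),
+//		archive.WithSkipNonDistributableBlobs(),
+//	)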
+
+func getRecords(ctx context.Context, store content.Provider, desc ocispec.Descriptor, algorithms map[string]struct{}, brOpts *blobRecordOptions) ([]tarRecord, error) {
+ var records []tarRecord
+ exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ if err := desc.Digest.Validate(); err != nil {
+ return nil, err
+ }
+ records = append(records, blobRecord(store, desc, brOpts))
+ algorithms[desc.Digest.Algorithm().String()] = struct{}{}
+ return nil, nil
+ }
+
+ childrenHandler := brOpts.childrenHandler
+ if childrenHandler == nil {
+ childrenHandler = images.ChildrenHandler(store)
+ }
+
+ handlers := images.Handlers(
+ childrenHandler,
+ images.HandlerFunc(exportHandler),
+ )
+
+ // Walk sequentially since the number of fetches is likely one and doing it
+ // in parallel would require locking the export handler
+ if err := images.Walk(ctx, handlers, desc); err != nil {
+ return nil, err
+ }
+
+ return records, nil
+}
+
+type tarRecord struct {
+ Header *tar.Header
+ CopyTo func(context.Context, io.Writer) (int64, error)
+}
+
+type blobRecordOptions struct {
+ blobFilter BlobFilter
+ childrenHandler images.HandlerFunc
+}
+
+func blobRecord(cs content.Provider, desc ocispec.Descriptor, opts *blobRecordOptions) tarRecord {
+ if opts != nil && opts.blobFilter != nil && !opts.blobFilter(desc) {
+ return tarRecord{}
+ }
+ return tarRecord{
+ Header: &tar.Header{
+ Name: path.Join(ocispec.ImageBlobsDir, desc.Digest.Algorithm().String(), desc.Digest.Encoded()),
+ Mode: 0444,
+ Size: desc.Size,
+ Typeflag: tar.TypeReg,
+ },
+ CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+ r, err := cs.ReaderAt(ctx, desc)
+ if err != nil {
+ return 0, fmt.Errorf("failed to get reader: %w", err)
+ }
+ defer r.Close()
+
+ // Verify digest
+ dgstr := desc.Digest.Algorithm().Digester()
+
+ n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
+ if err != nil {
+ return 0, fmt.Errorf("failed to copy to tar: %w", err)
+ }
+ if dgstr.Digest() != desc.Digest {
+ return 0, fmt.Errorf("unexpected digest %s copied", dgstr.Digest())
+ }
+ return n, nil
+ },
+ }
+}
+
+func directoryRecord(name string, mode int64) tarRecord {
+ return tarRecord{
+ Header: &tar.Header{
+ Name: name,
+ Mode: mode,
+ Typeflag: tar.TypeDir,
+ },
+ }
+}
+
+func ociLayoutFile(version string) tarRecord {
+ if version == "" {
+ version = ocispec.ImageLayoutVersion
+ }
+ layout := ocispec.ImageLayout{
+ Version: version,
+ }
+
+ b, err := json.Marshal(layout)
+ if err != nil {
+ panic(err)
+ }
+
+ return tarRecord{
+ Header: &tar.Header{
+ Name: ocispec.ImageLayoutFile,
+ Mode: 0444,
+ Size: int64(len(b)),
+ Typeflag: tar.TypeReg,
+ },
+ CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+ n, err := w.Write(b)
+ return int64(n), err
+ },
+ }
+}
+
+func ociIndexRecord(manifests []ocispec.Descriptor) tarRecord {
+ index := ocispec.Index{
+ Versioned: ocispecs.Versioned{
+ SchemaVersion: 2,
+ },
+ MediaType: ocispec.MediaTypeImageIndex,
+ Manifests: manifests,
+ }
+
+ b, err := json.Marshal(index)
+ if err != nil {
+ panic(err)
+ }
+
+ return tarRecord{
+ Header: &tar.Header{
+ Name: ocispec.ImageIndexFile,
+ Mode: 0644,
+ Size: int64(len(b)),
+ Typeflag: tar.TypeReg,
+ },
+ CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+ n, err := w.Write(b)
+ return int64(n), err
+ },
+ }
+}
+
+type exportManifest struct {
+ manifest ocispec.Descriptor
+ names []string
+}
+
+func manifestsRecord(ctx context.Context, store content.Provider, manifests map[digest.Digest]*exportManifest) (tarRecord, error) {
+ mfsts := make([]struct {
+ Config string
+ RepoTags []string
+ Layers []string
+ }, len(manifests))
+
+ var i int
+ for _, m := range manifests {
+ p, err := content.ReadBlob(ctx, store, m.manifest)
+ if err != nil {
+ return tarRecord{}, err
+ }
+
+ var manifest ocispec.Manifest
+ if err := json.Unmarshal(p, &manifest); err != nil {
+ return tarRecord{}, err
+ }
+
+ dgst := manifest.Config.Digest
+ if err := dgst.Validate(); err != nil {
+ return tarRecord{}, err
+ }
+ mfsts[i].Config = path.Join(ocispec.ImageBlobsDir, dgst.Algorithm().String(), dgst.Encoded())
+ for _, l := range manifest.Layers {
+ mfsts[i].Layers = append(mfsts[i].Layers, path.Join(ocispec.ImageBlobsDir, l.Digest.Algorithm().String(), l.Digest.Encoded()))
+ }
+
+ for _, name := range m.names {
+ nname, err := familiarizeReference(name)
+ if err != nil {
+ return tarRecord{}, err
+ }
+
+ mfsts[i].RepoTags = append(mfsts[i].RepoTags, nname)
+ }
+
+ i++
+ }
+
+ b, err := json.Marshal(mfsts)
+ if err != nil {
+ return tarRecord{}, err
+ }
+
+ return tarRecord{
+ Header: &tar.Header{
+ Name: "manifest.json",
+ Mode: 0644,
+ Size: int64(len(b)),
+ Typeflag: tar.TypeReg,
+ },
+ CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+ n, err := w.Write(b)
+ return int64(n), err
+ },
+ }, nil
+}
+
+func writeTar(ctx context.Context, tw *tar.Writer, recordsWithEmpty []tarRecord) error {
+ var records []tarRecord
+ for _, r := range recordsWithEmpty {
+ if r.Header != nil {
+ records = append(records, r)
+ }
+ }
+ sort.Slice(records, func(i, j int) bool {
+ return records[i].Header.Name < records[j].Header.Name
+ })
+
+ var last string
+ for _, record := range records {
+ if record.Header.Name == last {
+ continue
+ }
+ last = record.Header.Name
+ if err := tw.WriteHeader(record.Header); err != nil {
+ return err
+ }
+ if record.CopyTo != nil {
+ n, err := record.CopyTo(ctx, tw)
+ if err != nil {
+ return err
+ }
+ if n != record.Header.Size {
+ return fmt.Errorf("unexpected copy size for %s", record.Header.Name)
+ }
+ } else if record.Header.Size > 0 {
+ return fmt.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/archive/importer.go b/vendor/github.com/containerd/containerd/v2/core/images/archive/importer.go
new file mode 100644
index 000000000000..64a1587f7731
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/archive/importer.go
@@ -0,0 +1,420 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package archive provides a Docker and OCI compatible importer
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "path"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/pkg/archive/compression"
+ "github.com/containerd/containerd/v2/pkg/labels"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+ "github.com/containerd/platforms"
+ digest "github.com/opencontainers/go-digest"
+ specs "github.com/opencontainers/image-spec/specs-go"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type importOpts struct {
+ compress bool
+}
+
+// ImportOpt is an option for importing an OCI index
+type ImportOpt func(*importOpts) error
+
+// WithImportCompression compresses uncompressed layers on import.
+// This is used for import formats which do not include the manifest.
+func WithImportCompression() ImportOpt {
+ return func(io *importOpts) error {
+ io.compress = true
+ return nil
+ }
+}
+
+// ImportIndex imports an index from a tar archive image bundle
+// - implements Docker v1.1, v1.2 and OCI v1.
+// - prefers OCI v1 when provided
+// - creates OCI index for Docker formats
+// - normalizes Docker references and adds as OCI ref name
+// e.g. alpine:latest -> docker.io/library/alpine:latest
+// - existing OCI reference names are untouched
+func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) {
+ var (
+ tr = tar.NewReader(reader)
+
+ ociLayout ocispec.ImageLayout
+ mfsts []struct {
+ Config string
+ RepoTags []string
+ Layers []string
+ }
+ symlinks = make(map[string]string)
+ blobs = make(map[string]ocispec.Descriptor)
+ iopts importOpts
+ )
+
+ for _, o := range opts {
+ if err := o(&iopts); err != nil {
+ return ocispec.Descriptor{}, err
+ }
+ }
+
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return ocispec.Descriptor{}, err
+ }
+ if hdr.Typeflag == tar.TypeSymlink {
+ symlinks[hdr.Name] = path.Join(path.Dir(hdr.Name), hdr.Linkname)
+ }
+
+ //nolint:staticcheck // TypeRegA is deprecated but we may still receive an external tar with TypeRegA
+ if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA {
+ if hdr.Typeflag != tar.TypeDir {
+ log.G(ctx).WithField("file", hdr.Name).Debug("file type ignored")
+ }
+ continue
+ }
+
+ hdrName := path.Clean(hdr.Name)
+ if hdrName == ocispec.ImageLayoutFile {
+ if err = onUntarJSON(tr, &ociLayout); err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("untar oci layout %q: %w", hdr.Name, err)
+ }
+ } else if hdrName == "manifest.json" {
+ if err = onUntarJSON(tr, &mfsts); err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("untar manifest %q: %w", hdr.Name, err)
+ }
+ } else {
+ dgst, err := onUntarBlob(ctx, tr, store, hdr.Size, "tar-"+hdrName)
+ if err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to ingest %q: %w", hdr.Name, err)
+ }
+
+ blobs[hdrName] = ocispec.Descriptor{
+ Digest: dgst,
+ Size: hdr.Size,
+ }
+ }
+ }
+
+ // If an OCI layout file was given, interpret the tar as an OCI layout.
+ // When not provided, the tar is interpreted as Docker v1.1 or v1.2.
+ if ociLayout.Version != "" {
+ if ociLayout.Version != ocispec.ImageLayoutVersion {
+ return ocispec.Descriptor{}, fmt.Errorf("unsupported OCI version %s", ociLayout.Version)
+ }
+
+ idx, ok := blobs[ocispec.ImageIndexFile]
+ if !ok {
+ return ocispec.Descriptor{}, fmt.Errorf("missing index.json in OCI layout %s", ocispec.ImageLayoutVersion)
+ }
+
+ idx.MediaType = ocispec.MediaTypeImageIndex
+ return idx, nil
+ }
+
+ if mfsts == nil {
+ return ocispec.Descriptor{}, errors.New("unrecognized image format")
+ }
+
+ for name, linkname := range symlinks {
+ desc, ok := blobs[linkname]
+ if !ok {
+ return ocispec.Descriptor{}, fmt.Errorf("no target for symlink layer from %q to %q", name, linkname)
+ }
+ blobs[name] = desc
+ }
+
+ idx := ocispec.Index{
+ Versioned: specs.Versioned{
+ SchemaVersion: 2,
+ },
+ }
+ for _, mfst := range mfsts {
+ config, ok := blobs[mfst.Config]
+ if !ok {
+ return ocispec.Descriptor{}, fmt.Errorf("image config %q not found", mfst.Config)
+ }
+ config.MediaType = images.MediaTypeDockerSchema2Config
+
+ layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress)
+ if err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to resolve layers: %w", err)
+ }
+
+ manifest := struct {
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType"`
+ Config ocispec.Descriptor `json:"config"`
+ Layers []ocispec.Descriptor `json:"layers"`
+ }{
+ SchemaVersion: 2,
+ MediaType: images.MediaTypeDockerSchema2Manifest,
+ Config: config,
+ Layers: layers,
+ }
+
+ desc, err := writeManifest(ctx, store, manifest, manifest.MediaType)
+ if err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("write docker manifest: %w", err)
+ }
+
+ imgPlatforms, err := images.Platforms(ctx, store, desc)
+ if err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("unable to resolve platform: %w", err)
+ }
+ if len(imgPlatforms) > 0 {
+ // Only one platform can be resolved from a non-index manifest.
+ // The platform can only come from the config included above;
+ // if the config has no platform, it can be safely omitted.
+ desc.Platform = &imgPlatforms[0]
+
+ // If the image we've just imported is a Windows image without the OSVersion set,
+ // we could just assume it matches this host's OS Version. Without this, the
+ // children labels might not be set on the image content, leading to it being
+ // garbage collected, breaking the image.
+ // See: /~https://github.com/containerd/containerd/issues/5690
+ if desc.Platform.OS == "windows" && desc.Platform.OSVersion == "" {
+ platform := platforms.DefaultSpec()
+ desc.Platform.OSVersion = platform.OSVersion
+ }
+ }
+
+ if len(mfst.RepoTags) == 0 {
+ idx.Manifests = append(idx.Manifests, desc)
+ } else {
+ // Add descriptor per tag
+ for _, ref := range mfst.RepoTags {
+ mfstdesc := desc
+
+ normalized, err := normalizeReference(ref)
+ if err != nil {
+ return ocispec.Descriptor{}, err
+ }
+
+ mfstdesc.Annotations = map[string]string{
+ images.AnnotationImageName: normalized,
+ ocispec.AnnotationRefName: ociReferenceName(normalized),
+ }
+
+ idx.Manifests = append(idx.Manifests, mfstdesc)
+ }
+ }
+ }
+
+ return writeManifest(ctx, store, idx, ocispec.MediaTypeImageIndex)
+}
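+
+// A minimal usage sketch (hypothetical caller code): importing a
+// `docker save` style tar and compressing any uncompressed layers on the
+// way in; "image.tar" is an assumed input file.
+//
+//	f, err := os.Open("image.tar")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	idx, err := archive.ImportIndex(ctx, store, f, archive.WithImportCompression())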
+
+const (
+ kib = 1024
+ mib = 1024 * kib
+ jsonLimit = 20 * mib
+)
+
+func onUntarJSON(r io.Reader, j interface{}) error {
+ return json.NewDecoder(io.LimitReader(r, jsonLimit)).Decode(j)
+}
+
+func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size int64, ref string) (digest.Digest, error) {
+ dgstr := digest.Canonical.Digester()
+
+ if err := content.WriteBlob(ctx, store, ref, io.TeeReader(r, dgstr.Hash()), ocispec.Descriptor{Size: size}); err != nil {
+ return "", err
+ }
+
+ return dgstr.Digest(), nil
+}
+
+func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) {
+ layers := make([]ocispec.Descriptor, len(layerFiles))
+ filters := make([]string, len(layerFiles))
+ descs := map[digest.Digest]*ocispec.Descriptor{}
+ for i, f := range layerFiles {
+ desc, ok := blobs[f]
+ if !ok {
+ return nil, fmt.Errorf("layer %q not found", f)
+ }
+ layers[i] = desc
+ descs[desc.Digest] = &layers[i]
+ filters[i] = fmt.Sprintf("labels.\"%s\"==%s", labels.LabelUncompressed, desc.Digest.String())
+ }
+
+ err := store.Walk(ctx, func(info content.Info) error {
+ dgst, ok := info.Labels[labels.LabelUncompressed]
+ if ok {
+ desc := descs[digest.Digest(dgst)]
+ if desc != nil {
+ desc.Digest = info.Digest
+ desc.Size = info.Size
+ mediaType, err := detectLayerMediaType(ctx, store, *desc)
+ if err != nil {
+ return fmt.Errorf("failed to detect media type of layer: %w", err)
+ }
+ desc.MediaType = mediaType
+ }
+ }
+ return nil
+ }, filters...)
+ if err != nil {
+ return nil, fmt.Errorf("failure checking for compressed blobs: %w", err)
+ }
+
+ for i, desc := range layers {
+ if desc.MediaType != "" {
+ continue
+ }
+ // Open blob, resolve media type
+ ra, err := store.ReaderAt(ctx, desc)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open %q (%s): %w", layerFiles[i], desc.Digest, err)
+ }
+ s, err := compression.DecompressStream(content.NewReader(ra))
+ if err != nil {
+ ra.Close()
+ return nil, fmt.Errorf("failed to detect compression for %q: %w", layerFiles[i], err)
+ }
+ if s.GetCompression() == compression.Uncompressed {
+ if compress {
+ if err := desc.Digest.Validate(); err != nil {
+ return nil, err
+ }
+ ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
+ blobLabels := map[string]string{
+ labels.LabelUncompressed: desc.Digest.String(),
+ }
+ layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(blobLabels))
+ if err != nil {
+ s.Close()
+ ra.Close()
+ return nil, err
+ }
+ layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
+ } else {
+ layers[i].MediaType = images.MediaTypeDockerSchema2Layer
+ }
+ } else {
+ layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
+ }
+ s.Close()
+ ra.Close()
+ }
+ return layers, nil
+}
+
+func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) {
+ w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
+ if err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to open writer: %w", err)
+ }
+
+ defer func() {
+ w.Close()
+ if err != nil {
+ cs.Abort(ctx, ref)
+ }
+ }()
+ if err := w.Truncate(0); err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to truncate writer: %w", err)
+ }
+
+ cw, err := compression.CompressStream(w, compression.Gzip)
+ if err != nil {
+ return ocispec.Descriptor{}, err
+ }
+
+ if _, err := io.Copy(cw, r); err != nil {
+ return ocispec.Descriptor{}, err
+ }
+ if err := cw.Close(); err != nil {
+ return ocispec.Descriptor{}, err
+ }
+
+ cst, err := w.Status()
+ if err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to get writer status: %w", err)
+ }
+
+ desc.Digest = w.Digest()
+ desc.Size = cst.Offset
+
+ if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil {
+ if !errdefs.IsAlreadyExists(err) {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to commit: %w", err)
+ }
+ }
+
+ return desc, nil
+}
+
+func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) {
+ manifestBytes, err := json.Marshal(manifest)
+ if err != nil {
+ return ocispec.Descriptor{}, err
+ }
+
+ desc := ocispec.Descriptor{
+ MediaType: mediaType,
+ Digest: digest.FromBytes(manifestBytes),
+ Size: int64(len(manifestBytes)),
+ }
+ if err := content.WriteBlob(ctx, cs, "manifest-"+desc.Digest.String(), bytes.NewReader(manifestBytes), desc); err != nil {
+ return ocispec.Descriptor{}, err
+ }
+
+ return desc, nil
+}
+
+func detectLayerMediaType(ctx context.Context, store content.Store, desc ocispec.Descriptor) (string, error) {
+ var mediaType string
+ // need to read the first bytes of the existing blob to detect the proper media type
+ header := make([]byte, 10)
+ ra, err := store.ReaderAt(ctx, desc)
+ if err != nil {
+ return "", fmt.Errorf("failed to read content store to detect layer media type: %w", err)
+ }
+ defer ra.Close()
+ _, err = ra.ReadAt(header, 0)
+ if err != nil && err != io.EOF {
+ return "", fmt.Errorf("failed to read header bytes from layer to detect media type: %w", err)
+ }
+ if err == io.EOF {
+ // in the case of an empty layer, the media type should be uncompressed
+ return images.MediaTypeDockerSchema2Layer, nil
+ }
+ switch c := compression.DetectCompression(header); c {
+ case compression.Uncompressed:
+ mediaType = images.MediaTypeDockerSchema2Layer
+ default:
+ mediaType = images.MediaTypeDockerSchema2LayerGzip
+ }
+ return mediaType, nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/archive/reference.go b/vendor/github.com/containerd/containerd/v2/core/images/archive/reference.go
new file mode 100644
index 000000000000..5e12eb98f48b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/archive/reference.go
@@ -0,0 +1,114 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package archive
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containerd/containerd/v2/pkg/reference"
+ distref "github.com/distribution/reference"
+ "github.com/opencontainers/go-digest"
+)
+
+// FilterRefPrefix restricts references to having the given image
+// prefix. Tag-only references will have the prefix prepended.
+func FilterRefPrefix(image string) func(string) string {
+ return refTranslator(image, true)
+}
+
+// AddRefPrefix prepends the given image prefix to tag-only references,
+// while returning full references unmodified.
+func AddRefPrefix(image string) func(string) string {
+ return refTranslator(image, false)
+}
+
+// refTranslator returns a function that expands tag-only references with the
+// given image name and, when checkPrefix is set, verifies that full
+// references match the image prefix.
+func refTranslator(image string, checkPrefix bool) func(string) string {
+ return func(ref string) string {
+ if image == "" {
+ return ""
+ }
+ // Check if ref is full reference
+ if strings.ContainsAny(ref, "/:@") {
+ // If not prefixed, don't include image
+ if checkPrefix && !isImagePrefix(ref, image) {
+ return ""
+ }
+ return ref
+ }
+ return image + ":" + ref
+ }
+}
+
+func isImagePrefix(s, prefix string) bool {
+ if !strings.HasPrefix(s, prefix) {
+ return false
+ }
+ if len(s) > len(prefix) {
+ switch s[len(prefix)] {
+ case '/', ':', '@':
+ // Prevent matching partial namespaces
+ default:
+ return false
+ }
+ }
+ return true
+}
+
+func normalizeReference(ref string) (string, error) {
+ normalized, err := distref.ParseDockerRef(ref)
+ if err != nil {
+ return "", fmt.Errorf("normalize image ref %q: %w", ref, err)
+ }
+
+ return normalized.String(), nil
+}
+
+func familiarizeReference(ref string) (string, error) {
+ named, err := distref.ParseNormalizedNamed(ref)
+ if err != nil {
+ return "", fmt.Errorf("failed to parse %q: %w", ref, err)
+ }
+ named = distref.TagNameOnly(named)
+
+ return distref.FamiliarString(named), nil
+}
+
+func ociReferenceName(name string) string {
+ // OCI defines the reference name as only a tag excluding the
+ // repository. The containerd annotation contains the full image name
+ // since the tag is insufficient for correctly naming and referring to an
+ // image
+ var ociRef string
+ if spec, err := reference.Parse(name); err == nil {
+ ociRef = spec.Object
+ } else {
+ ociRef = name
+ }
+
+ return ociRef
+}
+
+// DigestTranslator creates a digest reference by adding the
+// digest to an image name
+func DigestTranslator(prefix string) func(digest.Digest) string {
+ return func(dgst digest.Digest) string {
+ return prefix + "@" + dgst.String()
+ }
+}
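+
+// A minimal usage sketch (hypothetical caller code) of the translators above:
+//
+//	f := archive.FilterRefPrefix("docker.io/library/alpine")
+//	f("latest")                          // "docker.io/library/alpine:latest"
+//	f("docker.io/library/busybox:1.36")  // "" (prefix mismatch, filtered out)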
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/converter/converter.go b/vendor/github.com/containerd/containerd/v2/core/images/converter/converter.go
new file mode 100644
index 000000000000..20308cb55a84
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/converter/converter.go
@@ -0,0 +1,126 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package converter provides an image converter.
+package converter
+
+import (
+ "context"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/core/leases"
+ "github.com/containerd/platforms"
+)
+
+type convertOpts struct {
+ layerConvertFunc ConvertFunc
+ docker2oci bool
+ indexConvertFunc ConvertFunc
+ platformMC platforms.MatchComparer
+}
+
+// Opt is an option for Convert()
+type Opt func(*convertOpts) error
+
+// WithLayerConvertFunc specifies the function that converts layers.
+func WithLayerConvertFunc(fn ConvertFunc) Opt {
+ return func(copts *convertOpts) error {
+ copts.layerConvertFunc = fn
+ return nil
+ }
+}
+
+// WithDockerToOCI converts Docker media types into OCI ones when v is true.
+func WithDockerToOCI(v bool) Opt {
+ return func(copts *convertOpts) error {
+ copts.docker2oci = v
+ return nil
+ }
+}
+
+// WithPlatform specifies the platform.
+// Defaults to all platforms.
+func WithPlatform(p platforms.MatchComparer) Opt {
+ return func(copts *convertOpts) error {
+ copts.platformMC = p
+ return nil
+ }
+}
+
+// WithIndexConvertFunc specifies the function that converts manifests and indexes (manifest lists).
+// Defaults to DefaultIndexConvertFunc.
+func WithIndexConvertFunc(fn ConvertFunc) Opt {
+ return func(copts *convertOpts) error {
+ copts.indexConvertFunc = fn
+ return nil
+ }
+}
+
+// Client is implemented by *containerd.Client.
+type Client interface {
+ WithLease(ctx context.Context, opts ...leases.Opt) (context.Context, func(context.Context) error, error)
+ ContentStore() content.Store
+ ImageService() images.Store
+}
+
+// Convert converts an image.
+func Convert(ctx context.Context, client Client, dstRef, srcRef string, opts ...Opt) (*images.Image, error) {
+ var copts convertOpts
+ for _, o := range opts {
+ if err := o(&copts); err != nil {
+ return nil, err
+ }
+ }
+ if copts.platformMC == nil {
+ copts.platformMC = platforms.All
+ }
+ if copts.indexConvertFunc == nil {
+ copts.indexConvertFunc = DefaultIndexConvertFunc(copts.layerConvertFunc, copts.docker2oci, copts.platformMC)
+ }
+
+ ctx, done, err := client.WithLease(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer done(ctx)
+
+ cs := client.ContentStore()
+ is := client.ImageService()
+ srcImg, err := is.Get(ctx, srcRef)
+ if err != nil {
+ return nil, err
+ }
+
+ dstDesc, err := copts.indexConvertFunc(ctx, cs, srcImg.Target)
+ if err != nil {
+ return nil, err
+ }
+
+ dstImg := srcImg
+ dstImg.Name = dstRef
+ if dstDesc != nil {
+ dstImg.Target = *dstDesc
+ }
+ var res images.Image
+ if dstRef != srcRef {
+ _ = is.Delete(ctx, dstRef)
+ res, err = is.Create(ctx, dstImg)
+ } else {
+ res, err = is.Update(ctx, dstImg)
+ }
+ return &res, err
+}
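+
+// A minimal usage sketch (hypothetical caller code): converting an image to
+// OCI media types with uncompressed layers under a new name; the references
+// are placeholders.
+//
+//	img, err := converter.Convert(ctx, client,
+//		"example.com/foo:oci", "example.com/foo:latest",
+//		converter.WithDockerToOCI(true),
+//		converter.WithLayerConvertFunc(uncompress.LayerConvertFunc),
+//	)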
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/converter/default.go b/vendor/github.com/containerd/containerd/v2/core/images/converter/default.go
new file mode 100644
index 000000000000..98f7965367cc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/converter/default.go
@@ -0,0 +1,454 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package converter
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/log"
+ "github.com/containerd/platforms"
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/sync/errgroup"
+)
+
+// ConvertFunc returns a converted content descriptor.
+// When the content is not converted, ConvertFunc returns a nil descriptor.
+type ConvertFunc func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error)
+
+// DefaultIndexConvertFunc is the default convert func used by Convert.
+func DefaultIndexConvertFunc(layerConvertFunc ConvertFunc, docker2oci bool, platformMC platforms.MatchComparer) ConvertFunc {
+ c := &defaultConverter{
+ layerConvertFunc: layerConvertFunc,
+ docker2oci: docker2oci,
+ platformMC: platformMC,
+ diffIDMap: make(map[digest.Digest]digest.Digest),
+ }
+ return c.convert
+}
+
+// ConvertHookFunc is a callback function called during conversion of a blob.
+// orgDesc is the target descriptor to convert. newDesc is passed if conversion happens.
+type ConvertHookFunc func(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error)
+
+// ConvertHooks is a configuration for hook callbacks called during blob conversion.
+type ConvertHooks struct {
+ // PostConvertHook is a callback function called for each blob after conversion is done.
+ PostConvertHook ConvertHookFunc
+}
+
+// IndexConvertFuncWithHook is the convert func used by Convert with hook functions support.
+func IndexConvertFuncWithHook(layerConvertFunc ConvertFunc, docker2oci bool, platformMC platforms.MatchComparer, hooks ConvertHooks) ConvertFunc {
+ c := &defaultConverter{
+ layerConvertFunc: layerConvertFunc,
+ docker2oci: docker2oci,
+ platformMC: platformMC,
+ diffIDMap: make(map[digest.Digest]digest.Digest),
+ hooks: hooks,
+ }
+ return c.convert
+}
+
+type defaultConverter struct {
+ layerConvertFunc ConvertFunc
+ docker2oci bool
+ platformMC platforms.MatchComparer
+ diffIDMap map[digest.Digest]digest.Digest // key: old diffID, value: new diffID
+ diffIDMapMu sync.RWMutex
+ hooks ConvertHooks
+}
+
+// convert dispatches desc.MediaType and calls c.convert{Layer,Manifest,Index,Config}.
+//
+// Also converts media type if c.docker2oci is set.
+func (c *defaultConverter) convert(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
+ var (
+ newDesc *ocispec.Descriptor
+ err error
+ )
+ if images.IsLayerType(desc.MediaType) {
+ newDesc, err = c.convertLayer(ctx, cs, desc)
+ } else if images.IsManifestType(desc.MediaType) {
+ newDesc, err = c.convertManifest(ctx, cs, desc)
+ } else if images.IsIndexType(desc.MediaType) {
+ newDesc, err = c.convertIndex(ctx, cs, desc)
+ } else if images.IsConfigType(desc.MediaType) {
+ newDesc, err = c.convertConfig(ctx, cs, desc)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if c.hooks.PostConvertHook != nil {
+ if newDescPost, err := c.hooks.PostConvertHook(ctx, cs, desc, newDesc); err != nil {
+ return nil, err
+ } else if newDescPost != nil {
+ newDesc = newDescPost
+ }
+ }
+
+ if images.IsDockerType(desc.MediaType) {
+ if c.docker2oci {
+ if newDesc == nil {
+ newDesc = copyDesc(desc)
+ }
+ newDesc.MediaType = ConvertDockerMediaTypeToOCI(newDesc.MediaType)
+ } else if (newDesc == nil && len(desc.Annotations) != 0) || (newDesc != nil && len(newDesc.Annotations) != 0) {
+ // Annotations are supported only on OCI manifests.
+ // We need to remove annotations for Docker media types.
+ if newDesc == nil {
+ newDesc = copyDesc(desc)
+ }
+ newDesc.Annotations = nil
+ }
+ }
+ log.G(ctx).WithField("old", desc).WithField("new", newDesc).Debugf("converted")
+ return newDesc, nil
+}
+
+func copyDesc(desc ocispec.Descriptor) *ocispec.Descriptor {
+ descCopy := desc
+ return &descCopy
+}
+
+// convertLayer converts image layers if c.layerConvertFunc is set.
+//
+// c.layerConvertFunc can be nil, e.g., for converting Docker media types to OCI ones.
+func (c *defaultConverter) convertLayer(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
+ if c.layerConvertFunc != nil {
+ return c.layerConvertFunc(ctx, cs, desc)
+ }
+ return nil, nil
+}
+
+// convertManifest converts image manifests.
+//
+// - converts `.mediaType` if the target format is OCI
+// - records diff ID changes in c.diffIDMap
+func (c *defaultConverter) convertManifest(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
+ var (
+ manifest ocispec.Manifest
+ modified bool
+ )
+ labels, err := readJSON(ctx, cs, &manifest, desc)
+ if err != nil {
+ return nil, err
+ }
+ if labels == nil {
+ labels = make(map[string]string)
+ }
+ if images.IsDockerType(manifest.MediaType) && c.docker2oci {
+ manifest.MediaType = ConvertDockerMediaTypeToOCI(manifest.MediaType)
+ modified = true
+ }
+ var mu sync.Mutex
+ eg, ctx2 := errgroup.WithContext(ctx)
+ for i, l := range manifest.Layers {
+ i := i
+ l := l
+ oldDiffID, err := images.GetDiffID(ctx, cs, l)
+ if err != nil {
+ return nil, err
+ }
+ eg.Go(func() error {
+ newL, err := c.convert(ctx2, cs, l)
+ if err != nil {
+ return err
+ }
+ if newL != nil {
+ mu.Lock()
+ // update GC labels
+ ClearGCLabels(labels, l.Digest)
+ labelKey := fmt.Sprintf("containerd.io/gc.ref.content.l.%d", i)
+ labels[labelKey] = newL.Digest.String()
+ manifest.Layers[i] = *newL
+ modified = true
+ mu.Unlock()
+
+ // diffID changes if the tar entries were modified.
+ // diffID stays the same if only the compression type was changed.
+ // When the diffID changed, add a map entry so that we can update the image config.
+ newDiffID, err := images.GetDiffID(ctx, cs, *newL)
+ if err != nil {
+ return err
+ }
+ if newDiffID != oldDiffID {
+ c.diffIDMapMu.Lock()
+ c.diffIDMap[oldDiffID] = newDiffID
+ c.diffIDMapMu.Unlock()
+ }
+ }
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ return nil, err
+ }
+
+ newConfig, err := c.convert(ctx, cs, manifest.Config)
+ if err != nil {
+ return nil, err
+ }
+ if newConfig != nil {
+ ClearGCLabels(labels, manifest.Config.Digest)
+ labels["containerd.io/gc.ref.content.config"] = newConfig.Digest.String()
+ manifest.Config = *newConfig
+ modified = true
+ }
+
+ if modified {
+ return writeJSON(ctx, cs, &manifest, desc, labels)
+ }
+ return nil, nil
+}
+
+// convertIndex converts image index.
+//
+// - converts `.mediaType` if the target format is OCI
+// - clears manifest entries that do not match c.platformMC
+func (c *defaultConverter) convertIndex(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
+ var (
+ index ocispec.Index
+ modified bool
+ )
+ labels, err := readJSON(ctx, cs, &index, desc)
+ if err != nil {
+ return nil, err
+ }
+ if labels == nil {
+ labels = make(map[string]string)
+ }
+ if images.IsDockerType(index.MediaType) && c.docker2oci {
+ index.MediaType = ConvertDockerMediaTypeToOCI(index.MediaType)
+ modified = true
+ }
+
+ newManifests := make([]ocispec.Descriptor, len(index.Manifests))
+ newManifestsToBeRemoved := make(map[int]struct{}) // slice index
+ var mu sync.Mutex
+ eg, ctx2 := errgroup.WithContext(ctx)
+ for i, mani := range index.Manifests {
+ i := i
+ mani := mani
+ labelKey := fmt.Sprintf("containerd.io/gc.ref.content.m.%d", i)
+ eg.Go(func() error {
+ if mani.Platform != nil && !c.platformMC.Match(*mani.Platform) {
+ mu.Lock()
+ ClearGCLabels(labels, mani.Digest)
+ newManifestsToBeRemoved[i] = struct{}{}
+ modified = true
+ mu.Unlock()
+ return nil
+ }
+ newMani, err := c.convert(ctx2, cs, mani)
+ if err != nil {
+ return err
+ }
+ mu.Lock()
+ if newMani != nil {
+ ClearGCLabels(labels, mani.Digest)
+ labels[labelKey] = newMani.Digest.String()
+ // NOTE: to keep the manifest order, we specify the index `i` explicitly
+ newManifests[i] = *newMani
+ modified = true
+ } else {
+ newManifests[i] = mani
+ }
+ mu.Unlock()
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ return nil, err
+ }
+ if modified {
+ var newManifestsClean []ocispec.Descriptor
+ for i, m := range newManifests {
+ if _, ok := newManifestsToBeRemoved[i]; !ok {
+ newManifestsClean = append(newManifestsClean, m)
+ }
+ }
+ index.Manifests = newManifestsClean
+ return writeJSON(ctx, cs, &index, desc, labels)
+ }
+ return nil, nil
+}
+
+// convertConfig converts image config contents.
+//
+// - updates `.rootfs.diff_ids` using c.diffIDMap
+// - clears legacy `.config.Image` and `.container_config.Image` fields if `.rootfs.diff_ids` was updated
+func (c *defaultConverter) convertConfig(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
+ var (
+ cfg DualConfig
+ cfgAsOCI ocispec.Image // read only, used for parsing cfg
+ modified bool
+ )
+
+ labels, err := readJSON(ctx, cs, &cfg, desc)
+ if err != nil {
+ return nil, err
+ }
+ if labels == nil {
+ labels = make(map[string]string)
+ }
+ if _, err := readJSON(ctx, cs, &cfgAsOCI, desc); err != nil {
+ return nil, err
+ }
+
+ if rootfs := cfgAsOCI.RootFS; rootfs.Type == "layers" {
+ rootfsModified := false
+ c.diffIDMapMu.RLock()
+ for i, oldDiffID := range rootfs.DiffIDs {
+ if newDiffID, ok := c.diffIDMap[oldDiffID]; ok && newDiffID != oldDiffID {
+ rootfs.DiffIDs[i] = newDiffID
+ rootfsModified = true
+ }
+ }
+ c.diffIDMapMu.RUnlock()
+ if rootfsModified {
+ rootfsB, err := json.Marshal(rootfs)
+ if err != nil {
+ return nil, err
+ }
+ cfg["rootfs"] = (*json.RawMessage)(&rootfsB)
+ modified = true
+ }
+ }
+
+ if modified {
+ // cfg may have dummy values for the legacy `.config.Image` and `.container_config.Image` fields.
+ // We should clear these IDs if we changed the diff IDs.
+ if _, err := clearDockerV1DummyID(cfg); err != nil {
+ return nil, err
+ }
+ return writeJSON(ctx, cs, &cfg, desc, labels)
+ }
+ return nil, nil
+}
+
+// clearDockerV1DummyID clears the dummy values for legacy `.config.Image` and `.container_config.Image`.
+// Returns true if the cfg was modified.
+func clearDockerV1DummyID(cfg DualConfig) (bool, error) {
+ var modified bool
+ f := func(k string) error {
+ if configX, ok := cfg[k]; ok && configX != nil {
+ var configField map[string]*json.RawMessage
+ if err := json.Unmarshal(*configX, &configField); err != nil {
+ return err
+ }
+ delete(configField, "Image")
+ b, err := json.Marshal(configField)
+ if err != nil {
+ return err
+ }
+ cfg[k] = (*json.RawMessage)(&b)
+ modified = true
+ }
+ return nil
+ }
+ if err := f("config"); err != nil {
+ return modified, err
+ }
+ if err := f("container_config"); err != nil {
+ return modified, err
+ }
+ return modified, nil
+}
+
+// DualConfig covers Docker config (v1.0, v1.1, v1.2) and OCI config.
+// Unmarshalled as map[string]*json.RawMessage to retain unknown fields on remarshalling.
+type DualConfig map[string]*json.RawMessage
+
+func readJSON(ctx context.Context, cs content.Store, x interface{}, desc ocispec.Descriptor) (map[string]string, error) {
+ info, err := cs.Info(ctx, desc.Digest)
+ if err != nil {
+ return nil, err
+ }
+ labels := info.Labels
+ b, err := content.ReadBlob(ctx, cs, desc)
+ if err != nil {
+ return nil, err
+ }
+ if err := json.Unmarshal(b, x); err != nil {
+ return nil, err
+ }
+ return labels, nil
+}
+
+func writeJSON(ctx context.Context, cs content.Store, x interface{}, oldDesc ocispec.Descriptor, labels map[string]string) (*ocispec.Descriptor, error) {
+ b, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ dgst := digest.SHA256.FromBytes(b)
+ ref := fmt.Sprintf("converter-write-json-%s", dgst.String())
+ w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
+ if err != nil {
+ return nil, err
+ }
+ if err := content.Copy(ctx, w, bytes.NewReader(b), int64(len(b)), dgst, content.WithLabels(labels)); err != nil {
+ w.Close()
+ return nil, err
+ }
+ if err := w.Close(); err != nil {
+ return nil, err
+ }
+ newDesc := oldDesc
+ newDesc.Size = int64(len(b))
+ newDesc.Digest = dgst
+ return &newDesc, nil
+}
+
+// ConvertDockerMediaTypeToOCI converts a Docker media type string to the corresponding OCI media type.
+func ConvertDockerMediaTypeToOCI(mt string) string {
+ switch mt {
+ case images.MediaTypeDockerSchema2ManifestList:
+ return ocispec.MediaTypeImageIndex
+ case images.MediaTypeDockerSchema2Manifest:
+ return ocispec.MediaTypeImageManifest
+ case images.MediaTypeDockerSchema2LayerGzip:
+ return ocispec.MediaTypeImageLayerGzip
+ case images.MediaTypeDockerSchema2LayerForeignGzip:
+ return ocispec.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // deprecated
+ case images.MediaTypeDockerSchema2Layer:
+ return ocispec.MediaTypeImageLayer
+ case images.MediaTypeDockerSchema2LayerForeign:
+ return ocispec.MediaTypeImageLayerNonDistributable //nolint:staticcheck // deprecated
+ case images.MediaTypeDockerSchema2Config:
+ return ocispec.MediaTypeImageConfig
+ default:
+ return mt
+ }
+}
+
+// ClearGCLabels clears GC labels for the given digest.
+func ClearGCLabels(labels map[string]string, dgst digest.Digest) {
+ for k, v := range labels {
+ if v == dgst.String() && strings.HasPrefix(k, "containerd.io/gc.ref.content") {
+ delete(labels, k)
+ }
+ }
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/converter/uncompress/uncompress.go b/vendor/github.com/containerd/containerd/v2/core/images/converter/uncompress/uncompress.go
new file mode 100644
index 000000000000..f5d5acba367e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/converter/uncompress/uncompress.go
@@ -0,0 +1,122 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package uncompress
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/core/images/converter"
+ "github.com/containerd/containerd/v2/pkg/archive/compression"
+ "github.com/containerd/containerd/v2/pkg/labels"
+ "github.com/containerd/errdefs"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var _ converter.ConvertFunc = LayerConvertFunc
+
+// LayerConvertFunc converts tar.gz layers into uncompressed tar layers.
+// The media type is changed accordingly, e.g., "application/vnd.oci.image.layer.v1.tar+gzip" -> "application/vnd.oci.image.layer.v1.tar".
+func LayerConvertFunc(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
+ if !images.IsLayerType(desc.MediaType) || IsUncompressedType(desc.MediaType) {
+ // No conversion. No need to return an error here.
+ return nil, nil
+ }
+ info, err := cs.Info(ctx, desc.Digest)
+ if err != nil {
+ return nil, err
+ }
+ readerAt, err := cs.ReaderAt(ctx, desc)
+ if err != nil {
+ return nil, err
+ }
+ defer readerAt.Close()
+ sr := io.NewSectionReader(readerAt, 0, desc.Size)
+ newR, err := compression.DecompressStream(sr)
+ if err != nil {
+ return nil, err
+ }
+ defer newR.Close()
+ ref := fmt.Sprintf("convert-uncompress-from-%s", desc.Digest)
+ w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
+ if err != nil {
+ return nil, err
+ }
+ defer w.Close()
+
+ // Reset the writing position: an old writer may remain without having
+ // been aborted (e.g. when a previous conversion was interrupted by a
+ // signal).
+ if err := w.Truncate(0); err != nil {
+ return nil, err
+ }
+
+ n, err := io.Copy(w, newR)
+ if err != nil {
+ return nil, err
+ }
+ if err := newR.Close(); err != nil {
+ return nil, err
+ }
+ // No need to retain the "containerd.io/uncompressed" label, but keep other labels such as "containerd.io/distribution.source.*".
+ labelsMap := info.Labels
+ delete(labelsMap, labels.LabelUncompressed)
+ if err = w.Commit(ctx, 0, "", content.WithLabels(labelsMap)); err != nil && !errdefs.IsAlreadyExists(err) {
+ return nil, err
+ }
+ if err := w.Close(); err != nil {
+ return nil, err
+ }
+ newDesc := desc
+ newDesc.Digest = w.Digest()
+ newDesc.Size = n
+ newDesc.MediaType = convertMediaType(newDesc.MediaType)
+ return &newDesc, nil
+}
+
+// IsUncompressedType returns whether the provided media type is considered
+// an uncompressed layer type
+func IsUncompressedType(mt string) bool {
+ switch mt {
+ case
+ images.MediaTypeDockerSchema2Layer,
+ images.MediaTypeDockerSchema2LayerForeign,
+ ocispec.MediaTypeImageLayer,
+ ocispec.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // deprecated
+ return true
+ default:
+ return false
+ }
+}
+
+func convertMediaType(mt string) string {
+ switch mt {
+ case images.MediaTypeDockerSchema2LayerGzip:
+ return images.MediaTypeDockerSchema2Layer
+ case images.MediaTypeDockerSchema2LayerForeignGzip:
+ return images.MediaTypeDockerSchema2LayerForeign
+ case ocispec.MediaTypeImageLayerGzip, ocispec.MediaTypeImageLayerZstd:
+ return ocispec.MediaTypeImageLayer
+ case ocispec.MediaTypeImageLayerNonDistributableGzip, ocispec.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // deprecated
+ return ocispec.MediaTypeImageLayerNonDistributable //nolint:staticcheck // deprecated
+ default:
+ return mt
+ }
+}
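In client code this ConvertFunc is normally handed to the image converter rather than called directly. A minimal sketch, assuming the v2 client at `github.com/containerd/containerd/v2/client` and the converter's usual `Convert`/`WithLayerConvertFunc` API; the image references are placeholders:

```go
package main

import (
	"context"
	"log"

	containerd "github.com/containerd/containerd/v2/client"
	"github.com/containerd/containerd/v2/core/images/converter"
	"github.com/containerd/containerd/v2/core/images/converter/uncompress"
	"github.com/containerd/containerd/v2/pkg/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")
	// Create "example:uncompressed" from "example:latest" with every
	// layer decompressed to plain tar.
	img, err := converter.Convert(ctx, client,
		"docker.io/library/example:uncompressed",
		"docker.io/library/example:latest",
		converter.WithLayerConvertFunc(uncompress.LayerConvertFunc))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("converted image: %s (%s)", img.Name, img.Target.Digest)
}
```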
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/diffid.go b/vendor/github.com/containerd/containerd/v2/core/images/diffid.go
new file mode 100644
index 000000000000..140d2c07c6f1
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/diffid.go
@@ -0,0 +1,82 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package images
+
+import (
+ "context"
+ "io"
+
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/pkg/archive/compression"
+ "github.com/containerd/containerd/v2/pkg/labels"
+ "github.com/containerd/log"
+)
+
+// GetDiffID gets the diff ID of the layer blob descriptor.
+func GetDiffID(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (digest.Digest, error) {
+ switch desc.MediaType {
+ case
+ // If the layer is already uncompressed, we can just return its digest
+ MediaTypeDockerSchema2Layer,
+ ocispec.MediaTypeImageLayer,
+ MediaTypeDockerSchema2LayerForeign,
+ ocispec.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // deprecated
+ return desc.Digest, nil
+ }
+ info, err := cs.Info(ctx, desc.Digest)
+ if err != nil {
+ return "", err
+ }
+ v, ok := info.Labels[labels.LabelUncompressed]
+ if ok {
+ // Fast path: if the image is already unpacked, we can use the label value
+ return digest.Parse(v)
+ }
+ // if the image is not unpacked, we may not have the label
+ ra, err := cs.ReaderAt(ctx, desc)
+ if err != nil {
+ return "", err
+ }
+ defer ra.Close()
+ r := content.NewReader(ra)
+ uR, err := compression.DecompressStream(r)
+ if err != nil {
+ return "", err
+ }
+ defer uR.Close()
+ digester := digest.Canonical.Digester()
+ hashW := digester.Hash()
+ if _, err := io.Copy(hashW, uR); err != nil {
+ return "", err
+ }
+ if err := ra.Close(); err != nil {
+ return "", err
+ }
+ digest := digester.Digest()
+ // memoize the computed value in the content store's labels
+ if info.Labels == nil {
+ info.Labels = make(map[string]string)
+ }
+ info.Labels[labels.LabelUncompressed] = digest.String()
+ if _, err := cs.Update(ctx, info, "labels"); err != nil {
+ log.G(ctx).WithError(err).Warnf("failed to set %s label for %s", labels.LabelUncompressed, desc.Digest)
+ }
+ return digest, nil
+}
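A short usage sketch: resolving the diff ID for each layer of a manifest. For compressed layers the first call decompresses and hashes the blob, then memoizes the result in the "containerd.io/uncompressed" label, so repeat calls are cheap. The `ctx`, `cs`, and `manifest` variables are assumed to be in scope:

```go
// Print the uncompressed digest ("diff ID") behind each layer descriptor.
for _, layer := range manifest.Layers {
	diffID, err := images.GetDiffID(ctx, cs, layer)
	if err != nil {
		return err
	}
	fmt.Printf("layer %s has diff ID %s\n", layer.Digest, diffID)
}
```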
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/handlers.go b/vendor/github.com/containerd/containerd/v2/core/images/handlers.go
new file mode 100644
index 000000000000..0487fdebdaee
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/handlers.go
@@ -0,0 +1,320 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package images
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sort"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/platforms"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/sync/semaphore"
+)
+
+var (
+ // ErrSkipDesc is used to skip processing of a descriptor and
+ // its descendants.
+ ErrSkipDesc = errors.New("skip descriptor")
+
+ // ErrStopHandler is used to signify that the descriptor
+ // has been handled and should not be handled further.
+ // This applies only to a single descriptor in a handler
+ // chain and does not apply to descendant descriptors.
+ ErrStopHandler = errors.New("stop handler")
+
+ // ErrEmptyWalk is used when the WalkNotEmpty handlers return no
+ // children (e.g., they were all filtered out).
+ ErrEmptyWalk = errors.New("image might be filtered out")
+)
+
+// Handler handles image manifests
+type Handler interface {
+ Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)
+}
+
+// HandlerFunc function implementing the Handler interface
+type HandlerFunc func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)
+
+// Handle image manifests
+func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
+ return fn(ctx, desc)
+}
+
+// Handlers returns a handler that will run the handlers in sequence.
+//
+// A handler may return `ErrStopHandler` to stop calling additional handlers
+func Handlers(handlers ...Handler) HandlerFunc {
+ return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
+ var children []ocispec.Descriptor
+ for _, handler := range handlers {
+ ch, err := handler.Handle(ctx, desc)
+ if err != nil {
+ if errors.Is(err, ErrStopHandler) {
+ break
+ }
+ return nil, err
+ }
+
+ children = append(children, ch...)
+ }
+
+ return children, nil
+ }
+}
+
+// Walk the resources of an image and call the handler for each. If the handler
+// decodes the sub-resources for each image, they will be walked as well.
+//
+// This differs from Dispatch in that each sibling resource is considered
+// synchronously.
+func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
+ for _, desc := range descs {
+
+ children, err := handler.Handle(ctx, desc)
+ if err != nil {
+ if errors.Is(err, ErrSkipDesc) {
+ continue // don't traverse the children.
+ }
+ return err
+ }
+
+ if len(children) > 0 {
+ if err := Walk(ctx, handler, children...); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// WalkNotEmpty works the same way Walk does, except that it ensures that some
+// children are still found while walking the descriptors (for example, that
+// not all of them have been filtered out by one of the handlers). If there are
+// no children, an ErrEmptyWalk error is returned.
+func WalkNotEmpty(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
+ isEmpty := true
+ var notEmptyHandler HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ children, err := handler.Handle(ctx, desc)
+ if err != nil {
+ return children, err
+ }
+
+ if len(children) > 0 {
+ isEmpty = false
+ }
+
+ return children, nil
+ }
+
+ err := Walk(ctx, notEmptyHandler, descs...)
+ if err != nil {
+ return err
+ }
+
+ if isEmpty {
+ return ErrEmptyWalk
+ }
+
+ return nil
+}
+
+// Dispatch runs the provided handler for content specified by the descriptors.
+// If the handler decodes subresources, they will be visited as well.
+//
+// Handlers for siblings are run in parallel on the provided descriptors. A
+// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse
+// any children.
+//
+// A concurrency limiter can be passed in to limit the number of concurrent
+// handlers running. When limiter is nil, there is no limit.
+//
+// Typically, this function will be used with `FetchHandler`, often composed
+// with other handlers.
+//
+// If any handler returns an error, the dispatch session will be canceled.
+func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error {
+ eg, ctx2 := errgroup.WithContext(ctx)
+ for _, desc := range descs {
+ desc := desc
+
+ if limiter != nil {
+ if err := limiter.Acquire(ctx, 1); err != nil {
+ return err
+ }
+ }
+
+ eg.Go(func() error {
+ desc := desc
+
+ children, err := handler.Handle(ctx2, desc)
+ if limiter != nil {
+ limiter.Release(1)
+ }
+ if err != nil {
+ if errors.Is(err, ErrSkipDesc) {
+ return nil // don't traverse the children.
+ }
+ return err
+ }
+
+ if len(children) > 0 {
+ return Dispatch(ctx2, handler, limiter, children...)
+ }
+
+ return nil
+ })
+ }
+
+ return eg.Wait()
+}
+
+// ChildrenHandler decodes well-known manifest types and returns their children.
+//
+// This is useful for supporting recursive fetch and other use cases where you
+// want to do a full walk of resources.
+//
+// One can also replace this with another implementation to allow descending of
+// arbitrary types.
+func ChildrenHandler(provider content.Provider) HandlerFunc {
+ return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ return Children(ctx, provider, desc)
+ }
+}
+
+// SetChildrenLabels is a handler wrapper which sets labels for the content on
+// the children returned by the handler and passes through the children.
+// Must follow a handler that returns the children to be labeled.
+func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc {
+ return SetChildrenMappedLabels(manager, f, nil)
+}
+
+// SetChildrenMappedLabels is a handler wrapper which sets labels for the content on
+// the children returned by the handler and passes through the children.
+// Must follow a handler that returns the children to be labeled.
+// The label map allows the caller to control the labels per child descriptor.
+// For each returned label key, the index of the child is appended to the key,
+// except for index 0 when the key does not end with '.'.
+func SetChildrenMappedLabels(manager content.Manager, f HandlerFunc, labelMap func(ocispec.Descriptor) []string) HandlerFunc {
+ if labelMap == nil {
+ labelMap = ChildGCLabels
+ }
+ return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ children, err := f(ctx, desc)
+ if err != nil {
+ return children, err
+ }
+
+ if len(children) > 0 {
+ var (
+ info = content.Info{
+ Digest: desc.Digest,
+ Labels: map[string]string{},
+ }
+ fields = []string{}
+ keys = map[string]uint{}
+ )
+ for _, ch := range children {
+ labelKeys := labelMap(ch)
+ for _, key := range labelKeys {
+ idx := keys[key]
+ keys[key] = idx + 1
+ if idx > 0 || key[len(key)-1] == '.' {
+ key = fmt.Sprintf("%s%d", key, idx)
+ }
+
+ info.Labels[key] = ch.Digest.String()
+ fields = append(fields, "labels."+key)
+ }
+ }
+
+ _, err := manager.Update(ctx, info, fields...)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return children, err
+ }
+}
+
+// FilterPlatforms is a handler wrapper which limits the descriptors returned
+// based on matching the specified platform matcher.
+func FilterPlatforms(f HandlerFunc, m platforms.Matcher) HandlerFunc {
+ return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ children, err := f(ctx, desc)
+ if err != nil {
+ return children, err
+ }
+
+ var descs []ocispec.Descriptor
+
+ if m == nil {
+ descs = children
+ } else {
+ for _, d := range children {
+ if d.Platform == nil || m.Match(*d.Platform) {
+ descs = append(descs, d)
+ }
+ }
+ }
+
+ return descs, nil
+ }
+}
+
+// LimitManifests is a handler wrapper which filters the manifest descriptors
+// returned using the provided platform.
+// The results will be ordered according to the comparison operator and
+// use the ordering in the manifests for equal matches.
+// A limit of 0 or less is considered no limit.
+// A not found error is returned if no manifest is matched.
+func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc {
+ return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ children, err := f(ctx, desc)
+ if err != nil {
+ return children, err
+ }
+
+ // only limit manifests from an index
+ if IsIndexType(desc.MediaType) {
+ sort.SliceStable(children, func(i, j int) bool {
+ if children[i].Platform == nil {
+ return false
+ }
+ if children[j].Platform == nil {
+ return true
+ }
+ return m.Less(*children[i].Platform, *children[j].Platform)
+ })
+
+ if n > 0 {
+ if len(children) == 0 {
+ return children, fmt.Errorf("no match for platform in manifest: %w", errdefs.ErrNotFound)
+ }
+ if len(children) > n {
+ children = children[:n]
+ }
+ }
+ }
+ return children, nil
+ }
+}
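These building blocks are designed to be composed. A hedged sketch of a common pipeline: decode children from the store, keep only the host platform, label children for GC, and fan out concurrently with a bounded limiter. The weight of 8 is arbitrary; `cs` is assumed to be a `content.Store` and `target` the image's root descriptor:

```go
limiter := semaphore.NewWeighted(8) // at most 8 handlers in flight
handler := images.SetChildrenLabels(cs,
	images.FilterPlatforms(
		images.ChildrenHandler(cs), // decode manifests/indexes into children
		platforms.Default(),        // keep descriptors matching the host
	),
)
if err := images.Dispatch(ctx, handler, limiter, target); err != nil {
	return err
}
```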
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/image.go b/vendor/github.com/containerd/containerd/v2/core/images/image.go
new file mode 100644
index 000000000000..6bc106aac530
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/image.go
@@ -0,0 +1,440 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package images
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "sort"
+ "time"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+ "github.com/containerd/platforms"
+ digest "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Image provides the model for how containerd views container images.
+type Image struct {
+ // Name of the image.
+ //
+ // To be pulled, it must be a reference compatible with resolvers.
+ //
+ // This field is required.
+ Name string
+
+ // Labels provide runtime decoration for the image record.
+ //
+ // There is no default behavior for how these labels are propagated. They
+ // only decorate the static metadata object.
+ //
+ // This field is optional.
+ Labels map[string]string
+
+ // Target describes the root content for this image. Typically, this is
+ // a manifest, index or manifest list.
+ Target ocispec.Descriptor
+
+ CreatedAt, UpdatedAt time.Time
+}
+
+// DeleteOptions provide options on image delete
+type DeleteOptions struct {
+ Synchronous bool
+ Target *ocispec.Descriptor
+}
+
+// DeleteOpt allows configuring a delete operation
+type DeleteOpt func(context.Context, *DeleteOptions) error
+
+// SynchronousDelete is used to indicate that an image deletion and removal of
+// the image resources should occur synchronously before returning a result.
+func SynchronousDelete() DeleteOpt {
+ return func(ctx context.Context, o *DeleteOptions) error {
+ o.Synchronous = true
+ return nil
+ }
+}
+
+// DeleteTarget is used to specify the target value an image is expected
+// to have when deleting. If the image has a different target, then
+// NotFound is returned.
+func DeleteTarget(target *ocispec.Descriptor) DeleteOpt {
+ return func(ctx context.Context, o *DeleteOptions) error {
+ o.Target = target
+ return nil
+ }
+}
+
+// Store and interact with images
+type Store interface {
+ Get(ctx context.Context, name string) (Image, error)
+ List(ctx context.Context, filters ...string) ([]Image, error)
+ Create(ctx context.Context, image Image) (Image, error)
+
+ // Update will replace the data in the store with the provided image. If
+ // one or more fieldpaths are provided, only those fields will be updated.
+ Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error)
+
+ Delete(ctx context.Context, name string, opts ...DeleteOpt) error
+}
+
+// TODO(stevvooe): Many of these functions make strong platform assumptions,
+// which are untrue in a lot of cases. More refactoring must be done here to
+// make this work in all cases.
+
+// Config resolves the image configuration descriptor.
+//
+// The caller can then use the descriptor to resolve and process the
+// configuration of the image.
+func (image *Image) Config(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (ocispec.Descriptor, error) {
+ return Config(ctx, provider, image.Target, platform)
+}
+
+// RootFS returns the unpacked diff IDs that make up an image's rootfs.
+//
+// These are used to verify that a set of layers unpacked to the expected
+// values.
+func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) ([]digest.Digest, error) {
+ desc, err := image.Config(ctx, provider, platform)
+ if err != nil {
+ return nil, err
+ }
+ return RootFS(ctx, provider, desc)
+}
+
+// Size returns the total size of an image's packed resources.
+func (image *Image) Size(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (int64, error) {
+ var size int64
+ return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ if desc.Size < 0 {
+ return nil, fmt.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType)
+ }
+ size += desc.Size
+ return nil, nil
+ }), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target)
+}
+
+type platformManifest struct {
+ p *ocispec.Platform
+ m *ocispec.Manifest
+}
+
+// Manifest resolves a manifest from the image for the given platform.
+//
+// When a manifest descriptor inside of a manifest index does not have
+// a platform defined, the platform from the image config is considered.
+//
+// If the descriptor points to a non-index manifest, then the manifest is
+// unmarshalled and returned without considering the platform inside of the
+// config.
+//
+// TODO(stevvooe): This violates the current platform agnostic approach to this
+// package by returning a specific manifest type. We'll need to refactor this
+// to return a manifest descriptor or decide that we want to bring the API in
+// this direction because this abstraction is not needed.
+func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) {
+ var (
+ limit = 1
+ m []platformManifest
+ wasIndex bool
+ )
+
+ if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ if IsManifestType(desc.MediaType) {
+ p, err := content.ReadBlob(ctx, provider, desc)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := validateMediaType(p, desc.MediaType); err != nil {
+ return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err)
+ }
+
+ var manifest ocispec.Manifest
+ if err := json.Unmarshal(p, &manifest); err != nil {
+ return nil, err
+ }
+
+ if desc.Digest != image.Digest && platform != nil {
+ if desc.Platform != nil && !platform.Match(*desc.Platform) {
+ return nil, nil
+ }
+
+ if desc.Platform == nil {
+ imagePlatform, err := ConfigPlatform(ctx, provider, manifest.Config)
+ if err != nil {
+ return nil, err
+ }
+ if !platform.Match(imagePlatform) {
+ return nil, nil
+ }
+
+ }
+ }
+
+ m = append(m, platformManifest{
+ p: desc.Platform,
+ m: &manifest,
+ })
+
+ return nil, nil
+ } else if IsIndexType(desc.MediaType) {
+ p, err := content.ReadBlob(ctx, provider, desc)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := validateMediaType(p, desc.MediaType); err != nil {
+ return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err)
+ }
+
+ var idx ocispec.Index
+ if err := json.Unmarshal(p, &idx); err != nil {
+ return nil, err
+ }
+
+ if platform == nil {
+ return idx.Manifests, nil
+ }
+
+ var descs []ocispec.Descriptor
+ for _, d := range idx.Manifests {
+ if d.Platform == nil || platform.Match(*d.Platform) {
+ descs = append(descs, d)
+ }
+ }
+
+ sort.SliceStable(descs, func(i, j int) bool {
+ if descs[i].Platform == nil {
+ return false
+ }
+ if descs[j].Platform == nil {
+ return true
+ }
+ return platform.Less(*descs[i].Platform, *descs[j].Platform)
+ })
+
+ wasIndex = true
+
+ if len(descs) > limit {
+ return descs[:limit], nil
+ }
+ return descs, nil
+ }
+ return nil, fmt.Errorf("unexpected media type %v for %v: %w", desc.MediaType, desc.Digest, errdefs.ErrNotFound)
+ }), image); err != nil {
+ return ocispec.Manifest{}, err
+ }
+
+ if len(m) == 0 {
+ err := fmt.Errorf("manifest %v: %w", image.Digest, errdefs.ErrNotFound)
+ if wasIndex {
+ err = fmt.Errorf("no match for platform in manifest %v: %w", image.Digest, errdefs.ErrNotFound)
+ }
+ return ocispec.Manifest{}, err
+ }
+ return *m[0].m, nil
+}
+
+// Config resolves the image configuration descriptor using a content provider
+// to resolve child resources on the image.
+//
+// The caller can then use the descriptor to resolve and process the
+// configuration of the image.
+func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Descriptor, error) {
+ manifest, err := Manifest(ctx, provider, image, platform)
+ if err != nil {
+ return ocispec.Descriptor{}, err
+ }
+ return manifest.Config, nil
+}
+
+// Platforms returns one or more platforms supported by the image.
+func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) {
+ var platformSpecs []ocispec.Platform
+ return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ if desc.Platform != nil {
+ if desc.Platform.OS == "unknown" || desc.Platform.Architecture == "unknown" {
+ return nil, ErrSkipDesc
+ }
+ platformSpecs = append(platformSpecs, *desc.Platform)
+ return nil, ErrSkipDesc
+ }
+
+ if IsConfigType(desc.MediaType) {
+ imagePlatform, err := ConfigPlatform(ctx, provider, desc)
+ if err != nil {
+ return nil, err
+ }
+ platformSpecs = append(platformSpecs, imagePlatform)
+ }
+ return nil, nil
+ }), ChildrenHandler(provider)), image)
+}
+
+// Check reports whether all components of an image are available in the
+// provider for the specified platform.
+//
+// If available is true, the caller can assume that required represents the
+// complete set of content required for the image.
+//
+// missing will have the components that are part of required but not available
+// in the provider.
+//
+// If there is a problem resolving content, an error will be returned.
+func Check(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (available bool, required, present, missing []ocispec.Descriptor, err error) {
+ mfst, err := Manifest(ctx, provider, image, platform)
+ if err != nil {
+ if errdefs.IsNotFound(err) {
+ return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
+ }
+
+ return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", image.Digest, err)
+ }
+
+ // TODO(stevvooe): It is possible that referenced components could have
+ // children, but this is rare. For now, we ignore this and only verify
+ // that manifest components are present.
+ required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...)
+
+ for _, desc := range required {
+ ra, err := provider.ReaderAt(ctx, desc)
+ if err != nil {
+ if errdefs.IsNotFound(err) {
+ missing = append(missing, desc)
+ continue
+ } else {
+ return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", desc.Digest, err)
+ }
+ }
+ ra.Close()
+ present = append(present, desc)
+
+ }
+
+ return true, required, present, missing, nil
+}
+
+// Children returns the immediate children of content described by the descriptor.
+func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ if IsManifestType(desc.MediaType) {
+ p, err := content.ReadBlob(ctx, provider, desc)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := validateMediaType(p, desc.MediaType); err != nil {
+ return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err)
+ }
+
+ // TODO(stevvooe): We just assume oci manifest, for now. There may be
+ // subtle differences from the docker version.
+ var manifest ocispec.Manifest
+ if err := json.Unmarshal(p, &manifest); err != nil {
+ return nil, err
+ }
+
+ return append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...), nil
+ } else if IsIndexType(desc.MediaType) {
+ p, err := content.ReadBlob(ctx, provider, desc)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := validateMediaType(p, desc.MediaType); err != nil {
+ return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err)
+ }
+
+ var index ocispec.Index
+ if err := json.Unmarshal(p, &index); err != nil {
+ return nil, err
+ }
+
+ return append([]ocispec.Descriptor{}, index.Manifests...), nil
+ } else if !IsLayerType(desc.MediaType) && !IsKnownConfig(desc.MediaType) {
+ // Layers and configs are childless data types and should not be logged.
+ log.G(ctx).Debugf("encountered unknown type %v; children may not be fetched", desc.MediaType)
+ }
+ return nil, nil
+}
+
+// unknownDocument represents a manifest, manifest list, or index that has not
+// yet been validated.
+type unknownDocument struct {
+ MediaType string `json:"mediaType,omitempty"`
+ Config json.RawMessage `json:"config,omitempty"`
+ Layers json.RawMessage `json:"layers,omitempty"`
+ Manifests json.RawMessage `json:"manifests,omitempty"`
+ FSLayers json.RawMessage `json:"fsLayers,omitempty"` // schema 1
+}
+
+// validateMediaType returns an error if the byte slice is invalid JSON,
+// if the format of the blob is not supported, or if the media type
+// identifies the blob as one format, but it identifies itself as, or
+// contains elements of another format.
+func validateMediaType(b []byte, mt string) error {
+ var doc unknownDocument
+ if err := json.Unmarshal(b, &doc); err != nil {
+ return err
+ }
+ if len(doc.FSLayers) != 0 {
+ return fmt.Errorf("media-type: schema 1 not supported")
+ }
+ if IsManifestType(mt) && (len(doc.Manifests) != 0 || IsIndexType(doc.MediaType)) {
+ return fmt.Errorf("media-type: expected manifest but found index (%s)", mt)
+ } else if IsIndexType(mt) && (len(doc.Config) != 0 || len(doc.Layers) != 0 || IsManifestType(doc.MediaType)) {
+ return fmt.Errorf("media-type: expected index but found manifest (%s)", mt)
+ }
+ return nil
+}
+
+// RootFS returns the unpacked diff IDs that make up an image's rootfs.
+//
+// These are used to verify that a set of layers unpacked to the expected
+// values.
+func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) ([]digest.Digest, error) {
+ p, err := content.ReadBlob(ctx, provider, configDesc)
+ if err != nil {
+ return nil, err
+ }
+
+ var config ocispec.Image
+ if err := json.Unmarshal(p, &config); err != nil {
+ return nil, err
+ }
+ return config.RootFS.DiffIDs, nil
+}
+
+// ConfigPlatform returns a normalized platform from an image manifest config.
+func ConfigPlatform(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) (ocispec.Platform, error) {
+ p, err := content.ReadBlob(ctx, provider, configDesc)
+ if err != nil {
+ return ocispec.Platform{}, err
+ }
+
+ // Technically, this should be ocispec.Image, but we only need the
+ // ocispec.Platform that is embedded in the image struct.
+ var imagePlatform ocispec.Platform
+ if err := json.Unmarshal(p, &imagePlatform); err != nil {
+ return ocispec.Platform{}, err
+ }
+ return platforms.Normalize(imagePlatform), nil
+}
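A sketch of how Check is typically consumed before unpacking, assuming `cs` implements `content.Provider` and `img` is an `images.Image`:

```go
// Verify that every blob needed for the host platform is already in the
// local store before attempting an unpack.
available, required, present, missing, err := images.Check(ctx, cs, img.Target, platforms.Default())
if err != nil {
	return err
}
if !available {
	return fmt.Errorf("image %s incomplete: %d/%d blobs present, %d missing",
		img.Name, len(present), len(required), len(missing))
}
```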
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/importexport.go b/vendor/github.com/containerd/containerd/v2/core/images/importexport.go
new file mode 100644
index 000000000000..601d545ef0e8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/importexport.go
@@ -0,0 +1,37 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package images
+
+import (
+ "context"
+ "io"
+
+ "github.com/containerd/containerd/v2/core/content"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Importer is the interface for image importer.
+type Importer interface {
+ // Import imports an image from a tar stream.
+ Import(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error)
+}
+
+// Exporter is the interface for image exporter.
+type Exporter interface {
+ // Export exports an image to a tar stream.
+ Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/labels.go b/vendor/github.com/containerd/containerd/v2/core/images/labels.go
new file mode 100644
index 000000000000..06dfed572d43
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/labels.go
@@ -0,0 +1,21 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package images
+
+const (
+ ConvertedDockerSchema1LabelKey = "io.containerd.image/converted-docker-schema1"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/mediatypes.go b/vendor/github.com/containerd/containerd/v2/core/images/mediatypes.go
new file mode 100644
index 000000000000..d2e845b16d52
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/mediatypes.go
@@ -0,0 +1,222 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package images
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/containerd/errdefs"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Media type definitions for image components handled in containerd.
+//
+// OCI components are generally referenced directly, although we may centralize
+// here for clarity.
+const (
+ MediaTypeDockerSchema2Layer = "application/vnd.docker.image.rootfs.diff.tar"
+ MediaTypeDockerSchema2LayerForeign = "application/vnd.docker.image.rootfs.foreign.diff.tar"
+ MediaTypeDockerSchema2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+ MediaTypeDockerSchema2LayerZstd = "application/vnd.docker.image.rootfs.diff.tar.zstd"
+ MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+ MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json"
+ MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json"
+ MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
+
+ // Checkpoint/Restore Media Types
+
+ MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar"
+ MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar"
+ MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar"
+ MediaTypeContainerd1RW = "application/vnd.containerd.container.rw.tar"
+ MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+proto"
+ MediaTypeContainerd1CheckpointOptions = "application/vnd.containerd.container.checkpoint.options.v1+proto"
+ MediaTypeContainerd1CheckpointRuntimeName = "application/vnd.containerd.container.checkpoint.runtime.name"
+ MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto"
+
+ // MediaTypeDockerSchema1Manifest is the legacy Docker schema1 manifest
+ MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+
+ // Encrypted media types
+
+ MediaTypeImageLayerEncrypted = ocispec.MediaTypeImageLayer + "+encrypted"
+ MediaTypeImageLayerGzipEncrypted = ocispec.MediaTypeImageLayerGzip + "+encrypted"
+)
+
+// DiffCompression returns the compression as defined by the layer diff media
+// type. For Docker media types without compression, "unknown" is returned to
+// indicate that the media type may be compressed. If the media type is not
+// recognized as a layer diff, then it returns errdefs.ErrNotImplemented.
+func DiffCompression(ctx context.Context, mediaType string) (string, error) {
+ base, ext := parseMediaTypes(mediaType)
+ switch base {
+ case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign:
+ if len(ext) > 0 {
+ // Type is wrapped
+ return "", nil
+ }
+ // These media types may have been compressed but failed to
+ // use the correct media type. The decompression function
+ // should detect and handle this case.
+ return "unknown", nil
+ case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip:
+ if len(ext) > 0 {
+ // Type is wrapped
+ return "", nil
+ }
+ return "gzip", nil
+ case MediaTypeDockerSchema2LayerZstd:
+ if len(ext) > 0 {
+ // Type is wrapped
+ return "", nil
+ }
+ return "zstd", nil
+ case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // Non-distributable layers are deprecated
+ if len(ext) > 0 {
+ switch ext[len(ext)-1] {
+ case "gzip":
+ return "gzip", nil
+ case "zstd":
+ return "zstd", nil
+ }
+ }
+ return "", nil
+ default:
+ return "", fmt.Errorf("unrecognised mediatype %s: %w", mediaType, errdefs.ErrNotImplemented)
+ }
+}
+
+// parseMediaTypes splits the media type into the base type and
+// an array of sorted extensions
+func parseMediaTypes(mt string) (mediaType string, suffixes []string) {
+ if mt == "" {
+ return "", []string{}
+ }
+ mediaType, ext, ok := strings.Cut(mt, "+")
+ if !ok {
+ return mediaType, []string{}
+ }
+
+ // Splitting the extensions following the mediatype "(+)gzip+encrypted".
+ // We expect this to be a limited list, so add an arbitrary limit (50).
+ //
+ // Note that DiffCompression is only using the last element, so perhaps we
+ // should split on the last "+" only.
+ suffixes = strings.SplitN(ext, "+", 50)
+ sort.Strings(suffixes)
+ return mediaType, suffixes
+}
+
+// IsNonDistributable returns true if the media type is non-distributable.
+func IsNonDistributable(mt string) bool {
+ return strings.HasPrefix(mt, "application/vnd.oci.image.layer.nondistributable.") ||
+ strings.HasPrefix(mt, "application/vnd.docker.image.rootfs.foreign.")
+}
+
+// IsLayerType returns true if the media type is a layer
+func IsLayerType(mt string) bool {
+ if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") {
+ return true
+ }
+
+ // Parse Docker media types, strip off any + suffixes first
+ switch base, _ := parseMediaTypes(mt); base {
+ case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
+ MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip, MediaTypeDockerSchema2LayerZstd:
+ return true
+ }
+ return false
+}
+
+// IsDockerType returns true if the media type has "application/vnd.docker." prefix
+func IsDockerType(mt string) bool {
+ return strings.HasPrefix(mt, "application/vnd.docker.")
+}
+
+// IsManifestType returns true if the media type is an OCI-compatible manifest.
+// No support for schema1 manifest.
+func IsManifestType(mt string) bool {
+ switch mt {
+ case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+ return true
+ default:
+ return false
+ }
+}
+
+// IsIndexType returns true if the media type is an OCI-compatible index.
+func IsIndexType(mt string) bool {
+ switch mt {
+ case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList:
+ return true
+ default:
+ return false
+ }
+}
+
+// IsConfigType returns true if the media type is an OCI-compatible image config.
+// No support for containerd checkpoint configs.
+func IsConfigType(mt string) bool {
+ switch mt {
+ case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
+ return true
+ default:
+ return false
+ }
+}
+
+// IsKnownConfig returns true if the media type is a known config type,
+// including containerd checkpoint configs
+func IsKnownConfig(mt string) bool {
+ switch mt {
+ case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
+ MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig:
+ return true
+ }
+ return false
+}
+
+// ChildGCLabels returns the GC label keys used to reference the given descriptor.
+func ChildGCLabels(desc ocispec.Descriptor) []string {
+ mt := desc.MediaType
+ if IsKnownConfig(mt) {
+ return []string{"containerd.io/gc.ref.content.config"}
+ }
+
+ switch mt {
+ case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+ return []string{"containerd.io/gc.ref.content.m."}
+ }
+
+ if IsLayerType(mt) {
+ return []string{"containerd.io/gc.ref.content.l."}
+ }
+
+ return []string{"containerd.io/gc.ref.content."}
+}
+
+// ChildGCLabelsFilterLayers returns the labels for a given descriptor to
+// reference it, skipping layer media types
+func ChildGCLabelsFilterLayers(desc ocispec.Descriptor) []string {
+ if IsLayerType(desc.MediaType) {
+ return nil
+ }
+ return ChildGCLabels(desc)
+}
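A few worked examples of the suffix handling, traced through parseMediaTypes and the switch in DiffCompression; this is a fragment that assumes `ctx` and the usual imports (`fmt`, `errdefs`, `images`, `ocispec`) are in scope:

```go
// The OCI gzip layer type splits into the base OCI layer type plus the
// ["gzip"] suffix, so the compression is reported as "gzip".
c, _ := images.DiffCompression(ctx, ocispec.MediaTypeImageLayerGzip) // "gzip"

// Docker schema 2 layers carry compression in the base type itself.
c, _ = images.DiffCompression(ctx, images.MediaTypeDockerSchema2LayerGzip) // "gzip"

// A plain Docker layer may still be compressed despite its media type,
// hence "unknown" rather than "".
c, _ = images.DiffCompression(ctx, images.MediaTypeDockerSchema2Layer) // "unknown"

// Anything unrecognized wraps errdefs.ErrNotImplemented.
_, err := images.DiffCompression(ctx, "text/plain")
fmt.Println(c, errdefs.IsNotImplemented(err)) // unknown true
```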
diff --git a/vendor/github.com/containerd/containerd/v2/core/leases/context.go b/vendor/github.com/containerd/containerd/v2/core/leases/context.go
new file mode 100644
index 000000000000..599c549d31bb
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/leases/context.go
@@ -0,0 +1,40 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package leases
+
+import "context"
+
+type leaseKey struct{}
+
+// WithLease sets a given lease on the context
+func WithLease(ctx context.Context, lid string) context.Context {
+ ctx = context.WithValue(ctx, leaseKey{}, lid)
+
+ // also store on the grpc headers so it gets picked up by any clients that
+ // are using this.
+ return withGRPCLeaseHeader(ctx, lid)
+}
+
+// FromContext returns the lease from the context.
+func FromContext(ctx context.Context) (string, bool) {
+ lid, ok := ctx.Value(leaseKey{}).(string)
+ if !ok {
+ return fromGRPCHeader(ctx)
+ }
+
+ return lid, ok
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/leases/grpc.go b/vendor/github.com/containerd/containerd/v2/core/leases/grpc.go
new file mode 100644
index 000000000000..22f287a8bfc6
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/leases/grpc.go
@@ -0,0 +1,58 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package leases
+
+import (
+ "context"
+
+ "google.golang.org/grpc/metadata"
+)
+
+const (
+ // GRPCHeader defines the header name for specifying a containerd lease.
+ GRPCHeader = "containerd-lease"
+)
+
+func withGRPCLeaseHeader(ctx context.Context, lid string) context.Context {
+ // also store on the grpc headers so it gets picked up by any clients
+ // that are using this.
+ txheader := metadata.Pairs(GRPCHeader, lid)
+ md, ok := metadata.FromOutgoingContext(ctx) // merge with outgoing context.
+ if !ok {
+ md = txheader
+ } else {
+ // order ensures the latest is first in this list.
+ md = metadata.Join(txheader, md)
+ }
+
+ return metadata.NewOutgoingContext(ctx, md)
+}
+
+func fromGRPCHeader(ctx context.Context) (string, bool) {
+ // try to extract for use in grpc servers.
+ md, ok := metadata.FromIncomingContext(ctx)
+ if !ok {
+ return "", false
+ }
+
+ values := md[GRPCHeader]
+ if len(values) == 0 {
+ return "", false
+ }
+
+ return values[0], true
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/leases/id.go b/vendor/github.com/containerd/containerd/v2/core/leases/id.go
new file mode 100644
index 000000000000..8f5dc93f348e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/leases/id.go
@@ -0,0 +1,43 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package leases
+
+import (
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "time"
+)
+
+// WithRandomID sets the lease ID to a random unique value
+func WithRandomID() Opt {
+ return func(l *Lease) error {
+ t := time.Now()
+ var b [3]byte
+ if _, err := rand.Read(b[:]); err != nil {
+ return err
+ }
+ l.ID = fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:]))
+ return nil
+ }
+}
+
+// WithID sets the ID for the lease
+func WithID(id string) Opt {
+ return func(l *Lease) error {
+ l.ID = id
+ return nil
+ }
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/leases/lease.go b/vendor/github.com/containerd/containerd/v2/core/leases/lease.go
new file mode 100644
index 000000000000..d842c4081a42
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/leases/lease.go
@@ -0,0 +1,104 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package leases
+
+import (
+ "context"
+ "time"
+)
+
+// Opt is used to set options on a lease
+type Opt func(*Lease) error
+
+// DeleteOpt allows configuring a delete operation
+type DeleteOpt func(context.Context, *DeleteOptions) error
+
+// Manager is used to create, list, and remove leases
+type Manager interface {
+ Create(context.Context, ...Opt) (Lease, error)
+ Delete(context.Context, Lease, ...DeleteOpt) error
+ List(context.Context, ...string) ([]Lease, error)
+ AddResource(context.Context, Lease, Resource) error
+ DeleteResource(context.Context, Lease, Resource) error
+ ListResources(context.Context, Lease) ([]Resource, error)
+}
+
+// Lease retains resources to prevent cleanup before
+// the resources can be fully referenced.
+type Lease struct {
+ ID string
+ CreatedAt time.Time
+ Labels map[string]string
+}
+
+// Resource represents a low-level resource of an image, such as content, an
+// ingest, or a snapshotter.
+type Resource struct {
+ ID string
+ Type string
+}
+
+// DeleteOptions provide options on image delete
+type DeleteOptions struct {
+ Synchronous bool
+}
+
+// SynchronousDelete is used to indicate that a lease deletion and removal of
+// any unreferenced resources should occur synchronously before returning the
+// result.
+func SynchronousDelete(ctx context.Context, o *DeleteOptions) error {
+ o.Synchronous = true
+ return nil
+}
+
+// WithLabel sets a label on a lease, and merges it with existing labels.
+// It overwrites the existing value of the given label (if present).
+func WithLabel(label, value string) Opt {
+ return func(l *Lease) error {
+ if l.Labels == nil {
+ l.Labels = map[string]string{label: value}
+ return nil
+ }
+ l.Labels[label] = value
+ return nil
+ }
+}
+
+// WithLabels merges labels on a lease
+func WithLabels(labels map[string]string) Opt {
+ return func(l *Lease) error {
+ if l.Labels == nil {
+ l.Labels = map[string]string{}
+ }
+ for k, v := range labels {
+ l.Labels[k] = v
+ }
+ return nil
+ }
+}
+
+// WithExpiration sets an expiration on the lease
+func WithExpiration(d time.Duration) Opt {
+ return func(l *Lease) error {
+ if l.Labels == nil {
+ l.Labels = map[string]string{}
+ }
+ l.Labels["containerd.io/gc.expire"] = time.Now().Add(d).Format(time.RFC3339)
+
+ return nil
+ }
+}
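A sketch of end-to-end lease usage, assuming a `leases.Manager` is available (for example the one the containerd client exposes via `client.LeasesService()`); the 24-hour expiry and the label are illustrative, and `time` is assumed to be imported:

```go
lease, err := manager.Create(ctx,
	leases.WithRandomID(),
	leases.WithExpiration(24*time.Hour),
	leases.WithLabel("owner", "example"),
)
if err != nil {
	return err
}
// Release the lease, and any resources it alone kept alive, when done.
defer manager.Delete(ctx, lease, leases.SynchronousDelete)

// Requests made with this context are attributed to the lease, protecting
// the content they create from garbage collection.
ctx = leases.WithLease(ctx, lease.ID)
```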
diff --git a/vendor/github.com/containerd/containerd/v2/core/metadata/boltutil/helpers.go b/vendor/github.com/containerd/containerd/v2/core/metadata/boltutil/helpers.go
new file mode 100644
index 000000000000..30df2e7a91f2
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/metadata/boltutil/helpers.go
@@ -0,0 +1,238 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package boltutil
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/containerd/containerd/v2/pkg/protobuf/proto"
+ "github.com/containerd/containerd/v2/pkg/protobuf/types"
+ "github.com/containerd/typeurl/v2"
+ bolt "go.etcd.io/bbolt"
+)
+
+var (
+ bucketKeyAnnotations = []byte("annotations")
+ bucketKeyLabels = []byte("labels")
+ bucketKeyCreatedAt = []byte("createdat")
+ bucketKeyUpdatedAt = []byte("updatedat")
+ bucketKeyExtensions = []byte("extensions")
+)
+
+// ReadLabels reads the labels key from the bucket
+// Uses the key "labels"
+func ReadLabels(bkt *bolt.Bucket) (map[string]string, error) {
+ return readMap(bkt, bucketKeyLabels)
+}
+
+// ReadAnnotations reads the OCI Descriptor Annotations key from the bucket
+// Uses the key "annotations"
+func ReadAnnotations(bkt *bolt.Bucket) (map[string]string, error) {
+ return readMap(bkt, bucketKeyAnnotations)
+}
+
+func readMap(bkt *bolt.Bucket, bucketName []byte) (map[string]string, error) {
+ lbkt := bkt.Bucket(bucketName)
+ if lbkt == nil {
+ return nil, nil
+ }
+ labels := map[string]string{}
+ if err := lbkt.ForEach(func(k, v []byte) error {
+ labels[string(k)] = string(v)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return labels, nil
+}
+
+// WriteLabels will write a new labels bucket to the provided bucket at key
+// bucketKeyLabels, replacing the contents of the bucket with the provided map.
+//
+// The provided labels map will be modified to reflect the final contents of
+// the bucket. Typically, this removes zero-value entries.
+// Uses the key "labels"
+func WriteLabels(bkt *bolt.Bucket, labels map[string]string) error {
+ return writeMap(bkt, bucketKeyLabels, labels)
+}
+
+// WriteAnnotations writes the OCI Descriptor Annotations
+func WriteAnnotations(bkt *bolt.Bucket, labels map[string]string) error {
+ return writeMap(bkt, bucketKeyAnnotations, labels)
+}
+
+func writeMap(bkt *bolt.Bucket, bucketName []byte, labels map[string]string) error {
+ // Remove existing labels to keep from merging
+ if lbkt := bkt.Bucket(bucketName); lbkt != nil {
+ if err := bkt.DeleteBucket(bucketName); err != nil {
+ return err
+ }
+ }
+
+ if len(labels) == 0 {
+ return nil
+ }
+
+ lbkt, err := bkt.CreateBucket(bucketName)
+ if err != nil {
+ return err
+ }
+
+ for k, v := range labels {
+ if v == "" {
+ delete(labels, k) // remove since we don't actually set it
+ continue
+ }
+
+ if err := lbkt.Put([]byte(k), []byte(v)); err != nil {
+ return fmt.Errorf("failed to set label %q=%q: %w", k, v, err)
+ }
+ }
+
+ return nil
+}
+
+// ReadTimestamps reads created and updated timestamps from a bucket.
+// Uses keys "createdat" and "updatedat"
+func ReadTimestamps(bkt *bolt.Bucket, created, updated *time.Time) error {
+ for _, f := range []struct {
+ b []byte
+ t *time.Time
+ }{
+ {bucketKeyCreatedAt, created},
+ {bucketKeyUpdatedAt, updated},
+ } {
+ v := bkt.Get(f.b)
+ if v != nil {
+ if err := f.t.UnmarshalBinary(v); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// WriteTimestamps writes created and updated timestamps to a bucket.
+// Uses keys "createdat" and "updatedat"
+func WriteTimestamps(bkt *bolt.Bucket, created, updated time.Time) error {
+ createdAt, err := created.MarshalBinary()
+ if err != nil {
+ return err
+ }
+ updatedAt, err := updated.MarshalBinary()
+ if err != nil {
+ return err
+ }
+ for _, v := range [][2][]byte{
+ {bucketKeyCreatedAt, createdAt},
+ {bucketKeyUpdatedAt, updatedAt},
+ } {
+ if err := bkt.Put(v[0], v[1]); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// WriteExtensions will write a KV map to the given bucket,
+// where `K` is a string key and `V` is a protobuf's Any type that represents a generic extension.
+func WriteExtensions(bkt *bolt.Bucket, extensions map[string]typeurl.Any) error {
+ if len(extensions) == 0 {
+ return nil
+ }
+
+ ebkt, err := bkt.CreateBucketIfNotExists(bucketKeyExtensions)
+ if err != nil {
+ return err
+ }
+
+ for name, ext := range extensions {
+ ext := typeurl.MarshalProto(ext)
+ p, err := proto.Marshal(ext)
+ if err != nil {
+ return err
+ }
+
+ if err := ebkt.Put([]byte(name), p); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ReadExtensions will read back a map of extensions from the given bucket, previously written by WriteExtensions
+func ReadExtensions(bkt *bolt.Bucket) (map[string]typeurl.Any, error) {
+ var (
+ extensions = make(map[string]typeurl.Any)
+ ebkt = bkt.Bucket(bucketKeyExtensions)
+ )
+
+ if ebkt == nil {
+ return extensions, nil
+ }
+
+ if err := ebkt.ForEach(func(k, v []byte) error {
+ var t types.Any
+ if err := proto.Unmarshal(v, &t); err != nil {
+ return err
+ }
+
+ extensions[string(k)] = &t
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+
+ return extensions, nil
+}
+
+// WriteAny write a protobuf's Any type to the bucket
+func WriteAny(bkt *bolt.Bucket, name []byte, any typeurl.Any) error {
+ pbany := typeurl.MarshalProto(any)
+ if pbany == nil {
+ return nil
+ }
+
+ data, err := proto.Marshal(pbany)
+ if err != nil {
+ return fmt.Errorf("failed to marshal: %w", err)
+ }
+
+ if err := bkt.Put(name, data); err != nil {
+ return fmt.Errorf("put failed: %w", err)
+ }
+
+ return nil
+}
+
+// ReadAny reads back protobuf's Any type from the bucket
+func ReadAny(bkt *bolt.Bucket, name []byte) (*types.Any, error) {
+ bytes := bkt.Get(name)
+ if bytes == nil {
+ return nil, nil
+ }
+
+ out := types.Any{}
+ if err := proto.Unmarshal(bytes, &out); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal any: %w", err)
+ }
+
+ return &out, nil
+}
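A minimal sketch of the read/write helpers inside a bbolt transaction; `db` is assumed to be an open `*bolt.DB`, and the bucket name and labels are placeholders:

```go
if err := db.Update(func(tx *bolt.Tx) error {
	bkt, err := tx.CreateBucketIfNotExists([]byte("example"))
	if err != nil {
		return err
	}
	now := time.Now().UTC()
	if err := boltutil.WriteTimestamps(bkt, now, now); err != nil {
		return err
	}
	// Zero-value entries such as "drop" are removed from the labels map
	// itself, per the WriteLabels contract above.
	return boltutil.WriteLabels(bkt, map[string]string{"env": "dev", "drop": ""})
}); err != nil {
	return err
}
```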
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/lookup_unix.go b/vendor/github.com/containerd/containerd/v2/core/mount/lookup_unix.go
new file mode 100644
index 000000000000..90a0d5a5f263
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/lookup_unix.go
@@ -0,0 +1,51 @@
+//go:build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import (
+ "fmt"
+
+ "github.com/moby/sys/mountinfo"
+)
+
+// Lookup returns the mount info corresponding to the path.
+func Lookup(dir string) (Info, error) {
+ resolvedDir, err := CanonicalizePath(dir)
+ if err != nil {
+ return Info{}, err
+ }
+
+ m, err := mountinfo.GetMounts(mountinfo.ParentsFilter(resolvedDir))
+ if err != nil {
+ return Info{}, fmt.Errorf("failed to find the mount info for %q: %w", resolvedDir, err)
+ }
+ if len(m) == 0 {
+ return Info{}, fmt.Errorf("failed to find the mount info for %q", resolvedDir)
+ }
+
+ // find the longest matching mount point
+ var idx, maxlen int
+ for i := range m {
+ if len(m[i].Mountpoint) > maxlen {
+ maxlen = len(m[i].Mountpoint)
+ idx = i
+ }
+ }
+ return *m[idx], nil
+}
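+
+// Usage sketch: because Lookup picks the longest matching mountpoint, a path
+// inside a nested mount reports that nested mount, not its parent. The path
+// below is only an example.
+//
+//	info, err := Lookup("/var/lib/containerd")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(info.Mountpoint, info.FSType)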
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/lookup_unsupported.go b/vendor/github.com/containerd/containerd/v2/core/mount/lookup_unsupported.go
new file mode 100644
index 000000000000..1daf96d5c93f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/lookup_unsupported.go
@@ -0,0 +1,29 @@
+//go:build windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import (
+ "fmt"
+ "runtime"
+)
+
+// Lookup returns the mount info corresponding to the path.
+func Lookup(dir string) (Info, error) {
+ return Info{}, fmt.Errorf("mount.Lookup is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/losetup_linux.go b/vendor/github.com/containerd/containerd/v2/core/mount/losetup_linux.go
new file mode 100644
index 000000000000..a08db7a3a24f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/losetup_linux.go
@@ -0,0 +1,222 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/containerd/containerd/v2/internal/randutil"
+ kernel "github.com/containerd/containerd/v2/pkg/kernelversion"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ loopControlPath = "/dev/loop-control"
+ loopDevFormat = "/dev/loop%d"
+
+ ebusyString = "device or resource busy"
+)
+
+// LoopParams holds parameters to control loop device setup.
+type LoopParams struct {
+ // Loop device should forbid write
+ Readonly bool
+ // Loop device is automatically cleared by kernel when the
+ // last opener closes it
+ Autoclear bool
+ // Use direct IO to access the loop backing file
+ Direct bool
+}
+
+func getFreeLoopDev() (uint32, error) {
+ ctrl, err := os.OpenFile(loopControlPath, os.O_RDWR, 0)
+ if err != nil {
+ return 0, fmt.Errorf("could not open %v: %v", loopControlPath, err)
+ }
+ defer ctrl.Close()
+ num, err := unix.IoctlRetInt(int(ctrl.Fd()), unix.LOOP_CTL_GET_FREE)
+ if err != nil {
+ return 0, fmt.Errorf("could not get free loop device: %w", err)
+ }
+ return uint32(num), nil
+}
+
+// setupLoopDev attaches the backing file to the loop device and returns
+// the file handle for the loop device. The caller is responsible for
+// closing the file handle.
+func setupLoopDev(backingFile, loopDev string, param LoopParams) (_ *os.File, retErr error) {
+ // 1. Open backing file and loop device
+ flags := os.O_RDWR
+ if param.Readonly {
+ flags = os.O_RDONLY
+ }
+
+ back, err := os.OpenFile(backingFile, flags, 0)
+ if err != nil {
+ return nil, fmt.Errorf("could not open backing file: %s: %w", backingFile, err)
+ }
+ defer back.Close()
+
+ loop, err := os.OpenFile(loopDev, flags, 0)
+ if err != nil {
+ return nil, fmt.Errorf("could not open loop device: %s: %w", loopDev, err)
+ }
+ defer func() {
+ if retErr != nil {
+ loop.Close()
+ }
+ }()
+
+ fiveDotEight := kernel.KernelVersion{Kernel: 5, Major: 8}
+ if ok, err := kernel.GreaterEqualThan(fiveDotEight); err == nil && ok {
+ config := unix.LoopConfig{
+ Fd: uint32(back.Fd()),
+ }
+
+ copy(config.Info.File_name[:], backingFile)
+ if param.Readonly {
+ config.Info.Flags |= unix.LO_FLAGS_READ_ONLY
+ }
+
+ if param.Autoclear {
+ config.Info.Flags |= unix.LO_FLAGS_AUTOCLEAR
+ }
+
+ if param.Direct {
+ config.Info.Flags |= unix.LO_FLAGS_DIRECT_IO
+ }
+
+ if err := unix.IoctlLoopConfigure(int(loop.Fd()), &config); err != nil {
+ return nil, fmt.Errorf("failed to configure loop device: %s: %w", loopDev, err)
+ }
+
+ return loop, nil
+ }
+
+ // 2. Set FD
+ if err := unix.IoctlSetInt(int(loop.Fd()), unix.LOOP_SET_FD, int(back.Fd())); err != nil {
+ return nil, fmt.Errorf("could not set loop fd for device: %s: %w", loopDev, err)
+ }
+
+ defer func() {
+ if retErr != nil {
+ _ = unix.IoctlSetInt(int(loop.Fd()), unix.LOOP_CLR_FD, 0)
+ }
+ }()
+
+ // 3. Set Info
+ info := unix.LoopInfo64{}
+ copy(info.File_name[:], backingFile)
+ if param.Readonly {
+ info.Flags |= unix.LO_FLAGS_READ_ONLY
+ }
+
+ if param.Autoclear {
+ info.Flags |= unix.LO_FLAGS_AUTOCLEAR
+ }
+
+ err = unix.IoctlLoopSetStatus64(int(loop.Fd()), &info)
+ if err != nil {
+ return nil, fmt.Errorf("failed to set loop device info: %w", err)
+ }
+
+ // 4. Set Direct IO
+ if param.Direct {
+ err = unix.IoctlSetInt(int(loop.Fd()), unix.LOOP_SET_DIRECT_IO, 1)
+ if err != nil {
+ return nil, fmt.Errorf("failed to setup loop with direct: %w", err)
+ }
+ }
+
+ return loop, nil
+}
+
+// setupLoop looks for (and possibly creates) a free loop device, and
+// then attaches backingFile to it.
+//
+// When autoclear is true, the caller should take care to close the
+// returned file handle when done with the loop device. The open file
+// handle keeps LO_FLAGS_AUTOCLEAR in effect, and we rely on it to clean
+// up the loop device: if the caller closes the handle after mounting the
+// device, the kernel clears the loop device once it is unmounted;
+// otherwise the loop device is cleared when the handle is closed.
+//
+// When autoclear is false, the caller is responsible for removing
+// the loop device when done with it.
+//
+// Upon success, the file handle to the loop device is returned.
+func setupLoop(backingFile string, param LoopParams) (*os.File, error) {
+ for retry := 1; retry < 100; retry++ {
+ num, err := getFreeLoopDev()
+ if err != nil {
+ return nil, err
+ }
+
+ loopDev := fmt.Sprintf(loopDevFormat, num)
+ file, err := setupLoopDev(backingFile, loopDev, param)
+ if err != nil {
+ // Per util-linux/sys-utils/losetup.c:create_loop(),
+ // free loop device can race and we end up failing
+ // with EBUSY when trying to set it up.
+ if strings.Contains(err.Error(), ebusyString) {
+ // Fallback a bit to avoid live lock
+ time.Sleep(time.Millisecond * time.Duration(randutil.Intn(retry*10)))
+ continue
+ }
+ return nil, err
+ }
+
+ return file, nil
+ }
+
+ return nil, errors.New("timeout creating new loopback device")
+}
+
+func removeLoop(loopdev string) error {
+ file, err := os.Open(loopdev)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ return unix.IoctlSetInt(int(file.Fd()), unix.LOOP_CLR_FD, 0)
+}
+
+// AttachLoopDevice attaches a specified backing file to a loop device
+func AttachLoopDevice(backingFile string) (string, error) {
+ file, err := setupLoop(backingFile, LoopParams{})
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+ return file.Name(), nil
+}
+
+// DetachLoopDevice detaches the provided loop devices
+func DetachLoopDevice(devices ...string) error {
+ for _, dev := range devices {
+ if err := removeLoop(dev); err != nil {
+ return fmt.Errorf("failed to remove loop device: %s: %w", dev, err)
+ }
+ }
+
+ return nil
+}
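+
+// Usage sketch pairing the two helpers; the image path is an example. Since
+// AttachLoopDevice uses LoopParams{} (no autoclear), the device must be
+// detached explicitly when no longer needed.
+//
+//	dev, err := AttachLoopDevice("/tmp/disk.img")
+//	if err != nil {
+//		return err
+//	}
+//	defer DetachLoopDevice(dev)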
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/mount.go b/vendor/github.com/containerd/containerd/v2/core/mount/mount.go
new file mode 100644
index 000000000000..16865b4f443b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/mount.go
@@ -0,0 +1,176 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/containerd/containerd/api/types"
+ "github.com/containerd/continuity/fs"
+)
+
+// Mount is the lingua franca of containerd. A mount represents a
+// serialized mount syscall. Components either emit or consume mounts.
+type Mount struct {
+	// Type specifies the host-specific type of the mount.
+ Type string
+ // Source specifies where to mount from. Depending on the host system, this
+ // can be a source path or device.
+ Source string
+ // Target specifies an optional subdirectory as a mountpoint. It assumes that
+ // the subdirectory exists in a parent mount.
+ Target string
+ // Options contains zero or more fstab-style mount options. Typically,
+ // these are platform specific.
+ Options []string
+}
+
+// All mounts all the provided mounts to the provided target. If submounts are
+// present, it assumes that parent mounts come before child mounts.
+func All(mounts []Mount, target string) error {
+ for _, m := range mounts {
+ if err := m.Mount(target); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// UnmountMounts unmounts all the mounts under a target in the reverse order of
+// the mounts array provided.
+func UnmountMounts(mounts []Mount, target string, flags int) error {
+ for i := len(mounts) - 1; i >= 0; i-- {
+ mountpoint, err := fs.RootPath(target, mounts[i].Target)
+ if err != nil {
+ return err
+ }
+
+ if err := UnmountAll(mountpoint, flags); err != nil {
+ if i == len(mounts)-1 { // last mount
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// CanonicalizePath makes path absolute and resolves symlinks in it.
+// Path must exist.
+func CanonicalizePath(path string) (string, error) {
+ // Abs also does Clean, so we do not need to call it separately
+ path, err := filepath.Abs(path)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.EvalSymlinks(path)
+}
+
+// ReadOnly returns a boolean value indicating whether this mount has the "ro"
+// option set.
+func (m *Mount) ReadOnly() bool {
+ for _, option := range m.Options {
+ if option == "ro" {
+ return true
+ }
+ }
+ return false
+}
+
+// Mount to the provided target path.
+func (m *Mount) Mount(target string) error {
+ target, err := fs.RootPath(target, m.Target)
+ if err != nil {
+ return fmt.Errorf("failed to join path %q with root %q: %w", m.Target, target, err)
+ }
+ return m.mount(target)
+}
+
+// readonlyMounts modifies the received mounts in place to make them
+// read-only and returns the same slice.
+func readonlyMounts(mounts []Mount) []Mount {
+ for i, m := range mounts {
+ if m.Type == "overlay" {
+ mounts[i].Options = readonlyOverlay(m.Options)
+ continue
+ }
+ opts := make([]string, 0, len(m.Options))
+ for _, opt := range m.Options {
+ if opt != "rw" && opt != "ro" { // skip `ro` too so we don't append it twice
+ opts = append(opts, opt)
+ }
+ }
+ opts = append(opts, "ro")
+ mounts[i].Options = opts
+ }
+ return mounts
+}
+
+// readonlyOverlay takes mount options for overlay mounts and makes them readonly by
+// removing workdir and upperdir (and appending the upperdir layer to lowerdir) - see:
+// https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html#multiple-lower-layers
+func readonlyOverlay(opt []string) []string {
+ out := make([]string, 0, len(opt))
+ upper := ""
+ for _, o := range opt {
+ if strings.HasPrefix(o, "upperdir=") {
+ upper = strings.TrimPrefix(o, "upperdir=")
+ } else if !strings.HasPrefix(o, "workdir=") {
+ out = append(out, o)
+ }
+ }
+ if upper != "" {
+ for i, o := range out {
+ if strings.HasPrefix(o, "lowerdir=") {
+ out[i] = "lowerdir=" + upper + ":" + strings.TrimPrefix(o, "lowerdir=")
+ }
+ }
+ }
+ return out
+}
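+
+// For example, readonlyOverlay turns
+//
+//	upperdir=/u,workdir=/w,lowerdir=/l2:/l1
+//
+// into
+//
+//	lowerdir=/u:/l2:/l1
+//
+// so the former upper layer stays visible as the topmost lower layer, but is
+// no longer writable.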
+
+// ToProto converts from [Mount] to the containerd
+// APIs protobuf definition of a Mount.
+func ToProto(mounts []Mount) []*types.Mount {
+ apiMounts := make([]*types.Mount, len(mounts))
+ for i, m := range mounts {
+ apiMounts[i] = &types.Mount{
+ Type: m.Type,
+ Source: m.Source,
+ Target: m.Target,
+ Options: m.Options,
+ }
+ }
+ return apiMounts
+}
+
+// FromProto converts from the protobuf definition [types.Mount] to
+// [Mount].
+func FromProto(mm []*types.Mount) []Mount {
+ mounts := make([]Mount, len(mm))
+ for i, m := range mm {
+ mounts[i] = Mount{
+ Type: m.Type,
+ Source: m.Source,
+ Target: m.Target,
+ Options: m.Options,
+ }
+ }
+ return mounts
+}
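+
+// Conversion sketch: Mount and types.Mount are field-for-field equivalent, so
+// converting there and back yields an equal slice (note the Options slices
+// share their backing arrays rather than being deep-copied).
+//
+//	api := ToProto(mounts)  // e.g. before sending mounts over the API
+//	back := FromProto(api)  // reflect.DeepEqual(mounts, back) == true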
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/mount_freebsd.go b/vendor/github.com/containerd/containerd/v2/core/mount/mount_freebsd.go
new file mode 100644
index 000000000000..8b384267fe4f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/mount_freebsd.go
@@ -0,0 +1,133 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ // ErrNotImplementOnUnix is returned for methods that are not implemented
+ ErrNotImplementOnUnix = errors.New("not implemented under unix")
+)
+
+// Mount to the provided target.
+//
+// The "syscall" and "golang.org/x/sys/unix" packages do not define a Mount
+// function for FreeBSD, so instead we execute mount(8) and trust it to do
+// the right thing.
+func (m *Mount) mount(target string) error {
+ // target: "/foo/target"
+ // command: "mount -o ro -t nullfs /foo/source /foo/merged"
+ // Note: FreeBSD mount(8) is particular about the order of flags and arguments
+ var args []string
+ for _, o := range m.Options {
+ args = append(args, "-o", o)
+ }
+ args = append(args, "-t", m.Type)
+ args = append(args, m.Source, target)
+
+ infoBeforeMount, err := Lookup(target)
+ if err != nil {
+ return err
+ }
+
+ // cmd.CombinedOutput() may intermittently return ECHILD because of our signal handling in shim.
+ // See #4387 and wait(2).
+ const retriesOnECHILD = 10
+ for i := 0; i < retriesOnECHILD; i++ {
+ cmd := exec.Command("mount", args...)
+ out, err := cmd.CombinedOutput()
+ if err == nil {
+ return nil
+ }
+ if !errors.Is(err, unix.ECHILD) {
+ return fmt.Errorf("mount [%v] failed: %q: %w", args, string(out), err)
+ }
+		// We got ECHILD; we are not sure whether the mount was successful.
+ // If the mount ID has changed, we are sure we got some new mount, but still not sure it is fully completed.
+ // So we attempt to unmount the new mount before retrying.
+ infoAfterMount, err := Lookup(target)
+ if err != nil {
+ return err
+ }
+ if infoAfterMount.ID != infoBeforeMount.ID {
+ _ = unmount(target, 0)
+ }
+ }
+ return fmt.Errorf("mount [%v] failed with ECHILD (retried %d times)", args, retriesOnECHILD)
+}
+
+// Unmount the provided mount path with the flags
+func Unmount(target string, flags int) error {
+ if err := unmount(target, flags); err != nil && err != unix.EINVAL {
+ return err
+ }
+ return nil
+}
+
+func unmount(target string, flags int) error {
+ for i := 0; i < 50; i++ {
+ if err := unix.Unmount(target, flags); err != nil {
+ switch err {
+ case unix.EBUSY:
+ time.Sleep(50 * time.Millisecond)
+ continue
+ default:
+ return err
+ }
+ }
+ return nil
+ }
+ return fmt.Errorf("failed to unmount target %s: %w", target, unix.EBUSY)
+}
+
+// UnmountAll repeatedly unmounts the given mount point until there
+// are no mounts remaining (EINVAL is returned by unmount), which is
+// useful for undoing a stack of mounts on the same mount point.
+// UnmountAll is a noop when the first argument is an empty string.
+// This is done when the containerd client did not specify any rootfs
+// mounts (e.g. because the rootfs is managed outside containerd).
+// UnmountAll is also a noop when the mount path does not exist.
+func UnmountAll(mount string, flags int) error {
+ if mount == "" {
+ return nil
+ }
+ if _, err := os.Stat(mount); os.IsNotExist(err) {
+ return nil
+ }
+
+ for {
+ if err := unmount(mount, flags); err != nil {
+ // EINVAL is returned if the target is not a
+ // mount point, indicating that we are
+ // done. It can also indicate a few other
+ // things (such as invalid flags) which we
+ // unfortunately end up squelching here too.
+ if err == unix.EINVAL {
+ return nil
+ }
+ return err
+ }
+ }
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/mount_idmapped_linux.go b/vendor/github.com/containerd/containerd/v2/core/mount/mount_idmapped_linux.go
new file mode 100644
index 000000000000..4fee53e9856f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/mount_idmapped_linux.go
@@ -0,0 +1,204 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/containerd/containerd/v2/pkg/sys"
+)
+
+// TODO: Support multiple mappings in future
+func parseIDMapping(mapping string) ([]syscall.SysProcIDMap, error) {
+ parts := strings.Split(mapping, ":")
+ if len(parts) != 3 {
+ return nil, fmt.Errorf("user namespace mappings require the format `container-id:host-id:size`")
+ }
+
+ cID, err := strconv.Atoi(parts[0])
+ if err != nil {
+ return nil, fmt.Errorf("invalid container id for user namespace remapping, %w", err)
+ }
+
+ hID, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return nil, fmt.Errorf("invalid host id for user namespace remapping, %w", err)
+ }
+
+ size, err := strconv.Atoi(parts[2])
+ if err != nil {
+ return nil, fmt.Errorf("invalid size for user namespace remapping, %w", err)
+ }
+
+ if cID < 0 || hID < 0 || size < 0 {
+ return nil, fmt.Errorf("invalid mapping %s, all IDs and size must be positive integers", mapping)
+ }
+
+ return []syscall.SysProcIDMap{
+ {
+ ContainerID: cID,
+ HostID: hID,
+ Size: size,
+ },
+ }, nil
+}
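+
+// For example, the mapping string "0:1000:65536" parses to
+//
+//	[]syscall.SysProcIDMap{{ContainerID: 0, HostID: 1000, Size: 65536}}
+//
+// i.e. container IDs 0-65535 are backed by host IDs 1000-66535.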
+
+// IDMapMount applies the GID/UID shift according to gidmap/uidmap to the target path.
+func IDMapMount(source, target string, usernsFd int) (err error) {
+ var (
+ attr unix.MountAttr
+ )
+
+ attr.Attr_set = unix.MOUNT_ATTR_IDMAP
+ attr.Attr_clr = 0
+ attr.Propagation = 0
+ attr.Userns_fd = uint64(usernsFd)
+
+ dFd, err := unix.OpenTree(-int(unix.EBADF), source, uint(unix.OPEN_TREE_CLONE|unix.OPEN_TREE_CLOEXEC|unix.AT_EMPTY_PATH))
+ if err != nil {
+ return fmt.Errorf("Unable to open tree for %s: %w", target, err)
+ }
+
+ defer unix.Close(dFd)
+ if err = unix.MountSetattr(dFd, "", unix.AT_EMPTY_PATH, &attr); err != nil {
+ return fmt.Errorf("Unable to shift GID/UID for %s: %w", target, err)
+ }
+
+ if err = unix.MoveMount(dFd, "", -int(unix.EBADF), target, unix.MOVE_MOUNT_F_EMPTY_PATH); err != nil {
+ return fmt.Errorf("Unable to attach mount tree to %s: %w", target, err)
+ }
+ return nil
+}
+
+// GetUsernsFD forks the current process and creates a user namespace using
+// the specified mappings.
+func GetUsernsFD(uidmap, gidmap string) (_usernsFD *os.File, _ error) {
+ uidMaps, err := parseIDMapping(uidmap)
+ if err != nil {
+ return nil, err
+ }
+
+ gidMaps, err := parseIDMapping(gidmap)
+ if err != nil {
+ return nil, err
+ }
+ return getUsernsFD(uidMaps, gidMaps)
+}
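+
+// Usage sketch combining the two helpers; the mapping strings and paths are
+// examples only.
+//
+//	usernsFD, err := GetUsernsFD("0:1000:65536", "0:1000:65536")
+//	if err != nil {
+//		return err
+//	}
+//	defer usernsFD.Close()
+//	if err := IDMapMount("/lower", "/mnt/idmapped", int(usernsFD.Fd())); err != nil {
+//		return err
+//	}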
+
+func getUsernsFD(uidMaps, gidMaps []syscall.SysProcIDMap) (_usernsFD *os.File, retErr error) {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ pid, pidfd, errno := sys.ForkUserns()
+ if errno != 0 {
+ return nil, errno
+ }
+
+ pidFD := os.NewFile(pidfd, "pidfd")
+ defer func() {
+ unix.PidfdSendSignal(int(pidFD.Fd()), unix.SIGKILL, nil, 0)
+
+ pidfdWaitid(pidFD)
+
+ pidFD.Close()
+ }()
+
+ // NOTE:
+ //
+ // The usernsFD will hold the userns reference in kernel. Even if the
+ // child process is reaped, the usernsFD is still valid.
+ usernsFD, err := os.Open(fmt.Sprintf("/proc/%d/ns/user", pid))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get userns file descriptor for /proc/%d/user/ns: %w", pid, err)
+ }
+ defer func() {
+ if retErr != nil {
+ usernsFD.Close()
+ }
+ }()
+
+ uidmapFile, err := os.OpenFile(fmt.Sprintf("/proc/%d/%s", pid, "uid_map"), os.O_WRONLY, 0600)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open /proc/%d/uid_map: %w", pid, err)
+ }
+ defer uidmapFile.Close()
+
+ gidmapFile, err := os.OpenFile(fmt.Sprintf("/proc/%d/%s", pid, "gid_map"), os.O_WRONLY, 0600)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open /proc/%d/gid_map: %w", pid, err)
+ }
+ defer gidmapFile.Close()
+
+ testHookKillChildBeforePidfdSendSignal(pid, pidFD)
+
+ // Ensure the child process is still alive. If the err is ESRCH, we
+ // should return error because we can't guarantee the usernsFD and
+ // u[g]idmapFile are valid. It's safe to return error and retry.
+ if err := unix.PidfdSendSignal(int(pidFD.Fd()), 0, nil, 0); err != nil {
+ return nil, fmt.Errorf("failed to ensure child process is alive: %w", err)
+ }
+
+ testHookKillChildAfterPidfdSendSignal(pid, pidFD)
+
+ // NOTE:
+ //
+ // The u[g]id_map file descriptor is still valid if the child process
+ // is reaped.
+ writeMappings := func(f *os.File, idmap []syscall.SysProcIDMap) error {
+ mappings := ""
+ for _, m := range idmap {
+ mappings = fmt.Sprintf("%s%d %d %d\n", mappings, m.ContainerID, m.HostID, m.Size)
+ }
+
+ _, err := f.Write([]byte(mappings))
+ if err1 := f.Close(); err1 != nil && err == nil {
+ err = err1
+ }
+ return err
+ }
+
+ if err := writeMappings(uidmapFile, uidMaps); err != nil {
+ return nil, fmt.Errorf("failed to write uid_map: %w", err)
+ }
+
+ if err := writeMappings(gidmapFile, gidMaps); err != nil {
+ return nil, fmt.Errorf("failed to write gid_map: %w", err)
+ }
+ return usernsFD, nil
+}
+
+func pidfdWaitid(pidFD *os.File) error {
+ return sys.IgnoringEINTR(func() error {
+ return unix.Waitid(unix.P_PIDFD, int(pidFD.Fd()), nil, unix.WEXITED, nil)
+ })
+}
+
+var (
+ testHookLock sync.Mutex
+
+ testHookKillChildBeforePidfdSendSignal = func(_pid uintptr, _pidFD *os.File) {}
+
+ testHookKillChildAfterPidfdSendSignal = func(_pid uintptr, _pidFD *os.File) {}
+)
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/mount_linux.go b/vendor/github.com/containerd/containerd/v2/core/mount/mount_linux.go
new file mode 100644
index 000000000000..d76b31ede393
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/mount_linux.go
@@ -0,0 +1,606 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/containerd/log"
+ "github.com/moby/sys/userns"
+ "golang.org/x/sys/unix"
+)
+
+type mountOpt struct {
+ flags int
+ data []string
+ losetup bool
+ uidmap string
+ gidmap string
+}
+
+var (
+ pagesize = 4096
+ allowedHelperBinaries = []string{"mount.fuse", "mount.fuse3"}
+)
+
+func init() {
+ pagesize = os.Getpagesize()
+}
+
+// prepareIDMappedOverlay is a helper function to obtain
+// actual "lowerdir=..." mount options. It creates and
+// applies id mapping for each lowerdir.
+//
+// It returns:
+// 1. New options that include the new "lowerdir=..." mount option.
+// 2. A clean-up function -- it should be deferred before checking the
+// returned error; if you check the error first and skip the clean-up,
+// you end up with a dirty setup, since there is no guarantee that the
+// temporary mount points for the lowerdirs are cleaned up properly.
+// 3. An error -- nil if everything is fine.
+func prepareIDMappedOverlay(usernsFd int, options []string) ([]string, func(), error) {
+ lowerIdx, lowerDirs := findOverlayLowerdirs(options)
+ if lowerIdx == -1 {
+ return options, nil, fmt.Errorf("failed to parse overlay lowerdir's from given options")
+ }
+
+ tmpLowerdirs, idMapCleanUp, err := doPrepareIDMappedOverlay(lowerDirs, usernsFd)
+ if err != nil {
+ return options, idMapCleanUp, fmt.Errorf("failed to create idmapped mount: %w", err)
+ }
+
+ options = append(options[:lowerIdx], options[lowerIdx+1:]...)
+ options = append(options, fmt.Sprintf("lowerdir=%s", strings.Join(tmpLowerdirs, ":")))
+
+ return options, idMapCleanUp, nil
+}
+
+// Mount to the provided target path.
+//
+// If m.Type starts with "fuse." or "fuse3.", the "mount.fuse" or "mount.fuse3"
+// helper binary is called.
+func (m *Mount) mount(target string) (err error) {
+ for _, helperBinary := range allowedHelperBinaries {
+ // helperBinary = "mount.fuse", typePrefix = "fuse."
+ typePrefix := strings.TrimPrefix(helperBinary, "mount.") + "."
+ if strings.HasPrefix(m.Type, typePrefix) {
+ return m.mountWithHelper(helperBinary, typePrefix, target)
+ }
+ }
+ var (
+ chdir string
+ recalcOpt bool
+ usernsFd *os.File
+ options = m.Options
+ )
+
+ opt := parseMountOptions(options)
+	// Only the remapping of both GID and UID together is supported
+ if opt.uidmap != "" && opt.gidmap != "" {
+ if usernsFd, err = GetUsernsFD(opt.uidmap, opt.gidmap); err != nil {
+ return err
+ }
+ defer usernsFd.Close()
+
+		// overlay expects lowerdirs to be remapped instead
+ if m.Type == "overlay" {
+ var (
+ userNsCleanUp func()
+ )
+ options, userNsCleanUp, err = prepareIDMappedOverlay(int(usernsFd.Fd()), options)
+ defer userNsCleanUp()
+
+ if err != nil {
+ return fmt.Errorf("failed to prepare idmapped overlay: %w", err)
+ }
+			// To avoid concurrency issues when the same lowerdirs are used
+			// for different containers, they were replaced by the temporary
+			// directories above.
+ if optionsSize(options) >= pagesize-512 {
+ recalcOpt = true
+ } else {
+ opt = parseMountOptions(options)
+ }
+ }
+ }
+
+	// Avoid hitting the one-page limit of the mount argument buffer.
+	//
+	// NOTE: 512 bytes are kept as headroom during the pagesize check.
+ if m.Type == "overlay" && optionsSize(options) >= pagesize-512 {
+ chdir, options = compactLowerdirOption(options)
+		// recalculate opt in case the lowerdirs have been replaced
+		// by idmapped ones OR idmapped mounts are not used/supported.
+ if recalcOpt || (opt.uidmap == "" || opt.gidmap == "") {
+ opt = parseMountOptions(options)
+ }
+ }
+
+ // propagation types.
+ const ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
+
+ // Ensure propagation type change flags aren't included in other calls.
+ oflags := opt.flags &^ ptypes
+
+ var loopParams LoopParams
+ if opt.losetup {
+ loopParams = LoopParams{
+ Readonly: oflags&unix.MS_RDONLY == unix.MS_RDONLY,
+ Autoclear: true,
+ }
+ loopParams.Direct, opt.data = hasDirectIO(opt.data)
+ }
+
+ dataInStr := strings.Join(opt.data, ",")
+ if len(dataInStr) > pagesize {
+ return errors.New("mount options is too long")
+ }
+
+ // In the case of remounting with changed data (dataInStr != ""), need to call mount (moby/moby#34077).
+ if opt.flags&unix.MS_REMOUNT == 0 || dataInStr != "" {
+ // Initial call applying all non-propagation flags for mount
+ // or remount with changed data
+ source := m.Source
+ if opt.losetup {
+ loFile, err := setupLoop(m.Source, loopParams)
+ if err != nil {
+ return err
+ }
+ defer loFile.Close()
+
+ // Mount the loop device instead
+ source = loFile.Name()
+ }
+ if err := mountAt(chdir, source, target, m.Type, uintptr(oflags), dataInStr); err != nil {
+ return err
+ }
+ }
+
+ if opt.flags&ptypes != 0 {
+ // Change the propagation type.
+ const pflags = ptypes | unix.MS_REC | unix.MS_SILENT
+ if err := unix.Mount("", target, "", uintptr(opt.flags&pflags), ""); err != nil {
+ return err
+ }
+ }
+
+ const broflags = unix.MS_BIND | unix.MS_RDONLY
+ if oflags&broflags == broflags {
+ // Preserve CL_UNPRIVILEGED "locked" flags of the
+ // bind mount target when we remount to make the bind readonly.
+ // This is necessary to ensure that
+ // bind-mounting "with options" will not fail with user namespaces, due to
+ // kernel restrictions that require user namespace mounts to preserve
+ // CL_UNPRIVILEGED locked flags.
+ var unprivFlags int
+ if userns.RunningInUserNS() {
+ unprivFlags, err = getUnprivilegedMountFlags(target)
+ if err != nil {
+ return err
+ }
+ }
+ // Remount the bind to apply read only.
+ return unix.Mount("", target, "", uintptr(oflags|unprivFlags|unix.MS_REMOUNT), "")
+ }
+
+ // remap non-overlay mount point
+ if opt.uidmap != "" && opt.gidmap != "" && m.Type != "overlay" {
+ if err := IDMapMount(target, target, int(usernsFd.Fd())); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Get the set of mount flags that are set on the mount that contains the given
+// path and are locked by CL_UNPRIVILEGED.
+//
+// From /~https://github.com/moby/moby/blob/v23.0.1/daemon/oci_linux.go#L430-L460
+func getUnprivilegedMountFlags(path string) (int, error) {
+ var statfs unix.Statfs_t
+ if err := unix.Statfs(path, &statfs); err != nil {
+ return 0, err
+ }
+
+ // The set of keys come from /~https://github.com/torvalds/linux/blob/v4.13/fs/namespace.c#L1034-L1048.
+ unprivilegedFlags := []int{
+ unix.MS_RDONLY,
+ unix.MS_NODEV,
+ unix.MS_NOEXEC,
+ unix.MS_NOSUID,
+ unix.MS_NOATIME,
+ unix.MS_RELATIME,
+ unix.MS_NODIRATIME,
+ }
+
+ var flags int
+	for _, flag := range unprivilegedFlags {
+ if int(statfs.Flags)&flag == flag {
+ flags |= flag
+ }
+ }
+
+ return flags, nil
+}
+
+func doPrepareIDMappedOverlay(lowerDirs []string, usernsFd int) (tmpLowerDirs []string, _ func(), _ error) {
+ td, err := os.MkdirTemp(tempMountLocation, "ovl-idmapped")
+ if err != nil {
+ return nil, nil, err
+ }
+ cleanUp := func() {
+ for _, lowerDir := range tmpLowerDirs {
+ if err := unix.Unmount(lowerDir, 0); err != nil {
+ log.L.WithError(err).Warnf("failed to unmount temp lowerdir %s", lowerDir)
+ }
+ }
+ if terr := os.RemoveAll(filepath.Clean(filepath.Join(tmpLowerDirs[0], ".."))); terr != nil {
+ log.L.WithError(terr).Warnf("failed to remove temporary overlay lowerdir's")
+ }
+ }
+ for i, lowerDir := range lowerDirs {
+ tmpLowerDir := filepath.Join(td, strconv.Itoa(i))
+ tmpLowerDirs = append(tmpLowerDirs, tmpLowerDir)
+
+ if err = os.MkdirAll(tmpLowerDir, 0700); err != nil {
+ return nil, cleanUp, fmt.Errorf("failed to create temporary dir: %w", err)
+ }
+ if err = IDMapMount(lowerDir, tmpLowerDir, usernsFd); err != nil {
+ return nil, cleanUp, err
+ }
+ }
+ return tmpLowerDirs, cleanUp, nil
+}
+
+// Unmount the provided mount path with the flags
+func Unmount(target string, flags int) error {
+ if err := unmount(target, flags); err != nil && err != unix.EINVAL {
+ return err
+ }
+ return nil
+}
+
+// fuseSuperMagic is defined in statfs(2)
+const fuseSuperMagic = 0x65735546
+
+func isFUSE(dir string) bool {
+ var st unix.Statfs_t
+ if err := unix.Statfs(dir, &st); err != nil {
+ return false
+ }
+ return st.Type == fuseSuperMagic
+}
+
+// unmountFUSE attempts to unmount using fusermount/fusermount3 helper binary.
+//
+// For FUSE mounts, using these helper binaries is preferred, see:
+// /~https://github.com/containerd/containerd/pull/3765#discussion_r342083514
+func unmountFUSE(target string) error {
+ var err error
+ for _, helperBinary := range []string{"fusermount3", "fusermount"} {
+ cmd := exec.Command(helperBinary, "-u", target)
+ err = cmd.Run()
+ if err == nil {
+ return nil
+ }
+ }
+ return err
+}
+
+func unmount(target string, flags int) error {
+ if isFUSE(target) {
+ if err := unmountFUSE(target); err == nil {
+ return nil
+ }
+ }
+ for i := 0; i < 50; i++ {
+ if err := unix.Unmount(target, flags); err != nil {
+ switch err {
+ case unix.EBUSY:
+ time.Sleep(50 * time.Millisecond)
+ continue
+ default:
+ return err
+ }
+ }
+ return nil
+ }
+ return fmt.Errorf("failed to unmount target %s: %w", target, unix.EBUSY)
+}
+
+// UnmountAll repeatedly unmounts the given mount point until there
+// are no mounts remaining (EINVAL is returned by unmount), which is
+// useful for undoing a stack of mounts on the same mount point.
+// UnmountAll is a noop when the first argument is an empty string.
+// This is done when the containerd client did not specify any rootfs
+// mounts (e.g. because the rootfs is managed outside containerd).
+// UnmountAll is also a noop when the mount path does not exist.
+func UnmountAll(mount string, flags int) error {
+ if mount == "" {
+ return nil
+ }
+ if _, err := os.Stat(mount); os.IsNotExist(err) {
+ return nil
+ }
+
+ for {
+ if err := unmount(mount, flags); err != nil {
+ // EINVAL is returned if the target is not a
+ // mount point, indicating that we are
+ // done. It can also indicate a few other
+ // things (such as invalid flags) which we
+ // unfortunately end up squelching here too.
+ if err == unix.EINVAL {
+ return nil
+ }
+ return err
+ }
+ }
+}
+
+// parseMountOptions takes fstab style mount options and parses them for
+// use with a standard mount() syscall
+func parseMountOptions(options []string) (opt mountOpt) {
+ loopOpt := "loop"
+ flagsMap := map[string]struct {
+ clear bool
+ flag int
+ }{
+ "async": {true, unix.MS_SYNCHRONOUS},
+ "atime": {true, unix.MS_NOATIME},
+ "bind": {false, unix.MS_BIND},
+ "defaults": {false, 0},
+ "dev": {true, unix.MS_NODEV},
+ "diratime": {true, unix.MS_NODIRATIME},
+ "dirsync": {false, unix.MS_DIRSYNC},
+ "exec": {true, unix.MS_NOEXEC},
+ "mand": {false, unix.MS_MANDLOCK},
+ "noatime": {false, unix.MS_NOATIME},
+ "nodev": {false, unix.MS_NODEV},
+ "nodiratime": {false, unix.MS_NODIRATIME},
+ "noexec": {false, unix.MS_NOEXEC},
+ "nomand": {true, unix.MS_MANDLOCK},
+ "norelatime": {true, unix.MS_RELATIME},
+ "nostrictatime": {true, unix.MS_STRICTATIME},
+ "nosuid": {false, unix.MS_NOSUID},
+ "rbind": {false, unix.MS_BIND | unix.MS_REC},
+ "relatime": {false, unix.MS_RELATIME},
+ "remount": {false, unix.MS_REMOUNT},
+ "ro": {false, unix.MS_RDONLY},
+ "rw": {true, unix.MS_RDONLY},
+ "strictatime": {false, unix.MS_STRICTATIME},
+ "suid": {true, unix.MS_NOSUID},
+ "sync": {false, unix.MS_SYNCHRONOUS},
+ }
+ for _, o := range options {
+ // If the option does not exist in the flags table or the flag
+ // is not supported on the platform,
+ // then it is a data value for a specific fs type
+ if f, exists := flagsMap[o]; exists && f.flag != 0 {
+ if f.clear {
+ opt.flags &^= f.flag
+ } else {
+ opt.flags |= f.flag
+ }
+ } else if o == loopOpt {
+ opt.losetup = true
+ } else if strings.HasPrefix(o, "uidmap=") {
+ opt.uidmap = strings.TrimPrefix(o, "uidmap=")
+ } else if strings.HasPrefix(o, "gidmap=") {
+ opt.gidmap = strings.TrimPrefix(o, "gidmap=")
+ } else {
+ opt.data = append(opt.data, o)
+ }
+ }
+ return
+}
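+
+// For example, []string{"ro", "nosuid", "loop", "size=64m", "uidmap=0:1000:65536"}
+// parses to flags MS_RDONLY|MS_NOSUID, losetup true, uidmap "0:1000:65536", and
+// data ["size=64m"]; unrecognized options always fall through to data.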
+
+func hasDirectIO(opts []string) (bool, []string) {
+ for idx, opt := range opts {
+ if opt == "direct-io" {
+ return true, append(opts[:idx], opts[idx+1:]...)
+ }
+ }
+ return false, opts
+}
+
+// compactLowerdirOption updates the overlay lowerdir option and returns the
+// common dir among all the lowerdirs.
+func compactLowerdirOption(opts []string) (string, []string) {
+ idx, dirs := findOverlayLowerdirs(opts)
+ if idx == -1 || len(dirs) == 1 {
+ // no need to compact if there is only one lowerdir
+ return "", opts
+ }
+
+ // find out common dir
+ commondir := longestCommonPrefix(dirs)
+ if commondir == "" {
+ return "", opts
+ }
+
+	// NOTE: snapshot IDs are numeric, so the common prefix can end in the
+	// middle of an ID (e.g. "snapshots/1" is the prefix shared by
+	// "snapshots/1/fs" and "snapshots/10/fs"); step back to the parent dir
+	// to avoid that. This assumes the common dir is
+	// ${root}/io.containerd.v1.overlayfs/snapshots.
+ commondir = path.Dir(commondir)
+ if commondir == "/" || commondir == "." {
+ return "", opts
+ }
+ commondir = commondir + "/"
+
+ newdirs := make([]string, 0, len(dirs))
+ for _, dir := range dirs {
+ if len(dir) <= len(commondir) {
+ return "", opts
+ }
+ newdirs = append(newdirs, dir[len(commondir):])
+ }
+
+ newopts := copyOptions(opts)
+ newopts = append(newopts[:idx], newopts[idx+1:]...)
+ newopts = append(newopts, fmt.Sprintf("lowerdir=%s", strings.Join(newdirs, ":")))
+ return commondir, newopts
+}
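+
+// For example (paths illustrative),
+//
+//	lowerdir=/var/lib/containerd/snapshots/2/fs:/var/lib/containerd/snapshots/1/fs
+//
+// compacts to the common dir "/var/lib/containerd/snapshots/" plus
+//
+//	lowerdir=2/fs:1/fs
+//
+// mountAt then chdirs into the common dir so the relative paths resolve.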
+
+// findOverlayLowerdirs returns the index of the lowerdir option in the mount's
+// options and all the lowerdir targets.
+func findOverlayLowerdirs(opts []string) (int, []string) {
+ var (
+ idx = -1
+ prefix = "lowerdir="
+ )
+
+ for i, opt := range opts {
+ if strings.HasPrefix(opt, prefix) {
+ idx = i
+ break
+ }
+ }
+
+ if idx == -1 {
+ return -1, nil
+ }
+ return idx, strings.Split(opts[idx][len(prefix):], ":")
+}
+
+// longestCommonPrefix finds the longest common prefix in the string slice.
+func longestCommonPrefix(strs []string) string {
+ if len(strs) == 0 {
+ return ""
+ } else if len(strs) == 1 {
+ return strs[0]
+ }
+
+ // find out the min/max value by alphabetical order
+ min, max := strs[0], strs[0]
+ for _, str := range strs[1:] {
+ if min > str {
+ min = str
+ }
+ if max < str {
+ max = str
+ }
+ }
+
+ // find out the common part between min and max
+ for i := 0; i < len(min) && i < len(max); i++ {
+ if min[i] != max[i] {
+ return min[:i]
+ }
+ }
+ return min
+}
+
+// copyOptions copies the options.
+func copyOptions(opts []string) []string {
+ if len(opts) == 0 {
+ return nil
+ }
+
+ acopy := make([]string, len(opts))
+ copy(acopy, opts)
+ return acopy
+}
+
+// optionsSize returns the byte size of options of mount.
+func optionsSize(opts []string) int {
+ size := 0
+ for _, opt := range opts {
+ size += len(opt)
+ }
+ return size
+}
+
+func mountAt(chdir string, source, target, fstype string, flags uintptr, data string) error {
+ if chdir == "" {
+ return unix.Mount(source, target, fstype, flags, data)
+ }
+
+ ch := make(chan error, 1)
+ go func() {
+ runtime.LockOSThread()
+
+ // Do not unlock this thread.
+ // If the thread is unlocked go will try to use it for other goroutines.
+ // However it is not possible to restore the thread state after CLONE_FS.
+ //
+ // Once the goroutine exits the thread should eventually be terminated by go.
+
+ if err := unix.Unshare(unix.CLONE_FS); err != nil {
+ ch <- err
+ return
+ }
+
+ if err := unix.Chdir(chdir); err != nil {
+ ch <- err
+ return
+ }
+
+ ch <- unix.Mount(source, target, fstype, flags, data)
+ }()
+ return <-ch
+}
+
+func (m *Mount) mountWithHelper(helperBinary, typePrefix, target string) error {
+ // helperBinary: "mount.fuse3"
+ // target: "/foo/merged"
+ // m.Type: "fuse3.fuse-overlayfs"
+ // command: "mount.fuse3 overlay /foo/merged -o lowerdir=/foo/lower2:/foo/lower1,upperdir=/foo/upper,workdir=/foo/work -t fuse-overlayfs"
+ args := []string{m.Source, target}
+ for _, o := range m.Options {
+ args = append(args, "-o", o)
+ }
+ args = append(args, "-t", strings.TrimPrefix(m.Type, typePrefix))
+
+ infoBeforeMount, err := Lookup(target)
+ if err != nil {
+ return err
+ }
+
+ // cmd.CombinedOutput() may intermittently return ECHILD because of our signal handling in shim.
+ // See #4387 and wait(2).
+ const retriesOnECHILD = 10
+ for i := 0; i < retriesOnECHILD; i++ {
+ cmd := exec.Command(helperBinary, args...)
+ out, err := cmd.CombinedOutput()
+ if err == nil {
+ return nil
+ }
+ if !errors.Is(err, unix.ECHILD) {
+ return fmt.Errorf("mount helper [%s %v] failed: %q: %w", helperBinary, args, string(out), err)
+ }
+		// We got ECHILD; we are not sure whether the mount was successful.
+ // If the mount ID has changed, we are sure we got some new mount, but still not sure it is fully completed.
+ // So we attempt to unmount the new mount before retrying.
+ infoAfterMount, err := Lookup(target)
+ if err != nil {
+ return err
+ }
+ if infoAfterMount.ID != infoBeforeMount.ID {
+ _ = unmount(target, 0)
+ }
+ }
+ return fmt.Errorf("mount helper [%s %v] failed with ECHILD (retried %d times)", helperBinary, args, retriesOnECHILD)
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/mount_unix.go b/vendor/github.com/containerd/containerd/v2/core/mount/mount_unix.go
new file mode 100644
index 000000000000..49a17152b4ca
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/mount_unix.go
@@ -0,0 +1,71 @@
+//go:build !windows && !darwin && !openbsd
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import (
+ "os"
+ "sort"
+
+ "github.com/moby/sys/mountinfo"
+)
+
+// UnmountRecursive unmounts the target and all mounts underneath, starting
+// with the deepest mount first.
+func UnmountRecursive(target string, flags int) error {
+ if target == "" {
+ return nil
+ }
+
+ target, err := CanonicalizePath(target)
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = nil
+ }
+ return err
+ }
+
+ mounts, err := mountinfo.GetMounts(mountinfo.PrefixFilter(target))
+ if err != nil {
+ return err
+ }
+
+ targetSet := make(map[string]struct{})
+ for _, m := range mounts {
+ targetSet[m.Mountpoint] = struct{}{}
+ }
+
+ var targets []string
+ for m := range targetSet {
+ targets = append(targets, m)
+ }
+
+ // Make the deepest mount be first
+ sort.SliceStable(targets, func(i, j int) bool {
+ return len(targets[i]) > len(targets[j])
+ })
+
+ for i, target := range targets {
+ if err := UnmountAll(target, flags); err != nil {
+ if i == len(targets)-1 { // last mount
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/mount_unsupported.go b/vendor/github.com/containerd/containerd/v2/core/mount/mount_unsupported.go
new file mode 100644
index 000000000000..894467a9933e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/mount_unsupported.go
@@ -0,0 +1,46 @@
+//go:build darwin || openbsd
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import "errors"
+
+var (
+ // ErrNotImplementOnUnix is returned for methods that are not implemented
+ ErrNotImplementOnUnix = errors.New("not implemented under unix")
+)
+
+// Mount is not implemented on this platform
+func (m *Mount) mount(target string) error {
+ return ErrNotImplementOnUnix
+}
+
+// Unmount is not implemented on this platform
+func Unmount(mount string, flags int) error {
+ return ErrNotImplementOnUnix
+}
+
+// UnmountAll is not implemented on this platform
+func UnmountAll(mount string, flags int) error {
+ return ErrNotImplementOnUnix
+}
+
+// UnmountRecursive is not implemented on this platform
+func UnmountRecursive(mount string, flags int) error {
+ return ErrNotImplementOnUnix
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/mount_windows.go b/vendor/github.com/containerd/containerd/v2/core/mount/mount_windows.go
new file mode 100644
index 000000000000..91ac6968d2f7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/mount_windows.go
@@ -0,0 +1,190 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/Microsoft/go-winio/pkg/bindfilter"
+ "github.com/Microsoft/hcsshim"
+ "github.com/containerd/log"
+ "golang.org/x/sys/windows"
+)
+
+const sourceStreamName = "containerd.io-source"
+
+var (
+ // ErrNotImplementOnWindows is returned when an action is not implemented for windows
+ ErrNotImplementOnWindows = errors.New("not implemented under windows")
+)
+
+// Mount to the provided target.
+func (m *Mount) mount(target string) (retErr error) {
+ if m.Type != "windows-layer" {
+ return fmt.Errorf("invalid windows mount type: '%s'", m.Type)
+ }
+
+ home, layerID := filepath.Split(m.Source)
+
+ parentLayerPaths, err := m.GetParentPaths()
+ if err != nil {
+ return err
+ }
+
+ var di = hcsshim.DriverInfo{
+ HomeDir: home,
+ }
+
+ if err := hcsshim.ActivateLayer(di, layerID); err != nil {
+ return fmt.Errorf("failed to activate layer %s: %w", m.Source, err)
+ }
+ defer func() {
+ if retErr != nil {
+ if layerErr := hcsshim.DeactivateLayer(di, layerID); layerErr != nil {
+ log.G(context.TODO()).WithError(layerErr).Error("failed to deactivate layer during mount failure cleanup")
+ }
+ }
+ }()
+
+ if err := hcsshim.PrepareLayer(di, layerID, parentLayerPaths); err != nil {
+ return fmt.Errorf("failed to prepare layer %s: %w", m.Source, err)
+ }
+
+ defer func() {
+ if retErr != nil {
+ if layerErr := hcsshim.UnprepareLayer(di, layerID); layerErr != nil {
+ log.G(context.TODO()).WithError(layerErr).Error("failed to unprepare layer during mount failure cleanup")
+ }
+ }
+ }()
+
+ volume, err := hcsshim.GetLayerMountPath(di, layerID)
+ if err != nil {
+ return fmt.Errorf("failed to get volume path for layer %s: %w", m.Source, err)
+ }
+
+ if len(parentLayerPaths) == 0 {
+		// This is a base layer. It gets mounted without going through WCIFS. We need to mount the Files
+		// folder, not the actual source, or the client may inadvertently remove metadata files.
+ volume = filepath.Join(volume, "Files")
+ if _, err := os.Stat(volume); err != nil {
+ return fmt.Errorf("no Files folder in layer %s", layerID)
+ }
+ }
+ if err := bindfilter.ApplyFileBinding(target, volume, m.ReadOnly()); err != nil {
+ return fmt.Errorf("failed to set volume mount path for layer %s: %w", m.Source, err)
+ }
+ defer func() {
+ if retErr != nil {
+ if bindErr := bindfilter.RemoveFileBinding(target); bindErr != nil {
+ log.G(context.TODO()).WithError(bindErr).Error("failed to remove binding during mount failure cleanup")
+ }
+ }
+ }()
+
+ // Add an Alternate Data Stream to record the layer source.
+ // See https://docs.microsoft.com/en-au/archive/blogs/askcore/alternate-data-streams-in-ntfs
+ // for details on Alternate Data Streams.
+ if err := os.WriteFile(filepath.Clean(target)+":"+sourceStreamName, []byte(m.Source), 0666); err != nil {
+ return fmt.Errorf("failed to record source for layer %s: %w", m.Source, err)
+ }
+
+ return nil
+}
+
+// ParentLayerPathsFlag is the options flag used to represent the JSON encoded
+// list of parent layers required to use the layer
+const ParentLayerPathsFlag = "parentLayerPaths="
+
+// GetParentPaths of the mount
+func (m *Mount) GetParentPaths() ([]string, error) {
+ var parentLayerPaths []string
+ for _, option := range m.Options {
+ if strings.HasPrefix(option, ParentLayerPathsFlag) {
+ err := json.Unmarshal([]byte(option[len(ParentLayerPathsFlag):]), &parentLayerPaths)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal parent layer paths from mount: %w", err)
+ }
+ }
+ }
+ return parentLayerPaths, nil
+}
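+
+// The option value is a JSON-encoded array of layer paths, for example
+// (paths hypothetical):
+//
+//	parentLayerPaths=["C:\\layers\\base","C:\\layers\\mid"]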
+
+// Unmount the mount at the provided path
+func Unmount(mount string, flags int) error {
+ mount = filepath.Clean(mount)
+ adsFile := mount + ":" + sourceStreamName
+ var layerPath string
+
+ if _, err := os.Lstat(adsFile); err == nil {
+ layerPathb, err := os.ReadFile(mount + ":" + sourceStreamName)
+ if err != nil {
+ return fmt.Errorf("failed to retrieve source for layer %s: %w", mount, err)
+ }
+ layerPath = string(layerPathb)
+ }
+
+ if err := bindfilter.RemoveFileBinding(mount); err != nil {
+ if errors.Is(err, windows.ERROR_INVALID_PARAMETER) || errors.Is(err, windows.ERROR_NOT_FOUND) {
+ // not a mount point
+ return nil
+ }
+ return fmt.Errorf("removing mount: %w", err)
+ }
+
+ if layerPath != "" {
+ var (
+ home, layerID = filepath.Split(layerPath)
+ di = hcsshim.DriverInfo{
+ HomeDir: home,
+ }
+ )
+
+ if err := hcsshim.UnprepareLayer(di, layerID); err != nil {
+ return fmt.Errorf("failed to unprepare layer %s: %w", mount, err)
+ }
+
+ if err := hcsshim.DeactivateLayer(di, layerID); err != nil {
+ return fmt.Errorf("failed to deactivate layer %s: %w", mount, err)
+ }
+ }
+ return nil
+}
+
+// UnmountAll unmounts from the provided path
+func UnmountAll(mount string, flags int) error {
+ if mount == "" {
+ // This isn't an error, per the EINVAL handling in the Linux version
+ return nil
+ }
+ if _, err := os.Stat(mount); os.IsNotExist(err) {
+ return nil
+ }
+
+ return Unmount(mount, flags)
+}
+
+// UnmountRecursive unmounts from the provided path
+func UnmountRecursive(mount string, flags int) error {
+ return UnmountAll(mount, flags)
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/mountinfo.go b/vendor/github.com/containerd/containerd/v2/core/mount/mountinfo.go
new file mode 100644
index 000000000000..a42238d867fd
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/mountinfo.go
@@ -0,0 +1,23 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import "github.com/moby/sys/mountinfo"
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info = mountinfo.Info
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/temp.go b/vendor/github.com/containerd/containerd/v2/core/mount/temp.go
new file mode 100644
index 000000000000..ba37ba5f3dbc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/temp.go
@@ -0,0 +1,126 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/containerd/log"
+)
+
+var tempMountLocation = getTempDir()
+
+// WithTempMount mounts the provided mounts to a temp dir, and passes the temp dir to f.
+// The mounts are valid during the call to f.
+// Finally we unmount and remove the temp dir regardless of the result of f.
+//
+// NOTE: The volatile option of overlayfs doesn't allow mounting again using the
+// same upper/work dirs. Since this is a temp mount, that option is dropped here
+// if found.
+func WithTempMount(ctx context.Context, mounts []Mount, f func(root string) error) (err error) {
+ root, uerr := os.MkdirTemp(tempMountLocation, "containerd-mount")
+ if uerr != nil {
+ return fmt.Errorf("failed to create temp dir: %w", uerr)
+ }
+	// We use Remove here instead of RemoveAll.
+	// RemoveAll would delete the temp dir and all children it contains.
+	// When the Unmount fails, RemoveAll would incorrectly delete data from
+	// the mounted dir. With Remove, even though we won't successfully
+	// delete the temp dir and it may leak, we won't lose data from the
+	// mounted dir.
+	// For details, please refer to #1868 #1785.
+ defer func() {
+ if uerr = os.Remove(root); uerr != nil {
+ log.G(ctx).WithError(uerr).WithField("dir", root).Error("failed to remove mount temp dir")
+ }
+ }()
+
+	// Register the unmount via defer before mounting; otherwise the
+	// already-applied mounts would not be unmounted when All fails partway.
+ defer func() {
+ if uerr = UnmountMounts(mounts, root, 0); uerr != nil {
+ uerr = fmt.Errorf("failed to unmount %s: %w", root, uerr)
+ if err == nil {
+ err = uerr
+ } else {
+ err = fmt.Errorf("%s: %w", uerr.Error(), err)
+ }
+ }
+ }()
+
+ if uerr = All(RemoveVolatileOption(mounts), root); uerr != nil {
+ return fmt.Errorf("failed to mount %s: %w", root, uerr)
+ }
+ if err := f(root); err != nil {
+ return fmt.Errorf("mount callback failed on %s: %w", root, err)
+ }
+ return nil
+}
+
+// RemoveVolatileOption copies the mounts and removes the volatile option for the
+// overlay type, since overlayfs doesn't allow mounting again using the same
+// upper/work dirs.
+//
+// REF: https://docs.kernel.org/filesystems/overlayfs.html#volatile-mount
+//
+// TODO: Make this logic conditional once the kernel supports reusing
+// overlayfs volatile mounts.
+func RemoveVolatileOption(mounts []Mount) []Mount {
+ var out []Mount
+ for i, m := range mounts {
+ if m.Type != "overlay" {
+ continue
+ }
+ for j, opt := range m.Options {
+ if opt == "volatile" {
+ if out == nil {
+ out = copyMounts(mounts)
+ }
+ out[i].Options = append(out[i].Options[:j], out[i].Options[j+1:]...)
+ break
+ }
+ }
+ }
+
+ if out != nil {
+ return out
+ }
+
+ return mounts
+}
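+
+// For example, an overlay mount with Options
+//
+//	[]string{"volatile", "lowerdir=/l", "upperdir=/u", "workdir=/w"}
+//
+// comes back as ["lowerdir=/l", "upperdir=/u", "workdir=/w"]; mounts of other
+// types, and the caller's original slice, are left untouched.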
+
+// copyMounts creates a copy of the original slice to allow for modification without altering the original.
+func copyMounts(in []Mount) []Mount {
+ out := make([]Mount, len(in))
+ copy(out, in)
+ return out
+}
+
+// WithReadonlyTempMount mounts the provided mounts to a temp dir as readonly,
+// and passes the temp dir to f. The mounts are valid during the call to f.
+// Finally we unmount and remove the temp dir regardless of the result of f.
+func WithReadonlyTempMount(ctx context.Context, mounts []Mount, f func(root string) error) (err error) {
+ return WithTempMount(ctx, readonlyMounts(mounts), f)
+}
+
+func getTempDir() string {
+ if xdg := os.Getenv("XDG_RUNTIME_DIR"); xdg != "" {
+ return xdg
+ }
+ return os.TempDir()
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/temp_unix.go b/vendor/github.com/containerd/containerd/v2/core/mount/temp_unix.go
new file mode 100644
index 000000000000..8845e47aa1f2
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/temp_unix.go
@@ -0,0 +1,60 @@
+//go:build !windows && !darwin
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import (
+ "os"
+ "sort"
+
+ "github.com/moby/sys/mountinfo"
+)
+
+// SetTempMountLocation sets the temporary mount location
+func SetTempMountLocation(root string) error {
+ err := os.MkdirAll(root, 0700)
+ if err != nil {
+ return err
+ }
+ // We need to pass a canonicalized path to mountinfo.PrefixFilter in CleanupTempMounts
+ tempMountLocation, err = CanonicalizePath(root)
+ return err
+}
+
+// CleanupTempMounts unmounts all temp mounts and removes their directories
+func CleanupTempMounts(flags int) (warnings []error, err error) {
+ mounts, err := mountinfo.GetMounts(mountinfo.PrefixFilter(tempMountLocation))
+ if err != nil {
+ return nil, err
+ }
+
+ // Sort so that the deepest mounts come first
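+ // (child mountpoints must be unmounted before their parents, e.g. a
+ // hypothetical <tmp>/m1/rootfs before <tmp>/m1)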
+ sort.Slice(mounts, func(i, j int) bool {
+ return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
+ })
+ for _, mount := range mounts {
+ if err := UnmountAll(mount.Mountpoint, flags); err != nil {
+ warnings = append(warnings, err)
+ continue
+ }
+ if err := os.Remove(mount.Mountpoint); err != nil {
+ warnings = append(warnings, err)
+ }
+ }
+ return warnings, nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/mount/temp_unsupported.go b/vendor/github.com/containerd/containerd/v2/core/mount/temp_unsupported.go
new file mode 100644
index 000000000000..3ccc0444ff91
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/mount/temp_unsupported.go
@@ -0,0 +1,29 @@
+//go:build windows || darwin
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+// SetTempMountLocation sets the temporary mount location
+func SetTempMountLocation(root string) error {
+ return nil
+}
+
+// CleanupTempMounts unmounts all temp mounts and removes their directories
+func CleanupTempMounts(flags int) ([]error, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/fetch.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/fetch.go
new file mode 100644
index 000000000000..16ea609a13be
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/fetch.go
@@ -0,0 +1,234 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package auth
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ remoteserrors "github.com/containerd/containerd/v2/core/remotes/errors"
+ "github.com/containerd/containerd/v2/pkg/tracing"
+ "github.com/containerd/containerd/v2/version"
+ "github.com/containerd/log"
+)
+
+var (
+ // ErrNoToken is returned if a request is successful but the body does not
+ // contain an authorization token.
+ ErrNoToken = errors.New("authorization server did not include a token in the response")
+)
+
+// GenerateTokenOptions generates options for fetching a token based on a challenge
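+//
+// For example, a Bearer challenge carrying (hypothetical values)
+//
+//	realm="https://auth.example.com/token",service="registry.example.com",scope="repository:foo/bar:pull"
+//
+// yields TokenOptions with the corresponding Realm, Service and Scopes.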
+func GenerateTokenOptions(ctx context.Context, host, username, secret string, c Challenge) (TokenOptions, error) {
+ realm, ok := c.Parameters["realm"]
+ if !ok {
+ return TokenOptions{}, errors.New("no realm specified for token auth challenge")
+ }
+
+ realmURL, err := url.Parse(realm)
+ if err != nil {
+ return TokenOptions{}, fmt.Errorf("invalid token auth challenge realm: %w", err)
+ }
+
+ to := TokenOptions{
+ Realm: realmURL.String(),
+ Service: c.Parameters["service"],
+ Username: username,
+ Secret: secret,
+ }
+
+ scope, ok := c.Parameters["scope"]
+ if ok {
+ to.Scopes = append(to.Scopes, strings.Split(scope, " ")...)
+ } else {
+ log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge")
+ }
+
+ return to, nil
+}
+
+// TokenOptions are options for requesting a token
+type TokenOptions struct {
+ Realm string
+ Service string
+ Scopes []string
+ Username string
+ Secret string
+
+ // FetchRefreshToken enables fetching a refresh token (aka "identity token", "offline token") along with the bearer token.
+ //
+ // For HTTP GET mode (FetchToken), FetchRefreshToken sets `offline_token=true` in the request.
+ // https://docs.docker.com/registry/spec/auth/token/#requesting-a-token
+ //
+ // For HTTP POST mode (FetchTokenWithOAuth), FetchRefreshToken sets `access_type=offline` in the request.
+ // https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token
+ FetchRefreshToken bool
+}
+
+// OAuthTokenResponse is the response from fetching a token with an OAuth POST request
+type OAuthTokenResponse struct {
+ AccessToken string `json:"access_token"`
+ RefreshToken string `json:"refresh_token"`
+ ExpiresInSeconds int `json:"expires_in"`
+ IssuedAt time.Time `json:"issued_at"`
+ Scope string `json:"scope"`
+}
+
+// FetchTokenWithOAuth fetches a token using a POST request
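+// The request body is form-encoded, e.g. for the password grant
+// (hypothetical values):
+//
+//	grant_type=password&username=user&password=secret&service=registry.example.com&client_id=containerd-client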
+func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.Header, clientID string, to TokenOptions) (*OAuthTokenResponse, error) {
+ c := *client
+ client = &c
+ tracing.UpdateHTTPClient(client, tracing.Name("remotes.docker.resolver", "FetchTokenWithOAuth"))
+
+ form := url.Values{}
+ if len(to.Scopes) > 0 {
+ form.Set("scope", strings.Join(to.Scopes, " "))
+ }
+ form.Set("service", to.Service)
+ form.Set("client_id", clientID)
+
+ if to.Username == "" {
+ form.Set("grant_type", "refresh_token")
+ form.Set("refresh_token", to.Secret)
+ } else {
+ form.Set("grant_type", "password")
+ form.Set("username", to.Username)
+ form.Set("password", to.Secret)
+ }
+ if to.FetchRefreshToken {
+ form.Set("access_type", "offline")
+ }
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, to.Realm, strings.NewReader(form.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+ for k, v := range headers {
+ req.Header[k] = append(req.Header[k], v...)
+ }
+ if len(req.Header.Get("User-Agent")) == 0 {
+ req.Header.Set("User-Agent", "containerd/"+version.Version)
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ return nil, remoteserrors.NewUnexpectedStatusErr(resp)
+ }
+
+ decoder := json.NewDecoder(resp.Body)
+
+ var tr OAuthTokenResponse
+ if err = decoder.Decode(&tr); err != nil {
+ return nil, fmt.Errorf("unable to decode token response: %w", err)
+ }
+
+ if tr.AccessToken == "" {
+ return nil, ErrNoToken
+ }
+
+ return &tr, nil
+}
+
+// FetchTokenResponse is the response from fetching a token with a GET request
+type FetchTokenResponse struct {
+ Token string `json:"token"`
+ AccessToken string `json:"access_token"`
+ ExpiresInSeconds int `json:"expires_in"`
+ IssuedAt time.Time `json:"issued_at"`
+ RefreshToken string `json:"refresh_token"`
+}
+
+// FetchToken fetches a token using a GET request
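+// Parameters are passed in the query string, e.g. (hypothetical values):
+//
+//	GET https://auth.example.com/token?service=registry.example.com&scope=repository:foo/bar:pull
+//
+// using HTTP basic auth when a secret is configured.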
+func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) {
+ c := *client
+ client = &c
+ tracing.UpdateHTTPClient(client, tracing.Name("remotes.docker.resolver", "FetchToken"))
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, to.Realm, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range headers {
+ req.Header[k] = append(req.Header[k], v...)
+ }
+ if len(req.Header.Get("User-Agent")) == 0 {
+ req.Header.Set("User-Agent", "containerd/"+version.Version)
+ }
+
+ reqParams := req.URL.Query()
+
+ if to.Service != "" {
+ reqParams.Add("service", to.Service)
+ }
+
+ for _, scope := range to.Scopes {
+ reqParams.Add("scope", scope)
+ }
+
+ if to.Secret != "" {
+ req.SetBasicAuth(to.Username, to.Secret)
+ }
+
+ if to.FetchRefreshToken {
+ reqParams.Add("offline_token", "true")
+ }
+
+ req.URL.RawQuery = reqParams.Encode()
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ return nil, remoteserrors.NewUnexpectedStatusErr(resp)
+ }
+
+ decoder := json.NewDecoder(resp.Body)
+
+ var tr FetchTokenResponse
+ if err = decoder.Decode(&tr); err != nil {
+ return nil, fmt.Errorf("unable to decode token response: %w", err)
+ }
+
+ // `access_token` is equivalent to `token` and if both are specified
+ // the choice is undefined. Canonicalize `access_token` by sticking
+ // things in `token`.
+ if tr.AccessToken != "" {
+ tr.Token = tr.AccessToken
+ }
+
+ if tr.Token == "" {
+ return nil, ErrNoToken
+ }
+
+ return &tr, nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/parse.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/parse.go
new file mode 100644
index 000000000000..6ca935158ea6
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/parse.go
@@ -0,0 +1,200 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package auth
+
+import (
+ "net/http"
+ "sort"
+ "strings"
+)
+
+// AuthenticationScheme defines the scheme of the authentication method
+type AuthenticationScheme byte
+
+const (
+ // BasicAuth is the scheme for Basic HTTP Authentication (RFC 7617)
+ BasicAuth AuthenticationScheme = 1 << iota
+ // DigestAuth is the scheme for HTTP Digest Access Authentication (RFC 7616)
+ DigestAuth
+ // BearerAuth is the scheme for OAuth 2.0 Bearer Tokens (RFC 6750)
+ BearerAuth
+)
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+ // Scheme is the auth-scheme according to RFC 2617
+ Scheme AuthenticationScheme
+
+ // Parameters are the auth-params according to RFC 2617
+ Parameters map[string]string
+}
+
+type byScheme []Challenge
+
+func (bs byScheme) Len() int { return len(bs) }
+func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
+
+// Less sorts in priority order: token > digest > basic
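+// (BearerAuth has the highest bit value, so a plain ">" comparison yields
+// bearer-first order.)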
+func (bs byScheme) Less(i, j int) bool { return bs[i].Scheme > bs[j].Scheme }
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // OCTET      = <any 8-bit sequence of data>
+ // CHAR       = <any US-ASCII character (octets 0 - 127)>
+ // CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR         = <US-ASCII CR, carriage return (13)>
+ // LF         = <US-ASCII LF, linefeed (10)>
+ // SP         = <US-ASCII SP, space (32)>
+ // HT         = <US-ASCII HT, horizontal-tab (9)>
+ // <">        = <US-ASCII double-quote mark (34)>
+ // CRLF       = CR LF
+ // LWS        = [CRLF] 1*( SP | HT )
+ // TEXT       = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ //              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token      = 1*<any CHAR except CTLs or separators>
+ // qdtext     = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+ if strings.ContainsRune(" \t\r\n", rune(c)) {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+// ParseAuthHeader parses challenges from WWW-Authenticate header
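+//
+// For example (hypothetical realm), the header
+//
+//	WWW-Authenticate: Bearer realm="https://auth.example.com/token",service="registry.example.com"
+//
+// parses to a single Challenge with Scheme BearerAuth and Parameters
+// {"realm": "https://auth.example.com/token", "service": "registry.example.com"}.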
+func ParseAuthHeader(header http.Header) []Challenge {
+ challenges := []Challenge{}
+ for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+ v, p := parseValueAndParams(h)
+ var s AuthenticationScheme
+ switch v {
+ case "basic":
+ s = BasicAuth
+ case "digest":
+ s = DigestAuth
+ case "bearer":
+ s = BearerAuth
+ default:
+ continue
+ }
+ challenges = append(challenges, Challenge{Scheme: s, Parameters: p})
+ }
+ sort.Stable(byScheme(challenges))
+ return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+ params = make(map[string]string)
+ value, s := expectToken(header)
+ if value == "" {
+ return
+ }
+ value = strings.ToLower(value)
+ for {
+ var pkey string
+ pkey, s = expectToken(skipSpace(s))
+ if pkey == "" {
+ return
+ }
+ if !strings.HasPrefix(s, "=") {
+ return
+ }
+ var pvalue string
+ pvalue, s = expectTokenOrQuoted(s[1:])
+ pkey = strings.ToLower(pkey)
+ params[pkey] = pvalue
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ",") {
+ return
+ }
+ s = s[1:]
+ }
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isToken == 0 {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
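+// expectTokenOrQuoted consumes either a bare token or a quoted-string from s;
+// e.g. the input `"a\"b",next` yields the value `a"b` with rest `,next`.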
+func expectTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return expectToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/authorizer.go
new file mode 100644
index 000000000000..01fc792fa4a9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/authorizer.go
@@ -0,0 +1,378 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containerd/containerd/v2/core/remotes/docker/auth"
+ remoteerrors "github.com/containerd/containerd/v2/core/remotes/errors"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+)
+
+type dockerAuthorizer struct {
+ credentials func(string) (string, string, error)
+
+ client *http.Client
+ header http.Header
+ mu sync.RWMutex
+
+ // indexed by host name
+ handlers map[string]*authHandler
+
+ onFetchRefreshToken OnFetchRefreshToken
+}
+
+type authorizerConfig struct {
+ credentials func(string) (string, string, error)
+ client *http.Client
+ header http.Header
+ onFetchRefreshToken OnFetchRefreshToken
+}
+
+// AuthorizerOpt configures an authorizer
+type AuthorizerOpt func(*authorizerConfig)
+
+// WithAuthClient provides the HTTP client for the authorizer
+func WithAuthClient(client *http.Client) AuthorizerOpt {
+ return func(opt *authorizerConfig) {
+ opt.client = client
+ }
+}
+
+// WithAuthCreds provides a credential function to the authorizer
+func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt {
+ return func(opt *authorizerConfig) {
+ opt.credentials = creds
+ }
+}
+
+// WithAuthHeader provides HTTP headers for authorization
+func WithAuthHeader(hdr http.Header) AuthorizerOpt {
+ return func(opt *authorizerConfig) {
+ opt.header = hdr
+ }
+}
+
+// OnFetchRefreshToken is called when a refresh token is fetched.
+type OnFetchRefreshToken func(ctx context.Context, refreshToken string, req *http.Request)
+
+// WithFetchRefreshToken enables fetching "refresh token" (aka "identity token", "offline token").
+func WithFetchRefreshToken(f OnFetchRefreshToken) AuthorizerOpt {
+ return func(opt *authorizerConfig) {
+ opt.onFetchRefreshToken = f
+ }
+}
+
+// NewDockerAuthorizer creates an authorizer using Docker's registry
+// authentication spec.
+// See https://docs.docker.com/registry/spec/auth/
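+//
+// A typical construction (sketch):
+//
+//	authorizer := NewDockerAuthorizer(
+//		WithAuthClient(client),
+//		WithAuthCreds(func(host string) (string, string, error) {
+//			return "", "", nil // anonymous
+//		}),
+//	)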
+func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer {
+ var ao authorizerConfig
+ for _, opt := range opts {
+ opt(&ao)
+ }
+
+ if ao.client == nil {
+ ao.client = http.DefaultClient
+ }
+
+ return &dockerAuthorizer{
+ credentials: ao.credentials,
+ client: ao.client,
+ header: ao.header,
+ handlers: make(map[string]*authHandler),
+ onFetchRefreshToken: ao.onFetchRefreshToken,
+ }
+}
+
+// Authorize handles the auth request.
+func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error {
+ // skip if there is no auth handler
+ ah := a.getAuthHandler(req.URL.Host)
+ if ah == nil {
+ return nil
+ }
+
+ auth, refreshToken, err := ah.authorize(ctx)
+ if err != nil {
+ return err
+ }
+
+ req.Header.Set("Authorization", auth)
+
+ if refreshToken != "" {
+ a.mu.RLock()
+ onFetchRefreshToken := a.onFetchRefreshToken
+ a.mu.RUnlock()
+ if onFetchRefreshToken != nil {
+ onFetchRefreshToken(ctx, refreshToken, req)
+ }
+ }
+ return nil
+}
+
+func (a *dockerAuthorizer) getAuthHandler(host string) *authHandler {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ return a.handlers[host]
+}
+
+func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error {
+ last := responses[len(responses)-1]
+ host := last.Request.URL.Host
+
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ for _, c := range auth.ParseAuthHeader(last.Header) {
+ if c.Scheme == auth.BearerAuth {
+ if retry, err := invalidAuthorization(ctx, c, responses); err != nil {
+ delete(a.handlers, host)
+ return err
+ } else if retry {
+ delete(a.handlers, host)
+ }
+
+ // reuse existing handler
+ //
+ // Assume that a registry returns the same common challenge
+ // information, including realm and service; the resource scope is
+ // the only part that differs and is provided with each request.
+ if _, ok := a.handlers[host]; ok {
+ return nil
+ }
+
+ var username, secret string
+ if a.credentials != nil {
+ var err error
+ username, secret, err = a.credentials(host)
+ if err != nil {
+ return err
+ }
+ }
+
+ common, err := auth.GenerateTokenOptions(ctx, host, username, secret, c)
+ if err != nil {
+ return err
+ }
+ common.FetchRefreshToken = a.onFetchRefreshToken != nil
+
+ a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common)
+ return nil
+ } else if c.Scheme == auth.BasicAuth && a.credentials != nil {
+ username, secret, err := a.credentials(host)
+ if err != nil {
+ return err
+ }
+
+ if username == "" || secret == "" {
+ return fmt.Errorf("%w: no basic auth credentials", ErrInvalidAuthorization)
+ }
+
+ a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, auth.TokenOptions{
+ Username: username,
+ Secret: secret,
+ })
+ return nil
+ }
+ }
+ return fmt.Errorf("failed to find supported auth scheme: %w", errdefs.ErrNotImplemented)
+}
+
+// authResult is used to rate-limit token fetches: concurrent requests for
+// the same scope wait on a single in-flight fetch and share its result.
+type authResult struct {
+ sync.WaitGroup
+ token string
+ refreshToken string
+ expirationTime *time.Time
+ err error
+}
+
+// authHandler is used to handle auth request per registry server.
+type authHandler struct {
+ sync.Mutex
+
+ header http.Header
+
+ client *http.Client
+
+ // only support basic and bearer schemes
+ scheme auth.AuthenticationScheme
+
+ // common contains common challenge answer
+ common auth.TokenOptions
+
+ // scopedTokens caches tokens indexed by scope, used in the
+ // bearer auth case
+ scopedTokens map[string]*authResult
+}
+
+func newAuthHandler(client *http.Client, hdr http.Header, scheme auth.AuthenticationScheme, opts auth.TokenOptions) *authHandler {
+ return &authHandler{
+ header: hdr,
+ client: client,
+ scheme: scheme,
+ common: opts,
+ scopedTokens: map[string]*authResult{},
+ }
+}
+
+func (ah *authHandler) authorize(ctx context.Context) (string, string, error) {
+ switch ah.scheme {
+ case auth.BasicAuth:
+ return ah.doBasicAuth(ctx)
+ case auth.BearerAuth:
+ return ah.doBearerAuth(ctx)
+ default:
+ return "", "", fmt.Errorf("failed to find supported auth scheme: %s: %w", string(ah.scheme), errdefs.ErrNotImplemented)
+ }
+}
+
+func (ah *authHandler) doBasicAuth(ctx context.Context) (string, string, error) {
+ username, secret := ah.common.Username, ah.common.Secret
+
+ if username == "" || secret == "" {
+ return "", "", fmt.Errorf("failed to handle basic auth because missing username or secret")
+ }
+
+ auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret))
+ return fmt.Sprintf("Basic %s", auth), "", nil
+}
+
+func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken string, err error) {
+ // copy common tokenOptions
+ to := ah.common
+
+ to.Scopes = GetTokenScopes(ctx, to.Scopes)
+
+ // Docs: https://docs.docker.com/registry/spec/auth/scope
+ scoped := strings.Join(to.Scopes, " ")
+
+ // Keep track of the expiration time of cached bearer tokens so they can be
+ // refreshed when they expire without a server roundtrip.
+ var expirationTime *time.Time
+
+ ah.Lock()
+ if r, exist := ah.scopedTokens[scoped]; exist && (r.expirationTime == nil || r.expirationTime.After(time.Now())) {
+ ah.Unlock()
+ r.Wait()
+ return r.token, r.refreshToken, r.err
+ }
+
+ // only one fetch token job
+ r := new(authResult)
+ r.Add(1)
+ ah.scopedTokens[scoped] = r
+ ah.Unlock()
+
+ defer func() {
+ token = fmt.Sprintf("Bearer %s", token)
+ r.token, r.refreshToken, r.err, r.expirationTime = token, refreshToken, err, expirationTime
+ r.Done()
+ }()
+
+ // fetch token for the resource scope
+ if to.Secret != "" {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("failed to fetch oauth token: %w", err)
+ }
+ }()
+ // credential information is provided, use oauth POST endpoint
+ // TODO: Allow setting client_id
+ resp, err := auth.FetchTokenWithOAuth(ctx, ah.client, ah.header, "containerd-client", to)
+ if err != nil {
+ var errStatus remoteerrors.ErrUnexpectedStatus
+ if errors.As(err, &errStatus) {
+ // Registries without support for POST may return 404 for POST /v2/token.
+ // As of September 2017, GCR is known to return 404.
+ // As of February 2018, JFrog Artifactory is known to return 401.
+ // As of January 2022, ACR is known to return 400.
+ if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 || errStatus.StatusCode == 400 {
+ resp, err := auth.FetchToken(ctx, ah.client, ah.header, to)
+ if err != nil {
+ return "", "", err
+ }
+ expirationTime = getExpirationTime(resp.ExpiresInSeconds)
+ return resp.Token, resp.RefreshToken, nil
+ }
+ log.G(ctx).WithFields(log.Fields{
+ "status": errStatus.Status,
+ "body": string(errStatus.Body),
+ }).Debugf("token request failed")
+ }
+ return "", "", err
+ }
+ expirationTime = getExpirationTime(resp.ExpiresInSeconds)
+ return resp.AccessToken, resp.RefreshToken, nil
+ }
+ // do request anonymously
+ resp, err := auth.FetchToken(ctx, ah.client, ah.header, to)
+ if err != nil {
+ return "", "", fmt.Errorf("failed to fetch anonymous token: %w", err)
+ }
+ expirationTime = getExpirationTime(resp.ExpiresInSeconds)
+ return resp.Token, resp.RefreshToken, nil
+}
+
+func getExpirationTime(expiresInSeconds int) *time.Time {
+ if expiresInSeconds <= 0 {
+ return nil
+ }
+ expirationTime := time.Now().Add(time.Duration(expiresInSeconds) * time.Second)
+ return &expirationTime
+}
+
+func invalidAuthorization(ctx context.Context, c auth.Challenge, responses []*http.Response) (retry bool, _ error) {
+ errStr := c.Parameters["error"]
+ if errStr == "" {
+ return retry, nil
+ }
+
+ n := len(responses)
+ if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) {
+ limitedErr := errStr
+ errLengthLimit := 64
+ if len(limitedErr) > errLengthLimit {
+ limitedErr = limitedErr[:errLengthLimit] + "..."
+ }
+ log.G(ctx).WithField("error", limitedErr).Debug("authorization error using bearer token, retrying")
+ return true, nil
+ }
+
+ return retry, fmt.Errorf("server message: %s: %w", errStr, ErrInvalidAuthorization)
+}
+
+func sameRequest(r1, r2 *http.Request) bool {
+ if r1.Method != r2.Method {
+ return false
+ }
+ if *r1.URL != *r2.URL {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/config/config_unix.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/config/config_unix.go
new file mode 100644
index 000000000000..9db18dedc6ee
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/config/config_unix.go
@@ -0,0 +1,42 @@
+//go:build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package config
+
+import (
+ "crypto/x509"
+ "path/filepath"
+)
+
+func hostPaths(root, host string) (hosts []string) {
+ ch := hostDirectory(host)
+ if ch != host {
+ hosts = append(hosts, filepath.Join(root, ch))
+ }
+
+ hosts = append(hosts,
+ filepath.Join(root, host),
+ filepath.Join(root, "_default"),
+ )
+
+ return
+}
+
+func rootSystemPool() (*x509.CertPool, error) {
+ return x509.SystemCertPool()
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/config/config_windows.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/config/config_windows.go
new file mode 100644
index 000000000000..4697728b9c68
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/config/config_windows.go
@@ -0,0 +1,41 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package config
+
+import (
+ "crypto/x509"
+ "path/filepath"
+ "strings"
+)
+
+func hostPaths(root, host string) (hosts []string) {
+ ch := hostDirectory(host)
+ if ch != host {
+ hosts = append(hosts, filepath.Join(root, strings.Replace(ch, ":", "", -1)))
+ }
+
+ hosts = append(hosts,
+ filepath.Join(root, strings.Replace(host, ":", "", -1)),
+ filepath.Join(root, "_default"),
+ )
+
+ return
+}
+
+func rootSystemPool() (*x509.CertPool, error) {
+ return x509.NewCertPool(), nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/config/docker_fuzzer_internal.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/config/docker_fuzzer_internal.go
new file mode 100644
index 000000000000..335614fdb20d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/config/docker_fuzzer_internal.go
@@ -0,0 +1,44 @@
+//go:build gofuzz
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package config
+
+import (
+ "os"
+
+ fuzz "github.com/AdaLogics/go-fuzz-headers"
+)
+
+func FuzzParseHostsFile(data []byte) int {
+ f := fuzz.NewConsumer(data)
+ dir, err := os.MkdirTemp("", "fuzz-")
+ if err != nil {
+ return 0
+ }
+ err = f.CreateFiles(dir)
+ if err != nil {
+ return 0
+ }
+ defer os.RemoveAll(dir)
+ b, err := f.GetBytes()
+ if err != nil {
+ return 0
+ }
+ _, _ = parseHostsFile(dir, b)
+ return 1
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/config/hosts.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/config/hosts.go
new file mode 100644
index 000000000000..86ea23238cb4
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/config/hosts.go
@@ -0,0 +1,618 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package config contains utilities for helping configure the Docker resolver
+package config
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+ "github.com/pelletier/go-toml/v2"
+ tomlu "github.com/pelletier/go-toml/v2/unstable"
+
+ "github.com/containerd/containerd/v2/core/remotes/docker"
+)
+
+// UpdateClientFunc is a function that lets you amend http Client behavior used by registry clients.
+type UpdateClientFunc func(client *http.Client) error
+
+type hostConfig struct {
+ scheme string
+ host string
+ path string
+
+ capabilities docker.HostCapabilities
+
+ caCerts []string
+ clientPairs [][2]string
+ skipVerify *bool
+
+ header http.Header
+
+ // TODO: Add credential configuration (domain alias, username)
+}
+
+// HostOptions is used to configure registry hosts
+type HostOptions struct {
+ HostDir func(string) (string, error)
+ Credentials func(host string) (string, string, error)
+ DefaultTLS *tls.Config
+ DefaultScheme string
+ // UpdateClient will be called after creating http.Client object, so clients can provide extra configuration
+ UpdateClient UpdateClientFunc
+ AuthorizerOpts []docker.AuthorizerOpt
+}
+
+// ConfigureHosts creates a registry hosts function from the provided
+// host creation options. The host directory can read hosts.toml or
+// certificate files laid out in the Docker specific layout.
+// If a `HostDir` function is not required, defaults are used.
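+//
+// A minimal usage sketch (assuming a certs.d-style root):
+//
+//	hosts := ConfigureHosts(ctx, HostOptions{
+//		HostDir: HostDirFromRoot("/etc/containerd/certs.d"),
+//	})
+//	registryHosts, err := hosts("docker.io")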
+func ConfigureHosts(ctx context.Context, options HostOptions) docker.RegistryHosts {
+ return func(host string) ([]docker.RegistryHost, error) {
+ var hosts []hostConfig
+ if options.HostDir != nil {
+ dir, err := options.HostDir(host)
+ if err != nil && !errdefs.IsNotFound(err) {
+ return nil, err
+ }
+ if dir != "" {
+ log.G(ctx).WithField("dir", dir).Debug("loading host directory")
+ hosts, err = loadHostDir(ctx, dir)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ // If hosts was not set, add a default host
+ // NOTE: Check nil here and not empty, the host may be
+ // intentionally configured to not have any endpoints
+ if hosts == nil {
+ hosts = make([]hostConfig, 1)
+ }
+ if len(hosts) > 0 && hosts[len(hosts)-1].host == "" {
+ if host == "docker.io" {
+ hosts[len(hosts)-1].scheme = "https"
+ hosts[len(hosts)-1].host = "registry-1.docker.io"
+ } else if docker.IsLocalhost(host) {
+ hosts[len(hosts)-1].host = host
+ if options.DefaultScheme == "" {
+ _, port, _ := net.SplitHostPort(host)
+ if port == "" || port == "443" {
+ // If port is default or 443, only use https
+ hosts[len(hosts)-1].scheme = "https"
+ } else {
+ // HTTP fallback logic will be used when protocol is ambiguous
+ hosts[len(hosts)-1].scheme = "http"
+ }
+
+ // When port is 80, protocol is not ambiguous
+ if port != "80" {
+ // Skipping TLS verification for localhost
+ var skipVerify = true
+ hosts[len(hosts)-1].skipVerify = &skipVerify
+ }
+ } else {
+ hosts[len(hosts)-1].scheme = options.DefaultScheme
+ }
+ } else {
+ hosts[len(hosts)-1].host = host
+ if options.DefaultScheme != "" {
+ hosts[len(hosts)-1].scheme = options.DefaultScheme
+ } else {
+ hosts[len(hosts)-1].scheme = "https"
+ }
+ }
+ hosts[len(hosts)-1].path = "/v2"
+ hosts[len(hosts)-1].capabilities = docker.HostCapabilityPull | docker.HostCapabilityResolve | docker.HostCapabilityPush
+ }
+
+ // tlsConfigured indicates that TLS was configured and HTTP endpoints should
+ // attempt to use the TLS configuration before falling back to HTTP
+ var tlsConfigured bool
+
+ var defaultTLSConfig *tls.Config
+ if options.DefaultTLS != nil {
+ tlsConfigured = true
+ defaultTLSConfig = options.DefaultTLS
+ } else {
+ defaultTLSConfig = &tls.Config{}
+ }
+
+ defaultTransport := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ FallbackDelay: 300 * time.Millisecond,
+ }).DialContext,
+ MaxIdleConns: 10,
+ IdleConnTimeout: 30 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ TLSClientConfig: defaultTLSConfig,
+ ExpectContinueTimeout: 5 * time.Second,
+ }
+
+ client := &http.Client{
+ Transport: defaultTransport,
+ }
+ if options.UpdateClient != nil {
+ if err := options.UpdateClient(client); err != nil {
+ return nil, err
+ }
+ }
+
+ authOpts := []docker.AuthorizerOpt{docker.WithAuthClient(client)}
+ if options.Credentials != nil {
+ authOpts = append(authOpts, docker.WithAuthCreds(options.Credentials))
+ }
+ authOpts = append(authOpts, options.AuthorizerOpts...)
+ authorizer := docker.NewDockerAuthorizer(authOpts...)
+
+ rhosts := make([]docker.RegistryHost, len(hosts))
+ for i, host := range hosts {
+ // Allow setting for each host as well
+ explicitTLS := tlsConfigured
+
+ if host.caCerts != nil || host.clientPairs != nil || host.skipVerify != nil {
+ explicitTLS = true
+ tr := defaultTransport.Clone()
+ tlsConfig := tr.TLSClientConfig
+ if host.skipVerify != nil {
+ tlsConfig.InsecureSkipVerify = *host.skipVerify
+ }
+ if host.caCerts != nil {
+ if tlsConfig.RootCAs == nil {
+ rootPool, err := rootSystemPool()
+ if err != nil {
+ return nil, fmt.Errorf("unable to initialize cert pool: %w", err)
+ }
+ tlsConfig.RootCAs = rootPool
+ }
+ for _, f := range host.caCerts {
+ data, err := os.ReadFile(f)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read CA cert %q: %w", f, err)
+ }
+ if !tlsConfig.RootCAs.AppendCertsFromPEM(data) {
+ return nil, fmt.Errorf("unable to load CA cert %q", f)
+ }
+ }
+ }
+
+ for _, pair := range host.clientPairs {
+ certPEMBlock, err := os.ReadFile(pair[0])
+ if err != nil {
+ return nil, fmt.Errorf("unable to read CERT file %q: %w", pair[0], err)
+ }
+ var keyPEMBlock []byte
+ if pair[1] != "" {
+ keyPEMBlock, err = os.ReadFile(pair[1])
+ if err != nil {
+ return nil, fmt.Errorf("unable to read CERT file %q: %w", pair[1], err)
+ }
+ } else {
+ // Load key block from same PEM file
+ keyPEMBlock = certPEMBlock
+ }
+ cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load X509 key pair: %w", err)
+ }
+
+ tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
+ }
+
+ c := *client
+ c.Transport = tr
+ if options.UpdateClient != nil {
+ if err := options.UpdateClient(&c); err != nil {
+ return nil, err
+ }
+ }
+
+ rhosts[i].Client = &c
+ rhosts[i].Authorizer = docker.NewDockerAuthorizer(append(authOpts, docker.WithAuthClient(&c))...)
+ } else {
+ rhosts[i].Client = client
+ rhosts[i].Authorizer = authorizer
+ }
+
+ // When TLS has been configured for the operation or host and
+ // the protocol from the port number is ambiguous, use the
+ // docker.NewHTTPFallback roundtripper to catch TLS errors and re-attempt the
+ // request as http. This allows preference for https when configured but
+ // also catches TLS errors early enough in the request to avoid sending
+ // the request twice or consuming the request body.
+ if host.scheme == "http" && explicitTLS {
+ _, port, _ := net.SplitHostPort(host.host)
+ if port != "80" {
+ log.G(ctx).WithField("host", host.host).Info("host will try HTTPS first since it is configured for HTTP with a TLS configuration, consider changing host to HTTPS or removing unused TLS configuration")
+ host.scheme = "https"
+ rhosts[i].Client.Transport = docker.NewHTTPFallback(rhosts[i].Client.Transport)
+ }
+ }
+
+ rhosts[i].Scheme = host.scheme
+ rhosts[i].Host = host.host
+ rhosts[i].Path = host.path
+ rhosts[i].Capabilities = host.capabilities
+ rhosts[i].Header = host.header
+ }
+
+ return rhosts, nil
+ }
+}
+
+// HostDirFromRoot returns a function which finds a host directory
+// based at the given root.
+func HostDirFromRoot(root string) func(string) (string, error) {
+ return func(host string) (string, error) {
+ for _, p := range hostPaths(root, host) {
+ if _, err := os.Stat(p); err == nil {
+ return p, nil
+ } else if !os.IsNotExist(err) {
+ return "", err
+ }
+ }
+ return "", errdefs.ErrNotFound
+ }
+}
+
+// hostDirectory converts ":port" to "_port_" in directory names
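+// e.g. "localhost:5000" becomes "localhost_5000_".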
+func hostDirectory(host string) string {
+ idx := strings.LastIndex(host, ":")
+ if idx > 0 {
+ return host[:idx] + "_" + host[idx+1:] + "_"
+ }
+ return host
+}
+
+func loadHostDir(ctx context.Context, hostsDir string) ([]hostConfig, error) {
+ b, err := os.ReadFile(filepath.Join(hostsDir, "hosts.toml"))
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ // If hosts.toml does not exist, fallback to checking for
+ // certificate files based on Docker's certificate file
+ // pattern (".crt", ".cert", ".key" files)
+ return loadCertFiles(ctx, hostsDir)
+ }
+
+ hosts, err := parseHostsFile(hostsDir, b)
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("failed to decode %s", filepath.Join(hostsDir, "hosts.toml"))
+ // Fallback to checking certificate files
+ return loadCertFiles(ctx, hostsDir)
+ }
+
+ return hosts, nil
+}
+
+type hostFileConfig struct {
+ // Capabilities determine what operations a host is
+ // capable of performing. Allowed values
+ // - pull
+ // - resolve
+ // - push
+ Capabilities []string `toml:"capabilities"`
+
+ // CACert are the public key certificates for TLS
+ // Accepted types
+ // - string - Single file with certificate(s)
+ // - []string - Multiple files with certificates
+ CACert interface{} `toml:"ca"`
+
+ // Client keypair(s) for TLS with client authentication
+ // Accepted types
+ // - string - Single file with public and private keys
+ // - []string - Multiple files with public and private keys
+ // - [][2]string - Multiple keypairs with public and private keys in separate files
+ Client interface{} `toml:"client"`
+
+ // SkipVerify skips verification of the server's certificate chain
+ // and host name. This should only be used for testing or in
+ // combination with other methods of verifying connections.
+ SkipVerify *bool `toml:"skip_verify"`
+
+ // Header contains additional headers to send to the server
+ Header map[string]interface{} `toml:"header"`
+
+ // OverridePath indicates the API root endpoint is defined in the URL
+ // path rather than by the API specification.
+ // This may be used with non-compliant OCI registries to override the
+ // API root endpoint.
+ OverridePath bool `toml:"override_path"`
+
+ // TODO: Credentials: helper? name? username? alternate domain? token?
+}
+
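+// parseHostsFile parses a hosts.toml of the following shape (hypothetical
+// mirror):
+//
+//	server = "https://registry.example.com"
+//
+//	[host."https://mirror.example.com"]
+//	  capabilities = ["pull", "resolve"]
+//	  ca = "mirror.crt"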
+func parseHostsFile(baseDir string, b []byte) ([]hostConfig, error) {
+ orderedHosts, err := getSortedHosts(b)
+ if err != nil {
+ return nil, err
+ }
+
+ c := struct {
+ hostFileConfig
+ // Server specifies the default server. When `host` is
+ // also specified, those hosts are tried first.
+ Server string `toml:"server"`
+ // HostConfigs store the per-host configuration
+ HostConfigs map[string]hostFileConfig `toml:"host"`
+ }{}
+
+ var (
+ hosts []hostConfig
+ )
+
+ if err := toml.Unmarshal(b, &c); err != nil {
+ return nil, err
+ }
+
+ // Parse hosts array
+ for _, host := range orderedHosts {
+ config := c.HostConfigs[host]
+
+ parsed, err := parseHostConfig(host, baseDir, config)
+ if err != nil {
+ return nil, err
+ }
+ hosts = append(hosts, parsed)
+ }
+
+ // Parse root host config and append it as the last element
+ parsed, err := parseHostConfig(c.Server, baseDir, c.hostFileConfig)
+ if err != nil {
+ return nil, err
+ }
+ hosts = append(hosts, parsed)
+
+ return hosts, nil
+}
+
+func parseHostConfig(server string, baseDir string, config hostFileConfig) (hostConfig, error) {
+ var (
+ result = hostConfig{}
+ err error
+ )
+
+ if server != "" {
+ if !strings.HasPrefix(server, "http") {
+ server = "https://" + server
+ }
+ u, err := url.Parse(server)
+ if err != nil {
+ return hostConfig{}, fmt.Errorf("unable to parse server %v: %w", server, err)
+ }
+ result.scheme = u.Scheme
+ result.host = u.Host
+ if len(u.Path) > 0 {
+ u.Path = path.Clean(u.Path)
+ if !strings.HasSuffix(u.Path, "/v2") && !config.OverridePath {
+ u.Path = u.Path + "/v2"
+ }
+ } else if !config.OverridePath {
+ u.Path = "/v2"
+ }
+ result.path = u.Path
+ }
+
+ result.skipVerify = config.SkipVerify
+
+ if len(config.Capabilities) > 0 {
+ for _, c := range config.Capabilities {
+ switch strings.ToLower(c) {
+ case "pull":
+ result.capabilities |= docker.HostCapabilityPull
+ case "resolve":
+ result.capabilities |= docker.HostCapabilityResolve
+ case "push":
+ result.capabilities |= docker.HostCapabilityPush
+ default:
+ return hostConfig{}, fmt.Errorf("unknown capability %v", c)
+ }
+ }
+ } else {
+ result.capabilities = docker.HostCapabilityPull | docker.HostCapabilityResolve | docker.HostCapabilityPush
+ }
+
+ if config.CACert != nil {
+ switch cert := config.CACert.(type) {
+ case string:
+ result.caCerts = []string{makeAbsPath(cert, baseDir)}
+ case []interface{}:
+ result.caCerts, err = makeStringSlice(cert, func(p string) string {
+ return makeAbsPath(p, baseDir)
+ })
+ if err != nil {
+ return hostConfig{}, err
+ }
+ default:
+ return hostConfig{}, fmt.Errorf("invalid type %v for \"ca\"", cert)
+ }
+ }
+
+ if config.Client != nil {
+ switch client := config.Client.(type) {
+ case string:
+ result.clientPairs = [][2]string{{makeAbsPath(client, baseDir), ""}}
+ case []interface{}:
+ // []string or [][2]string
+ for _, pairs := range client {
+ switch p := pairs.(type) {
+ case string:
+ result.clientPairs = append(result.clientPairs, [2]string{makeAbsPath(p, baseDir), ""})
+ case []interface{}:
+ slice, err := makeStringSlice(p, func(s string) string {
+ return makeAbsPath(s, baseDir)
+ })
+ if err != nil {
+ return hostConfig{}, err
+ }
+ if len(slice) != 2 {
+ return hostConfig{}, fmt.Errorf("invalid pair %v for \"client\"", p)
+ }
+
+ var pair [2]string
+ copy(pair[:], slice)
+ result.clientPairs = append(result.clientPairs, pair)
+ default:
+ return hostConfig{}, fmt.Errorf("invalid type %T for \"client\"", p)
+ }
+ }
+ default:
+ return hostConfig{}, fmt.Errorf("invalid type %v for \"client\"", client)
+ }
+ }
+
+ if config.Header != nil {
+ header := http.Header{}
+ for key, ty := range config.Header {
+ switch value := ty.(type) {
+ case string:
+ header[key] = []string{value}
+ case []interface{}:
+ header[key], err = makeStringSlice(value, nil)
+ if err != nil {
+ return hostConfig{}, err
+ }
+ default:
+ return hostConfig{}, fmt.Errorf("invalid type %v for header %q", ty, key)
+ }
+ }
+ result.header = header
+ }
+
+ return result, nil
+}
+
+// getSortedHosts returns the list of hosts in the order they are defined in the file.
+func getSortedHosts(b []byte) ([]string, error) {
+ var hostsInOrder []string
+
+ // Use toml unstable package for directly parsing toml
+ // See /~https://github.com/pelletier/go-toml/discussions/801#discussioncomment-7083586
+ p := tomlu.Parser{}
+ p.Reset(b)
+
+ var host string
+ // iterate over all top level expressions
+ for p.NextExpression() {
+ e := p.Expression()
+
+ if e.Kind != tomlu.Table {
+ continue
+ }
+
+ // Let's look at the key. It's an iterator over the multiple dotted parts of the key.
+ var parts []string
+ for it := e.Key(); it.Next(); {
+ parts = append(parts, string(it.Node().Data))
+ }
+
+ // only consider keys that look like `host.XXX`
+ // and skip subtables such as `host.XXX.header`
+ if len(parts) < 2 || parts[0] != "host" || parts[1] == host {
+ continue
+ }
+
+ host = parts[1]
+ hostsInOrder = append(hostsInOrder, host)
+ }
+
+ return hostsInOrder, nil
+}
+
+// makeStringSlice is a helper func to convert from []interface{} to []string.
+// Additionally an optional cb func may be passed to perform string mapping.
+func makeStringSlice(slice []interface{}, cb func(string) string) ([]string, error) {
+ out := make([]string, len(slice))
+ for i, value := range slice {
+ str, ok := value.(string)
+ if !ok {
+ return nil, fmt.Errorf("unable to cast %v to string", value)
+ }
+
+ if cb != nil {
+ out[i] = cb(str)
+ } else {
+ out[i] = str
+ }
+ }
+ return out, nil
+}
+
+func makeAbsPath(p string, base string) string {
+ if filepath.IsAbs(p) {
+ return p
+ }
+ return filepath.Join(base, p)
+}
+
+// loadCertFiles loads certs from certsDir like "/etc/docker/certs.d".
+// Compatible with Docker file layout
+// - files ending with ".crt" are treated as CA certificate files
+// - files ending with ".cert" are treated as client certificates, and
+// files with the same name but ending with ".key" are treated as the
+// corresponding private key.
+// NOTE: If a ".key" file is missing, this function will just return
+// the ".cert", which may contain the private key. If the ".cert" file
+// does not contain the private key, the caller should detect and error.
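+//
+// Example layout (hypothetical registry host):
+//
+//	/etc/docker/certs.d/registry.example.com/
+//	  ca.crt
+//	  client.cert
+//	  client.key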
+func loadCertFiles(ctx context.Context, certsDir string) ([]hostConfig, error) {
+ fs, err := os.ReadDir(certsDir)
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ hosts := make([]hostConfig, 1)
+ for _, f := range fs {
+ if f.IsDir() {
+ continue
+ }
+ if strings.HasSuffix(f.Name(), ".crt") {
+ hosts[0].caCerts = append(hosts[0].caCerts, filepath.Join(certsDir, f.Name()))
+ }
+ if strings.HasSuffix(f.Name(), ".cert") {
+ var pair [2]string
+ certFile := f.Name()
+ pair[0] = filepath.Join(certsDir, certFile)
+ // Check if key also exists
+ keyFile := filepath.Join(certsDir, certFile[:len(certFile)-5]+".key")
+ if _, err := os.Stat(keyFile); err == nil {
+ pair[1] = keyFile
+ } else if !os.IsNotExist(err) {
+ return nil, err
+ }
+ hosts[0].clientPairs = append(hosts[0].clientPairs, pair)
+ }
+ }
+ return hosts, nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter.go
new file mode 100644
index 000000000000..f21103e17f1a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter.go
@@ -0,0 +1,85 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/core/remotes"
+ "github.com/containerd/log"
+ digest "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// LegacyConfigMediaType should be replaced by OCI image spec.
+//
+// More detail: docker/distribution#1622
+const LegacyConfigMediaType = "application/octet-stream"
+
+// ConvertManifest changes application/octet-stream to the schema2 config media type if needed.
+//
+// NOTE:
+// 1. The original manifest will be deleted by the next gc round.
+// 2. Manifest lists are not covered.
+func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
+ if !images.IsManifestType(desc.MediaType) {
+ log.G(ctx).Warnf("do nothing for media type: %s", desc.MediaType)
+ return desc, nil
+ }
+
+ // read manifest data
+ mb, err := content.ReadBlob(ctx, store, desc)
+ if err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to read index data: %w", err)
+ }
+
+ var manifest ocispec.Manifest
+ if err := json.Unmarshal(mb, &manifest); err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal data into manifest: %w", err)
+ }
+
+ // check config media type
+ if manifest.Config.MediaType != LegacyConfigMediaType {
+ return desc, nil
+ }
+
+ manifest.Config.MediaType = images.MediaTypeDockerSchema2Config
+ data, err := json.MarshalIndent(manifest, "", " ")
+ if err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to marshal manifest: %w", err)
+ }
+
+ // update manifest with gc labels
+ desc.Digest = digest.Canonical.FromBytes(data)
+ desc.Size = int64(len(data))
+
+ labels := map[string]string{}
+ for i, c := range append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...) {
+ labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = c.Digest.String()
+ }
+
+ ref := remotes.MakeRefKey(ctx, desc)
+ if err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to update content: %w", err)
+ }
+ return desc, nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter_fuzz.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter_fuzz.go
new file mode 100644
index 000000000000..c97b6b8994e3
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter_fuzz.go
@@ -0,0 +1,54 @@
+//go:build gofuzz
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "context"
+ "os"
+
+ fuzz "github.com/AdaLogics/go-fuzz-headers"
+ "github.com/containerd/containerd/v2/plugins/content/local"
+ "github.com/containerd/log"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+func FuzzConvertManifest(data []byte) int {
+ ctx := context.Background()
+
+ // Do not log the message below
+ // level=warning msg="do nothing for media type: ..."
+ log.G(ctx).Logger.SetLevel(log.PanicLevel)
+
+ f := fuzz.NewConsumer(data)
+ desc := ocispec.Descriptor{}
+ err := f.GenerateStruct(&desc)
+ if err != nil {
+ return 0
+ }
+ tmpdir, err := os.MkdirTemp("", "fuzzing-")
+ if err != nil {
+ return 0
+ }
+ cs, err := local.NewStore(tmpdir)
+ if err != nil {
+ return 0
+ }
+ _, _ = ConvertManifest(ctx, cs, desc)
+ return 1
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errcode.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errcode.go
new file mode 100644
index 000000000000..8c623bcbef72
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errcode.go
@@ -0,0 +1,283 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// ErrorCoder is the base interface for ErrorCode and Error allowing
+// users of each to just call ErrorCode to get the real ID of each
+type ErrorCoder interface {
+ ErrorCode() ErrorCode
+}
+
+// ErrorCode represents the error type. The errors are serialized via strings
+// and the integer format may change and should *never* be exported.
+type ErrorCode int
+
+var _ error = ErrorCode(0)
+
+// ErrorCode just returns itself
+func (ec ErrorCode) ErrorCode() ErrorCode {
+ return ec
+}
+
+// Error returns the ID/Value
+func (ec ErrorCode) Error() string {
+ // NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
+ return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
+}
+
+// Descriptor returns the descriptor for the error code.
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+ d, ok := errorCodeToDescriptors[ec]
+
+ if !ok {
+ return ErrorCodeUnknown.Descriptor()
+ }
+
+ return d
+}
+
+// String returns the canonical identifier for this error code.
+func (ec ErrorCode) String() string {
+ return ec.Descriptor().Value
+}
+
+// Message returned the human-readable error message for this error code.
+func (ec ErrorCode) Message() string {
+ return ec.Descriptor().Message
+}
+
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
+func (ec ErrorCode) MarshalText() (text []byte, err error) {
+ return []byte(ec.String()), nil
+}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+ desc, ok := idToDescriptors[string(text)]
+
+ if !ok {
+ desc = ErrorCodeUnknown.Descriptor()
+ }
+
+ *ec = desc.Code
+
+ return nil
+}
+
+// WithMessage creates a new Error struct based on the passed-in info and
+// overrides the Message property.
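+// e.g. ErrorCodeUnknown.WithMessage("boom") produces an Error whose Error()
+// reads "unknown: boom" (assuming the UNKNOWN descriptor value).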
+func (ec ErrorCode) WithMessage(message string) Error {
+ return Error{
+ Code: ec,
+ Message: message,
+ }
+}
+
+// WithDetail creates a new Error struct based on the passed-in info and
+// set the Detail property appropriately
+func (ec ErrorCode) WithDetail(detail interface{}) Error {
+ return Error{
+ Code: ec,
+ Message: ec.Message(),
+ }.WithDetail(detail)
+}
+
+// WithArgs creates a new Error struct and sets the Args slice
+func (ec ErrorCode) WithArgs(args ...interface{}) Error {
+ return Error{
+ Code: ec,
+ Message: ec.Message(),
+ }.WithArgs(args...)
+}
+
+// Error provides a wrapper around ErrorCode with extra Details provided.
+type Error struct {
+ Code ErrorCode `json:"code"`
+ Message string `json:"message"`
+ Detail interface{} `json:"detail,omitempty"`
+
+ // TODO(duglin): See if we need an "args" property so we can do the
+ // variable substitution right before showing the message to the user
+}
+
+var _ error = Error{}
+
+// ErrorCode returns the ID/Value of this Error
+func (e Error) ErrorCode() ErrorCode {
+ return e.Code
+}
+
+// Error returns a human readable representation of the error.
+func (e Error) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added
+func (e Error) WithDetail(detail interface{}) Error {
+ return Error{
+ Code: e.Code,
+ Message: e.Message,
+ Detail: detail,
+ }
+}
+
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error
+func (e Error) WithArgs(args ...interface{}) Error {
+ return Error{
+ Code: e.Code,
+ Message: fmt.Sprintf(e.Code.Message(), args...),
+ Detail: e.Detail,
+ }
+}
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+ // Code is the error code that this descriptor describes.
+ Code ErrorCode
+
+ // Value provides a unique, string key, often capitalized with
+ // underscores, to identify the error code. This value is used as the
+ // keyed value when serializing API errors.
+ Value string
+
+ // Message is a short, human readable description of the error condition
+ // included in API responses.
+ Message string
+
+ // Description provides a complete account of the error's purpose, suitable
+ // for use in documentation.
+ Description string
+
+ // HTTPStatusCode provides the http status code that is associated with
+ // this error condition.
+ HTTPStatusCode int
+}
+
+// ParseErrorCode returns the value by the string error code.
+// `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+ ed, ok := idToDescriptors[value]
+ if ok {
+ return ed.Code
+ }
+
+ return ErrorCodeUnknown
+}
+
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors []error
+
+var _ error = Errors{}
+
+func (errs Errors) Error() string {
+ switch len(errs) {
+ case 0:
+ return ""
+ case 1:
+ return errs[0].Error()
+ default:
+ msg := "errors:\n"
+ for _, err := range errs {
+ msg += err.Error() + "\n"
+ }
+ return msg
+ }
+}
+
+// Len returns the current number of errors.
+func (errs Errors) Len() int {
+ return len(errs)
+}
+
+// MarshalJSON converts a slice of error, ErrorCode or Error into a
+// slice of Error, then serializes the result
+func (errs Errors) MarshalJSON() ([]byte, error) {
+ var tmpErrs struct {
+ Errors []Error `json:"errors,omitempty"`
+ }
+
+ for _, daErr := range errs {
+ var err Error
+
+ switch daErr := daErr.(type) {
+ case ErrorCode:
+ err = daErr.WithDetail(nil)
+ case Error:
+ err = daErr
+ default:
+ err = ErrorCodeUnknown.WithDetail(daErr)
+
+ }
+
+ // If the Error struct was set up and the Message field was left
+ // empty (""), grab the message from the ErrorCode
+ msg := err.Message
+ if msg == "" {
+ msg = err.Code.Message()
+ }
+
+ tmpErrs.Errors = append(tmpErrs.Errors, Error{
+ Code: err.Code,
+ Message: msg,
+ Detail: err.Detail,
+ })
+ }
+
+ return json.Marshal(tmpErrs)
+}
+
+// UnmarshalJSON deserializes []Error and then converts it into a slice of
+// Error or ErrorCode
+func (errs *Errors) UnmarshalJSON(data []byte) error {
+ var tmpErrs struct {
+ Errors []Error
+ }
+
+ if err := json.Unmarshal(data, &tmpErrs); err != nil {
+ return err
+ }
+
+ var newErrs Errors
+ for _, daErr := range tmpErrs.Errors {
+ // If Message is empty or exactly matches the Code's message string
+ // then just use the Code, no need for a full Error struct
+ if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
+ // Errors w/o details get converted to ErrorCode
+ newErrs = append(newErrs, daErr.Code)
+ } else {
+ // Errors w/ details are untouched
+ newErrs = append(newErrs, Error{
+ Code: daErr.Code,
+ Message: daErr.Message,
+ Detail: daErr.Detail,
+ })
+ }
+ }
+
+ *errs = newErrs
+ return nil
+}
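
For orientation, the Errors envelope above round-trips the standard registry error body: MarshalJSON expands ErrorCode entries into full Error structs, and UnmarshalJSON collapses detail-less entries back into bare ErrorCodes. A minimal sketch of that round-trip, importing the package under its module path (the detail string is illustrative; ErrorCodeUnauthorized is registered in errdesc.go below):

package main

import (
	"encoding/json"
	"fmt"

	docker "github.com/containerd/containerd/v2/core/remotes/docker"
)

func main() {
	// Build an envelope the way a registry error response would look.
	errs := docker.Errors{
		docker.ErrorCodeUnauthorized.WithDetail("token expired"),
	}

	body, _ := json.Marshal(errs) // {"errors":[{"code":"UNAUTHORIZED",...}]}
	fmt.Println(string(body))

	// Decoding keeps the full Error struct because Detail is set; entries
	// without details would come back as bare ErrorCodes.
	var decoded docker.Errors
	_ = json.Unmarshal(body, &decoded)
	fmt.Println(decoded.Error()) // unauthorized: authentication required
}
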
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errdesc.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errdesc.go
new file mode 100644
index 000000000000..b2bd4d82bd87
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errdesc.go
@@ -0,0 +1,154 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "fmt"
+ "net/http"
+ "sort"
+ "sync"
+)
+
+var (
+ errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
+ idToDescriptors = map[string]ErrorDescriptor{}
+ groupToDescriptors = map[string][]ErrorDescriptor{}
+)
+
+var (
+ // ErrorCodeUnknown is a generic error that can be used as a last
+ // resort if there is no situation-specific error message that can be used
+ ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
+ Value: "UNKNOWN",
+ Message: "unknown error",
+ Description: `Generic error returned when the error does not have an
+ API classification.`,
+ HTTPStatusCode: http.StatusInternalServerError,
+ })
+
+ // ErrorCodeUnsupported is returned when an operation is not supported.
+ ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
+ Value: "UNSUPPORTED",
+ Message: "The operation is unsupported.",
+ Description: `The operation was unsupported due to a missing
+ implementation or invalid set of parameters.`,
+ HTTPStatusCode: http.StatusMethodNotAllowed,
+ })
+
+ // ErrorCodeUnauthorized is returned if a request requires
+ // authentication.
+ ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
+ Value: "UNAUTHORIZED",
+ Message: "authentication required",
+ Description: `The access controller was unable to authenticate
+ the client. Often this will be accompanied by a
+ Www-Authenticate HTTP response header indicating how to
+ authenticate.`,
+ HTTPStatusCode: http.StatusUnauthorized,
+ })
+
+ // ErrorCodeDenied is returned if a client does not have sufficient
+ // permission to perform an action.
+ ErrorCodeDenied = Register("errcode", ErrorDescriptor{
+ Value: "DENIED",
+ Message: "requested access to the resource is denied",
+ Description: `The access controller denied access for the
+ operation on a resource.`,
+ HTTPStatusCode: http.StatusForbidden,
+ })
+
+ // ErrorCodeUnavailable provides a common error to report unavailability
+ // of a service or endpoint.
+ ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
+ Value: "UNAVAILABLE",
+ Message: "service unavailable",
+ Description: "Returned when a service is not available",
+ HTTPStatusCode: http.StatusServiceUnavailable,
+ })
+
+ // ErrorCodeTooManyRequests is returned if a client attempts too many
+ // times to contact a service endpoint.
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{
+ Value: "TOOMANYREQUESTS",
+ Message: "too many requests",
+ Description: `Returned when a client attempts to contact a
+ service too many times`,
+ HTTPStatusCode: http.StatusTooManyRequests,
+ })
+)
+
+var nextCode = 1000
+var registerLock sync.Mutex
+
+// Register will make the passed-in error known to the environment and
+// return a new ErrorCode
+func Register(group string, descriptor ErrorDescriptor) ErrorCode {
+ registerLock.Lock()
+ defer registerLock.Unlock()
+
+ descriptor.Code = ErrorCode(nextCode)
+
+ if _, ok := idToDescriptors[descriptor.Value]; ok {
+ panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
+ }
+ if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
+ panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
+ }
+
+ groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
+ errorCodeToDescriptors[descriptor.Code] = descriptor
+ idToDescriptors[descriptor.Value] = descriptor
+
+ nextCode++
+ return descriptor.Code
+}
+
+type byValue []ErrorDescriptor
+
+func (a byValue) Len() int { return len(a) }
+func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
+
+// GetGroupNames returns the list of Error group names that are registered
+func GetGroupNames() []string {
+ keys := []string{}
+
+ for k := range groupToDescriptors {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// GetErrorCodeGroup returns the named group of error descriptors
+func GetErrorCodeGroup(name string) []ErrorDescriptor {
+ desc := groupToDescriptors[name]
+ sort.Sort(byValue(desc))
+ return desc
+}
+
+// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
+// registered, irrespective of what group they're in
+func GetErrorAllDescriptors() []ErrorDescriptor {
+ result := []ErrorDescriptor{}
+
+ for _, group := range GetGroupNames() {
+ result = append(result, GetErrorCodeGroup(group)...)
+ }
+ sort.Sort(byValue(result))
+ return result
+}
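
Register assigns codes sequentially under a lock and wires each descriptor into all three lookup maps, panicking on duplicates. A hedged sketch of how a caller could define its own code (the group name and descriptor values are illustrative, not part of this change; the fragment assumes net/http is imported):

// Sketch only: registering a custom error code at package init time.
var ErrorCodeQuotaExceeded = docker.Register("myregistry", docker.ErrorDescriptor{
	Value:          "QUOTAEXCEEDED",
	Message:        "storage quota exceeded",
	Description:    "Returned when an upload would exceed the repository quota.",
	HTTPStatusCode: http.StatusForbidden,
})

After registration, docker.ParseErrorCode("QUOTAEXCEEDED") resolves back to the new code, and unknown strings fall through to ErrorCodeUnknown.
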
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher.go
new file mode 100644
index 000000000000..2c7d880a9d13
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher.go
@@ -0,0 +1,358 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "compress/flate"
+ "compress/gzip"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/core/remotes"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+ "github.com/klauspost/compress/zstd"
+ digest "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type dockerFetcher struct {
+ *dockerBase
+}
+
+func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest))
+
+ hosts := r.filterHosts(HostCapabilityPull)
+ if len(hosts) == 0 {
+ return nil, fmt.Errorf("no pull hosts: %w", errdefs.ErrNotFound)
+ }
+
+ ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false)
+ if err != nil {
+ return nil, err
+ }
+
+ return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) {
+ // first, try to fetch via external URLs
+ for _, us := range desc.URLs {
+ u, err := url.Parse(us)
+ if err != nil {
+ log.G(ctx).WithError(err).Debugf("failed to parse %q", us)
+ continue
+ }
+ if u.Scheme != "http" && u.Scheme != "https" {
+ log.G(ctx).Debug("non-http(s) alternative url is unsupported")
+ continue
+ }
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u))
+ log.G(ctx).Info("request")
+
+ // Try this first, parse it
+ host := RegistryHost{
+ Client: http.DefaultClient,
+ Host: u.Host,
+ Scheme: u.Scheme,
+ Path: u.Path,
+ Capabilities: HostCapabilityPull,
+ }
+ req := r.request(host, http.MethodGet)
+ // Strip namespace from base
+ req.path = u.Path
+ if u.RawQuery != "" {
+ req.path = req.path + "?" + u.RawQuery
+ }
+
+ rc, err := r.open(ctx, req, desc.MediaType, offset)
+ if err != nil {
+ if errdefs.IsNotFound(err) {
+ continue // try one of the other urls.
+ }
+
+ return nil, err
+ }
+
+ return rc, nil
+ }
+
+ // Try manifest endpoints for manifest types
+ if images.IsManifestType(desc.MediaType) || images.IsIndexType(desc.MediaType) ||
+ desc.MediaType == images.MediaTypeDockerSchema1Manifest {
+
+ var firstErr error
+ for _, host := range r.hosts {
+ req := r.request(host, http.MethodGet, "manifests", desc.Digest.String())
+ if err := req.addNamespace(r.refspec.Hostname()); err != nil {
+ return nil, err
+ }
+
+ rc, err := r.open(ctx, req, desc.MediaType, offset)
+ if err != nil {
+ // Store the error for referencing later
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue // try another host
+ }
+
+ return rc, nil
+ }
+
+ return nil, firstErr
+ }
+
+ // Finally, fall back to the blobs endpoint
+ var firstErr error
+ for _, host := range r.hosts {
+ req := r.request(host, http.MethodGet, "blobs", desc.Digest.String())
+ if err := req.addNamespace(r.refspec.Hostname()); err != nil {
+ return nil, err
+ }
+
+ rc, err := r.open(ctx, req, desc.MediaType, offset)
+ if err != nil {
+ // Store the error for referencing later
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue // try another host
+ }
+
+ return rc, nil
+ }
+
+ if errdefs.IsNotFound(firstErr) {
+ firstErr = fmt.Errorf("could not fetch content descriptor %v (%v) from remote: %w",
+ desc.Digest, desc.MediaType, errdefs.ErrNotFound,
+ )
+ }
+
+ return nil, firstErr
+
+ })
+}
+
+func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, mediatype string, ps ...string) (*request, int64, error) {
+ headReq := r.request(host, http.MethodHead, ps...)
+ if err := headReq.addNamespace(r.refspec.Hostname()); err != nil {
+ return nil, 0, err
+ }
+
+ if mediatype == "" {
+ headReq.header.Set("Accept", "*/*")
+ } else {
+ headReq.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", "))
+ }
+
+ headResp, err := headReq.doWithRetries(ctx, nil)
+ if err != nil {
+ return nil, 0, err
+ }
+ if headResp.Body != nil {
+ headResp.Body.Close()
+ }
+ if headResp.StatusCode > 299 {
+ return nil, 0, fmt.Errorf("unexpected HEAD status code %v: %s", headReq.String(), headResp.Status)
+ }
+
+ getReq := r.request(host, http.MethodGet, ps...)
+ if err := getReq.addNamespace(r.refspec.Hostname()); err != nil {
+ return nil, 0, err
+ }
+ return getReq, headResp.ContentLength, nil
+}
+
+func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest, opts ...remotes.FetchByDigestOpts) (io.ReadCloser, ocispec.Descriptor, error) {
+ var desc ocispec.Descriptor
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", dgst))
+ var config remotes.FetchByDigestConfig
+ for _, o := range opts {
+ if err := o(ctx, &config); err != nil {
+ return nil, desc, err
+ }
+ }
+
+ hosts := r.filterHosts(HostCapabilityPull)
+ if len(hosts) == 0 {
+ return nil, desc, fmt.Errorf("no pull hosts: %w", errdefs.ErrNotFound)
+ }
+
+ ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false)
+ if err != nil {
+ return nil, desc, err
+ }
+
+ var (
+ getReq *request
+ sz int64
+ firstErr error
+ )
+
+ for _, host := range r.hosts {
+ getReq, sz, err = r.createGetReq(ctx, host, config.Mediatype, "blobs", dgst.String())
+ if err == nil {
+ break
+ }
+ // Store the error for referencing later
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ if getReq == nil {
+ // Fall back to the "manifests" endpoint
+ for _, host := range r.hosts {
+ getReq, sz, err = r.createGetReq(ctx, host, config.Mediatype, "manifests", dgst.String())
+ if err == nil {
+ break
+ }
+ // Store the error for referencing later
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ }
+
+ if getReq == nil {
+ if errdefs.IsNotFound(firstErr) {
+ firstErr = fmt.Errorf("could not fetch content %v from remote: %w", dgst, errdefs.ErrNotFound)
+ }
+ if firstErr == nil {
+ firstErr = fmt.Errorf("could not fetch content %v from remote: (unknown)", dgst)
+ }
+ return nil, desc, firstErr
+ }
+
+ seeker, err := newHTTPReadSeeker(sz, func(offset int64) (io.ReadCloser, error) {
+ return r.open(ctx, getReq, config.Mediatype, offset)
+ })
+ if err != nil {
+ return nil, desc, err
+ }
+
+ desc = ocispec.Descriptor{
+ MediaType: "application/octet-stream",
+ Digest: dgst,
+ Size: sz,
+ }
+ if config.Mediatype != "" {
+ desc.MediaType = config.Mediatype
+ }
+ return seeker, desc, nil
+}
+
+func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (_ io.ReadCloser, retErr error) {
+ if mediatype == "" {
+ req.header.Set("Accept", "*/*")
+ } else {
+ req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", "))
+ }
+ req.header.Set("Accept-Encoding", "zstd;q=1.0, gzip;q=0.8, deflate;q=0.5")
+
+ if offset > 0 {
+ // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints
+ // will return the header without supporting the range. The content
+ // range must always be checked.
+ req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
+ }
+
+ resp, err := req.doWithRetries(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if retErr != nil {
+ resp.Body.Close()
+ }
+ }()
+
+ if resp.StatusCode > 299 {
+ // TODO(stevvooe): When doing an offset-specific request, we should
+ // really distinguish between a 206 and a 200. In the case of 200, we
+ // can discard the bytes, hiding the seek behavior from the
+ // implementation.
+
+ if resp.StatusCode == http.StatusNotFound {
+ return nil, fmt.Errorf("content at %v not found: %w", req.String(), errdefs.ErrNotFound)
+ }
+ var registryErr Errors
+ if err := json.NewDecoder(resp.Body).Decode(&registryErr); err != nil || registryErr.Len() < 1 {
+ return nil, fmt.Errorf("unexpected status code %v: %v", req.String(), resp.Status)
+ }
+ return nil, fmt.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error())
+ }
+ if offset > 0 {
+ cr := resp.Header.Get("content-range")
+ if cr != "" {
+ if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) {
+ return nil, fmt.Errorf("unhandled content range in response: %v", cr)
+
+ }
+ } else {
+ // TODO: Should any cases where use of content range
+ // without the proper header be considered?
+ // 206 responses?
+
+ // Discard up to offset
+ // Could use buffer pool here but this case should be rare
+ n, err := io.Copy(io.Discard, io.LimitReader(resp.Body, offset))
+ if err != nil {
+ return nil, fmt.Errorf("failed to discard to offset: %w", err)
+ }
+ if n != offset {
+ return nil, errors.New("unable to discard to offset")
+ }
+
+ }
+ }
+
+ body := resp.Body
+ encoding := strings.FieldsFunc(resp.Header.Get("Content-Encoding"), func(r rune) bool {
+ return r == ' ' || r == '\t' || r == ','
+ })
+ for i := len(encoding) - 1; i >= 0; i-- {
+ algorithm := strings.ToLower(encoding[i])
+ switch algorithm {
+ case "zstd":
+ r, err := zstd.NewReader(body)
+ if err != nil {
+ return nil, err
+ }
+ body = r.IOReadCloser()
+ case "gzip":
+ body, err = gzip.NewReader(body)
+ if err != nil {
+ return nil, err
+ }
+ case "deflate":
+ body = flate.NewReader(body)
+ case "identity", "":
+ // no content-encoding applied, use raw body
+ default:
+ return nil, errors.New("unsupported Content-Encoding algorithm: " + algorithm)
+ }
+ }
+
+ return body, nil
+}
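
The fetcher above is not constructed directly; it is reached through the public resolver API. A minimal pull-side sketch under that assumption (the reference is a placeholder and error handling is abbreviated):

package main

import (
	"context"
	"io"
	"os"

	docker "github.com/containerd/containerd/v2/core/remotes/docker"
)

func main() {
	ctx := context.Background()
	resolver := docker.NewResolver(docker.ResolverOptions{})

	// Resolve a name to a descriptor, then fetch the manifest bytes.
	name, desc, err := resolver.Resolve(ctx, "docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	fetcher, err := resolver.Fetcher(ctx, name)
	if err != nil {
		panic(err)
	}
	rc, err := fetcher.Fetch(ctx, desc)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	// Raw manifest, transparently decoded per Content-Encoding by open().
	_, _ = io.Copy(os.Stdout, rc)
}
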
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher_fuzz.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher_fuzz.go
new file mode 100644
index 000000000000..5da9dca7a63d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher_fuzz.go
@@ -0,0 +1,75 @@
+//go:build gofuzz
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strconv"
+)
+
+func FuzzFetcher(data []byte) int {
+ dataLen := len(data)
+ if dataLen == 0 {
+ return -1
+ }
+
+ s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ rw.Header().Set("content-range", fmt.Sprintf("bytes %d-%d/%d", 0, dataLen-1, dataLen))
+ rw.Header().Set("content-length", strconv.Itoa(dataLen))
+ rw.Write(data)
+ }))
+ defer s.Close()
+
+ u, err := url.Parse(s.URL)
+ if err != nil {
+ return 0
+ }
+
+ f := dockerFetcher{&dockerBase{
+ repository: "nonempty",
+ }}
+ host := RegistryHost{
+ Client: s.Client(),
+ Host: u.Host,
+ Scheme: u.Scheme,
+ Path: u.Path,
+ }
+
+ ctx := context.Background()
+ req := f.request(host, http.MethodGet)
+ rc, err := f.open(ctx, req, "", 0)
+ if err != nil {
+ return 0
+ }
+ b, err := io.ReadAll(rc)
+ if err != nil {
+ return 0
+ }
+
+ expected := data
+ if len(b) != len(expected) {
+ panic("len of request is not equal to len of expected but should be")
+ }
+ return 1
+}
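
Since the harness above only compiles under the gofuzz build tag, a quick local smoke test has to opt into the same tag. A sketch of such a driver (illustrative, not part of the vendored code):

//go:build gofuzz

package docker

import "testing"

// TestFuzzFetcherSmoke feeds a fixed payload through the harness; a return
// value of 1 indicates the fetch path round-tripped the data.
func TestFuzzFetcherSmoke(t *testing.T) {
	if ret := FuzzFetcher([]byte("hello")); ret != 1 {
		t.Fatalf("harness returned %d, expected 1", ret)
	}
}

Run with `go test -tags gofuzz` inside the package to exercise it.
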
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/handler.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/handler.go
new file mode 100644
index 000000000000..615869a22457
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/handler.go
@@ -0,0 +1,149 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/pkg/labels"
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/log"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// AppendDistributionSourceLabel updates the label of blob with distribution source.
+func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) {
+ refspec, err := reference.Parse(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ u, err := url.Parse("dummy://" + refspec.Locator)
+ if err != nil {
+ return nil, err
+ }
+
+ source, repo := u.Hostname(), strings.TrimPrefix(u.Path, "/")
+ return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ info, err := manager.Info(ctx, desc.Digest)
+ if err != nil {
+ return nil, err
+ }
+
+ key := distributionSourceLabelKey(source)
+
+ originLabel := ""
+ if info.Labels != nil {
+ originLabel = info.Labels[key]
+ }
+ value := appendDistributionSourceLabel(originLabel, repo)
+
+ // The repo name is limited to under 256 characters, and the
+ // distribution label might hit the label-size limit when the blob
+ // is shared as a very common layer.
+ if err := labels.Validate(key, value); err != nil {
+ log.G(ctx).Warnf("skip to append distribution label: %s", err)
+ return nil, nil
+ }
+
+ info = content.Info{
+ Digest: desc.Digest,
+ Labels: map[string]string{
+ key: value,
+ },
+ }
+ _, err = manager.Update(ctx, info, fmt.Sprintf("labels.%s", key))
+ return nil, err
+ }, nil
+}
+
+func appendDistributionSourceLabel(originLabel, repo string) string {
+ repos := []string{}
+ if originLabel != "" {
+ repos = strings.Split(originLabel, ",")
+ }
+ repos = append(repos, repo)
+
+ // use the empty string to represent duplicate items
+ for i := 1; i < len(repos); i++ {
+ tmp, j := repos[i], i-1
+ for ; j >= 0 && repos[j] >= tmp; j-- {
+ if repos[j] == tmp {
+ tmp = ""
+ }
+ repos[j+1] = repos[j]
+ }
+ repos[j+1] = tmp
+ }
+
+ i := 0
+ for ; i < len(repos) && repos[i] == ""; i++ {
+ }
+
+ return strings.Join(repos[i:], ",")
+}
+
+func distributionSourceLabelKey(source string) string {
+ return labels.LabelDistributionSource + "." + source
+}
+
+// selectRepositoryMountCandidate selects the repo which shares the longest
+// common prefix components with the target as the candidate.
+func selectRepositoryMountCandidate(refspec reference.Spec, sources map[string]string) string {
+ u, err := url.Parse("dummy://" + refspec.Locator)
+ if err != nil {
+ // NOTE: parsing a locator should never fail here in practice
+ return ""
+ }
+
+ source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/")
+ repoLabel, ok := sources[distributionSourceLabelKey(source)]
+ if !ok || repoLabel == "" {
+ return ""
+ }
+
+ n, match := 0, ""
+ components := strings.Split(target, "/")
+ for _, repo := range strings.Split(repoLabel, ",") {
+ // the target repo is not a candidate
+ if repo == target {
+ continue
+ }
+
+ if l := commonPrefixComponents(components, repo); l >= n {
+ n, match = l, repo
+ }
+ }
+ return match
+}
+
+func commonPrefixComponents(components []string, target string) int {
+ targetComponents := strings.Split(target, "/")
+
+ i := 0
+ for ; i < len(components) && i < len(targetComponents); i++ {
+ if components[i] != targetComponents[i] {
+ break
+ }
+ }
+ return i
+}
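
The merge in appendDistributionSourceLabel is an insertion sort that blanks out duplicates before joining, so the result is sorted and de-duplicated. A package-internal sketch of the expected behavior, for a _test.go file importing "testing" (test name and values are illustrative):

// Sketch: the merge sorts and de-duplicates the comma-separated repos.
func TestAppendDistributionSourceLabelSketch(t *testing.T) {
	got := appendDistributionSourceLabel("library/busybox,library/alpine", "library/alpine")
	if got != "library/alpine,library/busybox" {
		t.Fatalf("unexpected merge result: %q", got)
	}
}
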
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/httpreadseeker.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/httpreadseeker.go
new file mode 100644
index 000000000000..6739e7904e1b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/httpreadseeker.go
@@ -0,0 +1,178 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+)
+
+const maxRetry = 3
+
+type httpReadSeeker struct {
+ size int64
+ offset int64
+ rc io.ReadCloser
+ open func(offset int64) (io.ReadCloser, error)
+ closed bool
+
+ errsWithNoProgress int
+}
+
+func newHTTPReadSeeker(size int64, open func(offset int64) (io.ReadCloser, error)) (io.ReadCloser, error) {
+ return &httpReadSeeker{
+ size: size,
+ open: open,
+ }, nil
+}
+
+func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
+ if hrs.closed {
+ return 0, io.EOF
+ }
+
+ rd, err := hrs.reader()
+ if err != nil {
+ return 0, err
+ }
+
+ n, err = rd.Read(p)
+ hrs.offset += int64(n)
+ if n > 0 || err == nil {
+ hrs.errsWithNoProgress = 0
+ }
+ if err == io.ErrUnexpectedEOF {
+ // connection closed unexpectedly. try reconnecting.
+ if n == 0 {
+ hrs.errsWithNoProgress++
+ if hrs.errsWithNoProgress > maxRetry {
+ return // too many retries for this offset with no progress
+ }
+ }
+ if hrs.rc != nil {
+ if clsErr := hrs.rc.Close(); clsErr != nil {
+ log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser")
+ }
+ hrs.rc = nil
+ }
+ if _, err2 := hrs.reader(); err2 == nil {
+ return n, nil
+ }
+ } else if err == io.EOF {
+ // The CRI's imagePullProgressTimeout relies on responseBody.Close to
+ // update the process monitor's status. If the err is io.EOF, close
+ // the connection since there is no more available data.
+ if hrs.rc != nil {
+ if clsErr := hrs.rc.Close(); clsErr != nil {
+ log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser after io.EOF")
+ }
+ hrs.rc = nil
+ }
+ }
+ return
+}
+
+func (hrs *httpReadSeeker) Close() error {
+ if hrs.closed {
+ return nil
+ }
+ hrs.closed = true
+ if hrs.rc != nil {
+ return hrs.rc.Close()
+ }
+
+ return nil
+}
+
+func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ if hrs.closed {
+ return 0, fmt.Errorf("Fetcher.Seek: closed: %w", errdefs.ErrUnavailable)
+ }
+
+ abs := hrs.offset
+ switch whence {
+ case io.SeekStart:
+ abs = offset
+ case io.SeekCurrent:
+ abs += offset
+ case io.SeekEnd:
+ if hrs.size == -1 {
+ return 0, fmt.Errorf("Fetcher.Seek: unknown size, cannot seek from end: %w", errdefs.ErrUnavailable)
+ }
+ abs = hrs.size + offset
+ default:
+ return 0, fmt.Errorf("Fetcher.Seek: invalid whence: %w", errdefs.ErrInvalidArgument)
+ }
+
+ if abs < 0 {
+ return 0, fmt.Errorf("Fetcher.Seek: negative offset: %w", errdefs.ErrInvalidArgument)
+ }
+
+ if abs != hrs.offset {
+ if hrs.rc != nil {
+ if err := hrs.rc.Close(); err != nil {
+ log.L.WithError(err).Error("Fetcher.Seek: failed to close ReadCloser")
+ }
+
+ hrs.rc = nil
+ }
+
+ hrs.offset = abs
+ }
+
+ return hrs.offset, nil
+}
+
+func (hrs *httpReadSeeker) reader() (io.Reader, error) {
+ if hrs.rc != nil {
+ return hrs.rc, nil
+ }
+
+ if hrs.size == -1 || hrs.offset < hrs.size {
+ // only try to reopen the body request if we are seeking to a value
+ // less than the actual size.
+ if hrs.open == nil {
+ return nil, fmt.Errorf("cannot open: %w", errdefs.ErrNotImplemented)
+ }
+
+ rc, err := hrs.open(hrs.offset)
+ if err != nil {
+ return nil, fmt.Errorf("httpReadSeeker: failed open: %w", err)
+ }
+
+ if hrs.rc != nil {
+ if err := hrs.rc.Close(); err != nil {
+ log.L.WithError(err).Error("httpReadSeeker: failed to close ReadCloser")
+ }
+ }
+ hrs.rc = rc
+ } else {
+ // There is an edge case here where offset == size of the content. If
+ // we seek, we will probably get an error for content that cannot be
+ // sought (?). In that case, we should err on committing the content,
+ // as the length is already satisfied but we just return the empty
+ // reader instead.
+
+ hrs.rc = io.NopCloser(bytes.NewReader([]byte{}))
+ }
+
+ return hrs.rc, nil
+}
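
newHTTPReadSeeker never issues a request itself; Seek only records the offset and drops the current body, and the next Read invokes the open callback at that offset. A package-internal sketch of that contract, serving from an in-memory buffer instead of HTTP (illustrative, for a _test.go file importing "bytes", "io" and "testing"):

// Sketch: Seek discards the body; the next Read reopens at the new offset.
func TestHTTPReadSeekerSketch(t *testing.T) {
	data := []byte("0123456789")
	rs, err := newHTTPReadSeeker(int64(len(data)), func(offset int64) (io.ReadCloser, error) {
		return io.NopCloser(bytes.NewReader(data[offset:])), nil
	})
	if err != nil {
		t.Fatal(err)
	}

	if _, err := rs.(io.Seeker).Seek(5, io.SeekStart); err != nil {
		t.Fatal(err)
	}
	rest, err := io.ReadAll(rs)
	if err != nil {
		t.Fatal(err)
	}
	if string(rest) != "56789" {
		t.Fatalf("got %q, want %q", rest, "56789")
	}
}
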
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/pusher.go
new file mode 100644
index 000000000000..f994fff5a8c7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/pusher.go
@@ -0,0 +1,558 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/core/remotes"
+ remoteserrors "github.com/containerd/containerd/v2/core/remotes/errors"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+ digest "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type dockerPusher struct {
+ *dockerBase
+ object string
+
+ // TODO: namespace tracker
+ tracker StatusTracker
+}
+
+// Writer implements the Ingester API of the content store. This allows the
+// client to receive ErrUnavailable when there is already an on-going upload.
+// Note that the tracker MUST implement the StatusTrackLocker interface to
+// avoid race conditions on the StatusTracker.
+func (p dockerPusher) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
+ var wOpts content.WriterOpts
+ for _, opt := range opts {
+ if err := opt(&wOpts); err != nil {
+ return nil, err
+ }
+ }
+ if wOpts.Ref == "" {
+ return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument)
+ }
+ return p.push(ctx, wOpts.Desc, wOpts.Ref, true)
+}
+
+func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
+ return p.push(ctx, desc, remotes.MakeRefKey(ctx, desc), false)
+}
+
+func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref string, unavailableOnFail bool) (content.Writer, error) {
+ if l, ok := p.tracker.(StatusTrackLocker); ok {
+ l.Lock(ref)
+ defer l.Unlock(ref)
+ }
+ ctx, err := ContextWithRepositoryScope(ctx, p.refspec, true)
+ if err != nil {
+ return nil, err
+ }
+ status, err := p.tracker.GetStatus(ref)
+ if err == nil {
+ if status.Committed && status.Offset == status.Total {
+ return nil, fmt.Errorf("ref %v: %w", ref, errdefs.ErrAlreadyExists)
+ }
+ if unavailableOnFail && status.ErrClosed == nil {
+ // Another push of this ref is happening elsewhere. The rest of the
+ // function will continue only when `errdefs.IsNotFound(err) == true`
+ // (i.e. there is no actively-tracked ref already).
+ return nil, fmt.Errorf("push is on-going: %w", errdefs.ErrUnavailable)
+ }
+ // TODO: Handle incomplete status
+ } else if !errdefs.IsNotFound(err) {
+ return nil, fmt.Errorf("failed to get status: %w", err)
+ }
+
+ hosts := p.filterHosts(HostCapabilityPush)
+ if len(hosts) == 0 {
+ return nil, fmt.Errorf("no push hosts: %w", errdefs.ErrNotFound)
+ }
+
+ var (
+ isManifest bool
+ existCheck []string
+ host = hosts[0]
+ )
+
+ if images.IsManifestType(desc.MediaType) || images.IsIndexType(desc.MediaType) {
+ isManifest = true
+ existCheck = getManifestPath(p.object, desc.Digest)
+ } else {
+ existCheck = []string{"blobs", desc.Digest.String()}
+ }
+
+ req := p.request(host, http.MethodHead, existCheck...)
+ req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*/*`}, ", "))
+
+ log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to")
+
+ resp, err := req.doWithRetries(ctx, nil)
+ if err != nil {
+ if !errors.Is(err, ErrInvalidAuthorization) {
+ return nil, err
+ }
+ log.G(ctx).WithError(err).Debugf("Unable to check existence, continuing with push")
+ } else {
+ if resp.StatusCode == http.StatusOK {
+ var exists bool
+ if isManifest && existCheck[1] != desc.Digest.String() {
+ dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
+ if dgstHeader == desc.Digest {
+ exists = true
+ }
+ } else {
+ exists = true
+ }
+
+ if exists {
+ p.tracker.SetStatus(ref, Status{
+ Committed: true,
+ PushStatus: PushStatus{
+ Exists: true,
+ },
+ Status: content.Status{
+ Ref: ref,
+ // TODO: Set updated time?
+ },
+ })
+ resp.Body.Close()
+ return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists)
+ }
+ } else if resp.StatusCode != http.StatusNotFound {
+ err := remoteserrors.NewUnexpectedStatusErr(resp)
+ log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
+ resp.Body.Close()
+ return nil, err
+ }
+ resp.Body.Close()
+ }
+
+ if isManifest {
+ putPath := getManifestPath(p.object, desc.Digest)
+ req = p.request(host, http.MethodPut, putPath...)
+ req.header.Add("Content-Type", desc.MediaType)
+ } else {
+ // Start upload request
+ req = p.request(host, http.MethodPost, "blobs", "uploads/")
+
+ mountedFrom := ""
+ var resp *http.Response
+ if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" {
+ preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo)
+ pctx := ContextWithAppendPullRepositoryScope(ctx, fromRepo)
+
+ // NOTE: fromRepo might be a private repo, and the auth
+ // service can still grant a token without error, but the
+ // POST request will then fail with a 401.
+ //
+ // For a private repo, we should remove the mount-from
+ // query and send the request again.
+ resp, err = preq.doWithRetries(pctx, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ switch resp.StatusCode {
+ case http.StatusUnauthorized:
+ log.G(ctx).Debugf("failed to mount from repository %s", fromRepo)
+
+ resp.Body.Close()
+ resp = nil
+ case http.StatusCreated:
+ mountedFrom = path.Join(p.refspec.Hostname(), fromRepo)
+ }
+ }
+
+ if resp == nil {
+ resp, err = req.doWithRetries(ctx, nil)
+ if err != nil {
+ if errors.Is(err, ErrInvalidAuthorization) {
+ return nil, fmt.Errorf("push access denied, repository does not exist or may require authorization: %w", err)
+ }
+ return nil, err
+ }
+ }
+ defer resp.Body.Close()
+
+ switch resp.StatusCode {
+ case http.StatusOK, http.StatusAccepted, http.StatusNoContent:
+ case http.StatusCreated:
+ p.tracker.SetStatus(ref, Status{
+ Committed: true,
+ PushStatus: PushStatus{
+ MountedFrom: mountedFrom,
+ },
+ Status: content.Status{
+ Ref: ref,
+ Total: desc.Size,
+ Offset: desc.Size,
+ },
+ })
+ return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists)
+ default:
+ err := remoteserrors.NewUnexpectedStatusErr(resp)
+ log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
+ return nil, err
+ }
+
+ var (
+ location = resp.Header.Get("Location")
+ lurl *url.URL
+ lhost = host
+ )
+ // Support paths without host in location
+ if strings.HasPrefix(location, "/") {
+ lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse location %v: %w", location, err)
+ }
+ } else {
+ if !strings.Contains(location, "://") {
+ location = lhost.Scheme + "://" + location
+ }
+ lurl, err = url.Parse(location)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse location %v: %w", location, err)
+ }
+
+ if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme {
+ lhost.Scheme = lurl.Scheme
+ lhost.Host = lurl.Host
+
+ // Check if different than what was requested, accounting for fallback in the transport layer
+ requested := resp.Request.URL
+ if requested.Host != lhost.Host || requested.Scheme != lhost.Scheme {
+ // Strip authorizer if change to host or scheme
+ lhost.Authorizer = nil
+ log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination, authorizer removed")
+ }
+ }
+ }
+ q := lurl.Query()
+ q.Add("digest", desc.Digest.String())
+
+ req = p.request(lhost, http.MethodPut)
+ req.header.Set("Content-Type", "application/octet-stream")
+ req.path = lurl.Path + "?" + q.Encode()
+ }
+ p.tracker.SetStatus(ref, Status{
+ Status: content.Status{
+ Ref: ref,
+ Total: desc.Size,
+ Expected: desc.Digest,
+ StartedAt: time.Now(),
+ },
+ })
+
+ // TODO: Support chunked upload
+
+ pushw := newPushWriter(p.dockerBase, ref, desc.Digest, p.tracker, isManifest)
+
+ req.body = func() (io.ReadCloser, error) {
+ pr, pw := io.Pipe()
+ pushw.setPipe(pw)
+ return pr, nil
+ }
+ req.size = desc.Size
+
+ go func() {
+ resp, err := req.doWithRetries(ctx, nil)
+ if err != nil {
+ pushw.setError(err)
+ return
+ }
+
+ switch resp.StatusCode {
+ case http.StatusOK, http.StatusCreated, http.StatusNoContent:
+ default:
+ err := remoteserrors.NewUnexpectedStatusErr(resp)
+ log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
+ pushw.setError(err)
+ return
+ }
+ pushw.setResponse(resp)
+ }()
+
+ return pushw, nil
+}
+
+func getManifestPath(object string, dgst digest.Digest) []string {
+ if i := strings.IndexByte(object, '@'); i >= 0 {
+ if object[i+1:] != dgst.String() {
+ // use digest, not tag
+ object = ""
+ } else {
+ // strip @ for registry path to make tag
+ object = object[:i]
+ }
+
+ }
+
+ if object == "" {
+ return []string{"manifests", dgst.String()}
+ }
+
+ return []string{"manifests", object}
+}
+
+type pushWriter struct {
+ base *dockerBase
+ ref string
+
+ pipe *io.PipeWriter
+
+ done chan struct{}
+ closeOnce sync.Once
+
+ pipeC chan *io.PipeWriter
+ respC chan *http.Response
+ errC chan error
+
+ isManifest bool
+
+ expected digest.Digest
+ tracker StatusTracker
+}
+
+func newPushWriter(db *dockerBase, ref string, expected digest.Digest, tracker StatusTracker, isManifest bool) *pushWriter {
+ // Initialize and create response
+ return &pushWriter{
+ base: db,
+ ref: ref,
+ expected: expected,
+ tracker: tracker,
+ pipeC: make(chan *io.PipeWriter, 1),
+ respC: make(chan *http.Response, 1),
+ errC: make(chan error, 1),
+ done: make(chan struct{}),
+ isManifest: isManifest,
+ }
+}
+
+func (pw *pushWriter) setPipe(p *io.PipeWriter) {
+ select {
+ case <-pw.done:
+ case pw.pipeC <- p:
+ }
+}
+
+func (pw *pushWriter) setError(err error) {
+ select {
+ case <-pw.done:
+ case pw.errC <- err:
+ }
+}
+
+func (pw *pushWriter) setResponse(resp *http.Response) {
+ select {
+ case <-pw.done:
+ case pw.respC <- resp:
+ }
+}
+
+func (pw *pushWriter) replacePipe(p *io.PipeWriter) error {
+ if pw.pipe == nil {
+ pw.pipe = p
+ return nil
+ }
+
+ pw.pipe.CloseWithError(content.ErrReset)
+ pw.pipe = p
+
+ // If content has already been written, the bytes
+ // cannot be written again and the caller must reset
+ status, err := pw.tracker.GetStatus(pw.ref)
+ if err != nil {
+ return err
+ }
+ status.Offset = 0
+ status.UpdatedAt = time.Now()
+ pw.tracker.SetStatus(pw.ref, status)
+ return content.ErrReset
+}
+
+func (pw *pushWriter) Write(p []byte) (n int, err error) {
+ status, err := pw.tracker.GetStatus(pw.ref)
+ if err != nil {
+ return n, err
+ }
+
+ if pw.pipe == nil {
+ select {
+ case <-pw.done:
+ return 0, io.ErrClosedPipe
+ case p := <-pw.pipeC:
+ pw.replacePipe(p)
+ }
+ } else {
+ select {
+ case <-pw.done:
+ return 0, io.ErrClosedPipe
+ case p := <-pw.pipeC:
+ return 0, pw.replacePipe(p)
+ default:
+ }
+ }
+
+ n, err = pw.pipe.Write(p)
+ if errors.Is(err, io.ErrClosedPipe) {
+ // if the pipe is closed, we might have the original error on the error
+ // channel - so we should try and get it
+ select {
+ case <-pw.done:
+ case err = <-pw.errC:
+ pw.Close()
+ case p := <-pw.pipeC:
+ return 0, pw.replacePipe(p)
+ case resp := <-pw.respC:
+ pw.setResponse(resp)
+ }
+ }
+ status.Offset += int64(n)
+ status.UpdatedAt = time.Now()
+ pw.tracker.SetStatus(pw.ref, status)
+ return
+}
+
+func (pw *pushWriter) Close() error {
+ // Ensure the done channel is closed, while handling `Close()` being
+ // called multiple times without panicking
+ pw.closeOnce.Do(func() {
+ close(pw.done)
+ })
+ if pw.pipe != nil {
+ status, err := pw.tracker.GetStatus(pw.ref)
+ if err == nil && !status.Committed {
+ // Closing an incomplete writer. Record this as an error so that following write can retry it.
+ status.ErrClosed = errors.New("closed incomplete writer")
+ pw.tracker.SetStatus(pw.ref, status)
+ }
+ return pw.pipe.Close()
+ }
+ return nil
+}
+
+func (pw *pushWriter) Status() (content.Status, error) {
+ status, err := pw.tracker.GetStatus(pw.ref)
+ if err != nil {
+ return content.Status{}, err
+ }
+ return status.Status, nil
+
+}
+
+func (pw *pushWriter) Digest() digest.Digest {
+ // TODO: Get rid of this function?
+ return pw.expected
+}
+
+func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+ // Check whether read has already thrown an error
+ if _, err := pw.pipe.Write([]byte{}); err != nil && !errors.Is(err, io.ErrClosedPipe) {
+ return fmt.Errorf("pipe error before commit: %w", err)
+ }
+
+ if err := pw.pipe.Close(); err != nil {
+ return err
+ }
+ // TODO: timeout waiting for response
+ var resp *http.Response
+ select {
+ case <-pw.done:
+ return io.ErrClosedPipe
+ case err := <-pw.errC:
+ pw.Close()
+ return err
+ case resp = <-pw.respC:
+ defer resp.Body.Close()
+ case p := <-pw.pipeC:
+ // check whether the pipe has changed in the commit, because sometimes Write
+ // can complete successfully, but the pipe may have changed. In that case, the
+ // content needs to be reset.
+ return pw.replacePipe(p)
+ }
+
+ // 201 is the specified return status; some registries return
+ // 200, 202 or 204.
+ switch resp.StatusCode {
+ case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted:
+ default:
+ return remoteserrors.NewUnexpectedStatusErr(resp)
+ }
+
+ status, err := pw.tracker.GetStatus(pw.ref)
+ if err != nil {
+ return fmt.Errorf("failed to get status: %w", err)
+ }
+
+ if size > 0 && size != status.Offset {
+ return fmt.Errorf("unexpected size %d, expected %d", status.Offset, size)
+ }
+
+ if expected == "" {
+ expected = status.Expected
+ }
+
+ actual, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
+ if err != nil {
+ return fmt.Errorf("invalid content digest in response: %w", err)
+ }
+
+ if actual != expected {
+ return fmt.Errorf("got digest %s, expected %s", actual, expected)
+ }
+
+ status.Committed = true
+ status.UpdatedAt = time.Now()
+ pw.tracker.SetStatus(pw.ref, status)
+
+ return nil
+}
+
+func (pw *pushWriter) Truncate(size int64) error {
+ // TODO: for blobs, close the request and start a new request at the offset
+ // TODO: always error on manifest
+ return errors.New("cannot truncate remote upload")
+}
+
+func requestWithMountFrom(req *request, mount, from string) *request {
+ creq := *req
+
+ sep := "?"
+ if strings.Contains(creq.path, sep) {
+ sep = "&"
+ }
+
+ creq.path = creq.path + sep + "mount=" + mount + "&from=" + from
+
+ return &creq
+}
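
On the push side the writer is likewise obtained through the resolver; Write streams through the pipe into the PUT request, and Commit blocks for the registry response and verifies size and digest. A hedged sketch of the calling pattern (the reference and blob are placeholders):

package main

import (
	"context"

	docker "github.com/containerd/containerd/v2/core/remotes/docker"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func push(ctx context.Context, ref string, desc ocispec.Descriptor, blob []byte) error {
	resolver := docker.NewResolver(docker.ResolverOptions{})
	pusher, err := resolver.Pusher(ctx, ref)
	if err != nil {
		return err
	}
	// Push returns errdefs.ErrAlreadyExists if the content is on the remote.
	w, err := pusher.Push(ctx, desc)
	if err != nil {
		return err
	}
	defer w.Close()
	if _, err := w.Write(blob); err != nil {
		return err
	}
	// Commit waits for the response and checks the registry-reported digest.
	return w.Commit(ctx, desc.Size, desc.Digest)
}

func main() {
	blob := []byte("example blob")
	desc := ocispec.Descriptor{
		MediaType: "application/octet-stream",
		Digest:    digest.FromBytes(blob),
		Size:      int64(len(blob)),
	}
	_ = push(context.Background(), "registry.example.com/repo:latest", desc, blob)
}
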
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/registry.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/registry.go
new file mode 100644
index 000000000000..98cafcd069e6
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/registry.go
@@ -0,0 +1,244 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "errors"
+ "net"
+ "net/http"
+)
+
+// HostCapabilities represent the capabilities of the registry
+// host. This also represents the set of operations for which
+// the registry host may be trusted to perform.
+//
+// For example, pushing is a capability which should only be
+// performed on an upstream source, not a mirror.
+// Resolving (the process of converting a name into a digest)
+// must be considered a trusted operation and only done by
+// a host which is trusted (or, preferably, by a secure process
+// which can prove the provenance of the mapping). A public
+// mirror should never be trusted to do a resolve action.
+//
+// | Registry Type | Pull | Resolve | Push |
+// |------------------|------|---------|------|
+// | Public Registry | yes | yes | yes |
+// | Private Registry | yes | yes | yes |
+// | Public Mirror | yes | no | no |
+// | Private Mirror | yes | yes | no |
+type HostCapabilities uint8
+
+const (
+ // HostCapabilityPull represents the capability to fetch manifests
+ // and blobs by digest
+ HostCapabilityPull HostCapabilities = 1 << iota
+
+ // HostCapabilityResolve represents the capability to fetch manifests
+ // by name
+ HostCapabilityResolve
+
+ // HostCapabilityPush represents the capability to push blobs and
+ // manifests
+ HostCapabilityPush
+
+ // Reserved for future capabilities (i.e. search, catalog, remove)
+)
+
+// Has checks whether the capabilities list has the provided capability
+func (c HostCapabilities) Has(t HostCapabilities) bool {
+ return c&t == t
+}
+
+// RegistryHost represents a complete configuration for a registry
+// host, representing the capabilities, authorizations, connection
+// configuration, and location.
+type RegistryHost struct {
+ Client *http.Client
+ Authorizer Authorizer
+ Host string
+ Scheme string
+ Path string
+ Capabilities HostCapabilities
+ Header http.Header
+}
+
+func (h RegistryHost) isProxy(refhost string) bool {
+ if refhost != h.Host {
+ if refhost != "docker.io" || h.Host != "registry-1.docker.io" {
+ return true
+ }
+ }
+ return false
+}
+
+// RegistryHosts fetches the registry hosts for a given namespace,
+// provided by the host component of a distribution image reference.
+type RegistryHosts func(string) ([]RegistryHost, error)
+
+// Registries joins multiple registry configuration functions, using the same
+// order as provided within the arguments. When an empty registry configuration
+// is returned with a nil error, the next function will be called.
+// NOTE: This function will not join configurations; as soon as a non-empty
+// configuration is returned from a configuration function, it will be returned
+// to the caller.
+func Registries(registries ...RegistryHosts) RegistryHosts {
+ return func(host string) ([]RegistryHost, error) {
+ for _, registry := range registries {
+ config, err := registry(host)
+ if err != nil {
+ return config, err
+ }
+ if len(config) > 0 {
+ return config, nil
+ }
+ }
+ return nil, nil
+ }
+}
+
+type registryOpts struct {
+ authorizer Authorizer
+ plainHTTP func(string) (bool, error)
+ host func(string) (string, error)
+ client *http.Client
+}
+
+// RegistryOpt defines a registry default option
+type RegistryOpt func(*registryOpts)
+
+// WithPlainHTTP configures registries to use plaintext http scheme
+// for the provided host match function.
+func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt {
+ return func(opts *registryOpts) {
+ opts.plainHTTP = f
+ }
+}
+
+// WithAuthorizer configures the default authorizer for a registry
+func WithAuthorizer(a Authorizer) RegistryOpt {
+ return func(opts *registryOpts) {
+ opts.authorizer = a
+ }
+}
+
+// WithHostTranslator defines the default translator to use for registry hosts
+func WithHostTranslator(h func(string) (string, error)) RegistryOpt {
+ return func(opts *registryOpts) {
+ opts.host = h
+ }
+}
+
+// WithClient configures the default http client for a registry
+func WithClient(c *http.Client) RegistryOpt {
+ return func(opts *registryOpts) {
+ opts.client = c
+ }
+}
+
+// ConfigureDefaultRegistries is used to create a default configuration for
+// registries. For more advanced configurations or per-domain setups,
+// the RegistryHosts interface should be used directly.
+// NOTE: This function will always return a non-empty value or error
+func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts {
+ var opts registryOpts
+ for _, opt := range ropts {
+ opt(&opts)
+ }
+
+ return func(host string) ([]RegistryHost, error) {
+ config := RegistryHost{
+ Client: opts.client,
+ Authorizer: opts.authorizer,
+ Host: host,
+ Scheme: "https",
+ Path: "/v2",
+ Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush,
+ }
+
+ if config.Client == nil {
+ config.Client = http.DefaultClient
+ }
+
+ if opts.plainHTTP != nil {
+ match, err := opts.plainHTTP(host)
+ if err != nil {
+ return nil, err
+ }
+ if match {
+ config.Scheme = "http"
+ }
+ }
+
+ if opts.host != nil {
+ var err error
+ config.Host, err = opts.host(config.Host)
+ if err != nil {
+ return nil, err
+ }
+ } else if host == "docker.io" {
+ config.Host = "registry-1.docker.io"
+ }
+
+ return []RegistryHost{config}, nil
+ }
+}
+
+// MatchAllHosts is a host match function which is always true.
+func MatchAllHosts(string) (bool, error) {
+ return true, nil
+}
+
+// MatchLocalhost is a host match function which returns true for
+// localhost.
+//
+// Note: this does not handle matching of ip addresses in octal,
+// decimal or hex form.
+func MatchLocalhost(host string) (bool, error) {
+ switch {
+ case host == "::1":
+ return true, nil
+ case host == "[::1]":
+ return true, nil
+ }
+ h, p, err := net.SplitHostPort(host)
+
+ // addrError helps distinguish between errors of the form
+ // "no colon in address" and "too many colons in address".
+ // The former is fine, as the host string need not have a
+ // port. The latter needs to be handled.
+ addrError := &net.AddrError{
+ Err: "missing port in address",
+ Addr: host,
+ }
+ if err != nil {
+ if err.Error() != addrError.Error() {
+ return false, err
+ }
+ // host string without any port specified
+ h = host
+ } else if len(p) == 0 {
+ return false, errors.New("invalid host name format")
+ }
+
+ // use ipv4 dotted decimal for further checking
+ if h == "localhost" {
+ h = "127.0.0.1"
+ }
+ ip := net.ParseIP(h)
+
+ return ip.IsLoopback(), nil
+}
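
ConfigureDefaultRegistries composes these options into a RegistryHosts function, and MatchLocalhost pairs naturally with WithPlainHTTP for local test registries. A brief sketch of the resulting host configurations:

package main

import (
	"fmt"

	docker "github.com/containerd/containerd/v2/core/remotes/docker"
)

func main() {
	// Plain HTTP for loopback hosts, HTTPS everywhere else; docker.io
	// is rewritten to registry-1.docker.io by the default host logic.
	hosts := docker.ConfigureDefaultRegistries(
		docker.WithPlainHTTP(docker.MatchLocalhost),
	)

	for _, h := range []string{"localhost:5000", "docker.io"} {
		cfg, err := hosts(h)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s://%s%s\n", cfg[0].Scheme, cfg[0].Host, cfg[0].Path)
	}
	// Expected: http://localhost:5000/v2 and https://registry-1.docker.io/v2
}
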
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver.go
new file mode 100644
index 000000000000..03f5b3693f5f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver.go
@@ -0,0 +1,799 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "sync"
+
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/core/remotes"
+ "github.com/containerd/containerd/v2/core/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
+ remoteerrors "github.com/containerd/containerd/v2/core/remotes/errors"
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/containerd/v2/pkg/tracing"
+ "github.com/containerd/containerd/v2/version"
+)
+
+var (
+ // ErrInvalidAuthorization is used when credentials are passed to a server but
+ // those credentials are rejected.
+ ErrInvalidAuthorization = errors.New("authorization failed")
+
+ // MaxManifestSize represents the largest size accepted from a registry
+ // during resolution. Larger manifests may be accepted using a
+ // resolution method other than the registry.
+ //
+ // NOTE: Some runtimes support at most 128 layers, and individual
+ // layer descriptors will not contribute more than 256 bytes each,
+ // making 32K bytes a reasonable limit for large image manifests.
+ // 4M bytes represents a much larger upper bound for images which may
+ // contain large annotations or be non-images. A proper manifest
+ // design puts large metadata in subobjects, as is consistent with the
+ // intent of the manifest design.
+ MaxManifestSize int64 = 4 * 1048 * 1048
+)
+
+// Authorizer is used to authorize HTTP requests based on 401 HTTP responses.
+// An Authorizer is responsible for caching tokens or credentials used by
+// requests.
+type Authorizer interface {
+ // Authorize sets the appropriate `Authorization` header on the given
+ // request.
+ //
+ // If no authorization is found for the request, the request remains
+ // unmodified. It may also add an `Authorization` header as
+ // "bearer "
+ // "basic "
+ //
+ // It may return remotes/errors.ErrUnexpectedStatus, which for example,
+ // can be used by the caller to find out the status code returned by the registry.
+ Authorize(context.Context, *http.Request) error
+
+ // AddResponses adds a 401 response for the authorizer to consider when
+ // authorizing requests. The last response should be unauthorized and
+ // the previous responses are used to consider redirects and retries
+ // that may have led to the 401.
+ //
+ // If response is not handled, returns `ErrNotImplemented`
+ AddResponses(context.Context, []*http.Response) error
+}
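+
+// Illustrative sketch (not part of upstream): a minimal Authorizer that
+// attaches a static bearer token. The type name staticTokenAuthorizer is
+// hypothetical; errdefs is already imported by this file.
+//
+//	type staticTokenAuthorizer struct{ token string }
+//
+//	func (a staticTokenAuthorizer) Authorize(_ context.Context, req *http.Request) error {
+//		// Set the header unconditionally; a real implementation would
+//		// scope credentials per host.
+//		req.Header.Set("Authorization", "Bearer "+a.token)
+//		return nil
+//	}
+//
+//	func (a staticTokenAuthorizer) AddResponses(context.Context, []*http.Response) error {
+//		return errdefs.ErrNotImplemented // no challenge handling in this sketch
+//	}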
+
+// ResolverOptions are used to configure a new Docker registry resolver
+type ResolverOptions struct {
+ // Hosts returns registry host configurations for a namespace.
+ Hosts RegistryHosts
+
+ // Headers are the HTTP request header fields sent by the resolver
+ Headers http.Header
+
+ // Tracker is used to track uploads to the registry. This is used
+ // since the registry does not have upload tracking and the existing
+ // mechanism for getting blob upload status is expensive.
+ Tracker StatusTracker
+
+ // Authorizer is used to authorize registry requests
+ //
+ // Deprecated: use Hosts.
+ Authorizer Authorizer
+
+ // Credentials provides username and secret given a host.
+ // If username is empty but a secret is given, that secret
+ // is interpreted as a long lived token.
+ //
+ // Deprecated: use Hosts.
+ Credentials func(string) (string, string, error)
+
+ // Host provides the hostname given a namespace.
+ //
+ // Deprecated: use Hosts.
+ Host func(string) (string, error)
+
+ // PlainHTTP specifies to use plain http and not https
+ //
+ // Deprecated: use Hosts.
+ PlainHTTP bool
+
+ // Client is the http client to use when making registry requests
+ //
+ // Deprecated: use Hosts.
+ Client *http.Client
+}
+
+// DefaultHost is the default host function.
+func DefaultHost(ns string) (string, error) {
+ if ns == "docker.io" {
+ return "registry-1.docker.io", nil
+ }
+ return ns, nil
+}
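+
+// For example (illustrative): only the "docker.io" namespace is mapped to
+// Docker Hub's actual registry endpoint; any other namespace resolves to
+// itself.
+//
+//	h, _ := DefaultHost("docker.io") // "registry-1.docker.io"
+//	h, _ = DefaultHost("ghcr.io")    // "ghcr.io"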
+
+type dockerResolver struct {
+ hosts RegistryHosts
+ header http.Header
+ resolveHeader http.Header
+ tracker StatusTracker
+}
+
+// NewResolver returns a new resolver to a Docker registry
+func NewResolver(options ResolverOptions) remotes.Resolver {
+ if options.Tracker == nil {
+ options.Tracker = NewInMemoryTracker()
+ }
+
+ if options.Headers == nil {
+ options.Headers = make(http.Header)
+ } else {
+ // make a copy of the headers to avoid race due to concurrent map write
+ options.Headers = options.Headers.Clone()
+ }
+ if _, ok := options.Headers["User-Agent"]; !ok {
+ options.Headers.Set("User-Agent", "containerd/"+version.Version)
+ }
+
+ resolveHeader := http.Header{}
+ if _, ok := options.Headers["Accept"]; !ok {
+ // set headers for all the types we support for resolution.
+ resolveHeader.Set("Accept", strings.Join([]string{
+ images.MediaTypeDockerSchema2Manifest,
+ images.MediaTypeDockerSchema2ManifestList,
+ ocispec.MediaTypeImageManifest,
+ ocispec.MediaTypeImageIndex, "*/*",
+ }, ", "))
+ } else {
+ resolveHeader["Accept"] = options.Headers["Accept"]
+ delete(options.Headers, "Accept")
+ }
+
+ if options.Hosts == nil {
+ opts := []RegistryOpt{}
+ if options.Host != nil {
+ opts = append(opts, WithHostTranslator(options.Host))
+ }
+
+ if options.Authorizer == nil {
+ options.Authorizer = NewDockerAuthorizer(
+ WithAuthClient(options.Client),
+ WithAuthHeader(options.Headers),
+ WithAuthCreds(options.Credentials))
+ }
+ opts = append(opts, WithAuthorizer(options.Authorizer))
+
+ if options.Client != nil {
+ opts = append(opts, WithClient(options.Client))
+ }
+ if options.PlainHTTP {
+ opts = append(opts, WithPlainHTTP(MatchAllHosts))
+ } else {
+ opts = append(opts, WithPlainHTTP(MatchLocalhost))
+ }
+ options.Hosts = ConfigureDefaultRegistries(opts...)
+ }
+ return &dockerResolver{
+ hosts: options.Hosts,
+ header: options.Headers,
+ resolveHeader: resolveHeader,
+ tracker: options.Tracker,
+ }
+}
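+
+// Usage sketch (illustrative, assumes a ctx context.Context and the fmt
+// package in scope): build a resolver with default options and resolve a
+// tagged reference to a content descriptor.
+//
+//	resolver := NewResolver(ResolverOptions{})
+//	name, desc, err := resolver.Resolve(ctx, "docker.io/library/alpine:latest")
+//	if err == nil {
+//		fmt.Println(name, desc.Digest, desc.MediaType, desc.Size)
+//	}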
+
+func getManifestMediaType(resp *http.Response) string {
+ // Strip encoding data (manifests should always be ASCII JSON)
+ contentType := resp.Header.Get("Content-Type")
+ if sp := strings.IndexByte(contentType, ';'); sp != -1 {
+ contentType = contentType[0:sp]
+ }
+
+ // As of Apr 30 2019 the registry.access.redhat.com registry does not specify
+ // the content type of any data but uses schema1 manifests.
+ if contentType == "text/plain" {
+ contentType = images.MediaTypeDockerSchema1Manifest
+ }
+ return contentType
+}
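+
+// For example (illustrative): any charset parameter is stripped, and bare
+// "text/plain" is treated as a schema 1 manifest.
+//
+//	resp := &http.Response{Header: http.Header{
+//		"Content-Type": []string{"application/vnd.oci.image.manifest.v1+json; charset=utf-8"},
+//	}}
+//	mt := getManifestMediaType(resp) // "application/vnd.oci.image.manifest.v1+json"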
+
+type countingReader struct {
+ reader io.Reader
+ bytesRead int64
+}
+
+func (r *countingReader) Read(p []byte) (int, error) {
+ n, err := r.reader.Read(p)
+ r.bytesRead += int64(n)
+ return n, err
+}
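+
+// Usage sketch (illustrative): wrap any reader to measure how many bytes
+// were consumed, as Resolve does to learn a manifest's size when the
+// registry omits Content-Length.
+//
+//	cr := &countingReader{reader: strings.NewReader("{}")}
+//	_, _ = io.Copy(io.Discard, cr)
+//	size := cr.bytesRead // 2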
+
+var _ remotes.Resolver = &dockerResolver{}
+
+func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) {
+ base, err := r.resolveDockerBase(ref)
+ if err != nil {
+ return "", ocispec.Descriptor{}, err
+ }
+ refspec := base.refspec
+ if refspec.Object == "" {
+ return "", ocispec.Descriptor{}, reference.ErrObjectRequired
+ }
+
+ var (
+ paths [][]string
+ dgst = refspec.Digest()
+ caps = HostCapabilityPull
+ )
+
+ if dgst != "" {
+ if err := dgst.Validate(); err != nil {
+ // need to fail here, since we can't actually resolve the invalid
+ // digest.
+ return "", ocispec.Descriptor{}, err
+ }
+
+ // turns out, we have a valid digest, make a url.
+ paths = append(paths, []string{"manifests", dgst.String()})
+
+ // fallback to blobs on not found.
+ paths = append(paths, []string{"blobs", dgst.String()})
+ } else {
+ // Add the manifest path for tag-based resolution.
+ paths = append(paths, []string{"manifests", refspec.Object})
+ caps |= HostCapabilityResolve
+ }
+
+ hosts := base.filterHosts(caps)
+ if len(hosts) == 0 {
+ return "", ocispec.Descriptor{}, fmt.Errorf("no resolve hosts: %w", errdefs.ErrNotFound)
+ }
+
+ ctx, err = ContextWithRepositoryScope(ctx, refspec, false)
+ if err != nil {
+ return "", ocispec.Descriptor{}, err
+ }
+
+ var (
+ // firstErr is the most relevant error encountered during resolution.
+ // We use this to determine the error to return, making sure that the
+ // error created furthest through the resolution process is returned.
+ firstErr error
+ firstErrPriority int
+ )
+ for _, u := range paths {
+ for _, host := range hosts {
+ ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host))
+
+ req := base.request(host, http.MethodHead, u...)
+ if err := req.addNamespace(base.refspec.Hostname()); err != nil {
+ return "", ocispec.Descriptor{}, err
+ }
+
+ for key, value := range r.resolveHeader {
+ req.header[key] = append(req.header[key], value...)
+ }
+
+ log.G(ctx).Debug("resolving")
+ resp, err := req.doWithRetries(ctx, nil)
+ if err != nil {
+ if errors.Is(err, ErrInvalidAuthorization) {
+ err = fmt.Errorf("pull access denied, repository does not exist or may require authorization: %w", err)
+ }
+ if firstErrPriority < 1 {
+ firstErr = err
+ firstErrPriority = 1
+ }
+ log.G(ctx).WithError(err).Info("trying next host")
+ continue // try another host
+ }
+ resp.Body.Close() // don't care about body contents.
+
+ if resp.StatusCode > 299 {
+ if resp.StatusCode == http.StatusNotFound {
+ if firstErrPriority < 2 {
+ firstErr = fmt.Errorf("%s: %w", ref, errdefs.ErrNotFound)
+ firstErrPriority = 2
+ }
+ log.G(ctx).Info("trying next host - response was http.StatusNotFound")
+ continue
+ }
+ if resp.StatusCode > 399 {
+ if firstErrPriority < 3 {
+ firstErr = remoteerrors.NewUnexpectedStatusErr(resp)
+ firstErrPriority = 3
+ }
+ log.G(ctx).Infof("trying next host - response was %s", resp.Status)
+ continue // try another host
+ }
+ return "", ocispec.Descriptor{}, remoteerrors.NewUnexpectedStatusErr(resp)
+ }
+ size := resp.ContentLength
+ contentType := getManifestMediaType(resp)
+
+ // if no digest was provided, then only registries trusted
+ // for resolution were contacted; in this case use
+ // the digest header (or content from GET)
+ if dgst == "" {
+ // this is the only point at which we trust the registry. we use the
+ // content headers to assemble a descriptor for the name. when this becomes
+ // more robust, we mostly get this information from a secure trust store.
+ dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
+
+ if dgstHeader != "" && size != -1 {
+ if err := dgstHeader.Validate(); err != nil {
+ return "", ocispec.Descriptor{}, fmt.Errorf("%q in header not a valid digest: %w", dgstHeader, err)
+ }
+ dgst = dgstHeader
+ }
+ }
+ if dgst == "" || size == -1 {
+ log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead")
+
+ req = base.request(host, http.MethodGet, u...)
+ if err := req.addNamespace(base.refspec.Hostname()); err != nil {
+ return "", ocispec.Descriptor{}, err
+ }
+
+ for key, value := range r.resolveHeader {
+ req.header[key] = append(req.header[key], value...)
+ }
+
+ resp, err := req.doWithRetries(ctx, nil)
+ if err != nil {
+ return "", ocispec.Descriptor{}, err
+ }
+
+ bodyReader := countingReader{reader: resp.Body}
+
+ contentType = getManifestMediaType(resp)
+ err = func() error {
+ defer resp.Body.Close()
+ if dgst != "" {
+ _, err = io.Copy(io.Discard, &bodyReader)
+ return err
+ }
+
+ if contentType == images.MediaTypeDockerSchema1Manifest {
+ b, err := schema1.ReadStripSignature(&bodyReader)
+ if err != nil {
+ return err
+ }
+
+ dgst = digest.FromBytes(b)
+ return nil
+ }
+
+ dgst, err = digest.FromReader(&bodyReader)
+ return err
+ }()
+ if err != nil {
+ return "", ocispec.Descriptor{}, err
+ }
+ size = bodyReader.bytesRead
+ }
+ // Prevent resolving to excessively large manifests
+ if size > MaxManifestSize {
+ if firstErrPriority < 4 {
+ firstErr = fmt.Errorf("rejecting %d byte manifest for %s: %w", size, ref, errdefs.ErrNotFound)
+ firstErrPriority = 4
+ }
+ continue
+ }
+
+ desc := ocispec.Descriptor{
+ Digest: dgst,
+ MediaType: contentType,
+ Size: size,
+ }
+
+ log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved")
+ return ref, desc, nil
+ }
+ }
+
+ // If the above loop terminated without returning, fall back to the most
+ // relevant recorded error; if none was recorded, report not found.
+ if firstErr == nil {
+ firstErr = fmt.Errorf("%s: %w", ref, errdefs.ErrNotFound)
+ }
+
+ return "", ocispec.Descriptor{}, firstErr
+}
+
+func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
+ base, err := r.resolveDockerBase(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ return dockerFetcher{
+ dockerBase: base,
+ }, nil
+}
+
+func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
+ base, err := r.resolveDockerBase(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ return dockerPusher{
+ dockerBase: base,
+ object: base.refspec.Object,
+ tracker: r.tracker,
+ }, nil
+}
+
+func (r *dockerResolver) resolveDockerBase(ref string) (*dockerBase, error) {
+ refspec, err := reference.Parse(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ return r.base(refspec)
+}
+
+type dockerBase struct {
+ refspec reference.Spec
+ repository string
+ hosts []RegistryHost
+ header http.Header
+}
+
+func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) {
+ host := refspec.Hostname()
+ hosts, err := r.hosts(host)
+ if err != nil {
+ return nil, err
+ }
+ return &dockerBase{
+ refspec: refspec,
+ repository: strings.TrimPrefix(refspec.Locator, host+"/"),
+ hosts: hosts,
+ header: r.header,
+ }, nil
+}
+
+func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) {
+ for _, host := range r.hosts {
+ if host.Capabilities.Has(caps) {
+ hosts = append(hosts, host)
+ }
+ }
+ return
+}
+
+func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request {
+ header := r.header.Clone()
+ if header == nil {
+ header = http.Header{}
+ }
+
+ for key, value := range host.Header {
+ header[key] = append(header[key], value...)
+ }
+ parts := append([]string{"/", host.Path, r.repository}, ps...)
+ p := path.Join(parts...)
+ // Join strips trailing slash, re-add ending "/" if included
+ if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") {
+ p = p + "/"
+ }
+ return &request{
+ method: method,
+ path: p,
+ header: header,
+ host: host,
+ }
+}
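+
+// Path-building sketch (illustrative, assuming a dockerBase whose
+// repository is "library/alpine" and a host whose Path is "/v2"):
+//
+//	req := base.request(host, http.MethodHead, "manifests", "latest")
+//	// req.path == "/v2/library/alpine/manifests/latest"
+//
+// A trailing "/" on the final element is re-added after path.Join strips it.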
+
+func (r *request) authorize(ctx context.Context, req *http.Request) error {
+ // Check if has header for host
+ if r.host.Authorizer != nil {
+ if err := r.host.Authorizer.Authorize(ctx, req); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *request) addNamespace(ns string) (err error) {
+ if !r.host.isProxy(ns) {
+ return nil
+ }
+ var q url.Values
+ // Parse any existing query before truncating the path; truncating
+ // first would silently drop pre-existing parameters.
+ if i := strings.IndexByte(r.path, '?'); i > 0 {
+ q, err = url.ParseQuery(r.path[i+1:])
+ if err != nil {
+ return
+ }
+ r.path = r.path[:i+1]
+ } else {
+ r.path = r.path + "?"
+ q = url.Values{}
+ }
+ q.Add("ns", ns)
+
+ r.path = r.path + q.Encode()
+
+ return
+}
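+
+// For example (illustrative): when the host is a proxy (mirror) for the
+// "docker.io" namespace, the namespace is appended as a query parameter.
+//
+//	// before: /v2/library/alpine/manifests/latest
+//	// after:  /v2/library/alpine/manifests/latest?ns=docker.io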
+
+type request struct {
+ method string
+ path string
+ header http.Header
+ host RegistryHost
+ body func() (io.ReadCloser, error)
+ size int64
+}
+
+func (r *request) do(ctx context.Context) (*http.Response, error) {
+ u := r.host.Scheme + "://" + r.host.Host + r.path
+ req, err := http.NewRequestWithContext(ctx, r.method, u, nil)
+ if err != nil {
+ return nil, err
+ }
+ if r.header == nil {
+ req.Header = http.Header{}
+ } else {
+ req.Header = r.header.Clone() // headers need to be copied to avoid concurrent map access
+ }
+ if r.body != nil {
+ body, err := r.body()
+ if err != nil {
+ return nil, err
+ }
+ req.Body = body
+ req.GetBody = r.body
+ if r.size > 0 {
+ req.ContentLength = r.size
+ }
+ }
+
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u))
+ log.G(ctx).WithFields(requestFields(req)).Debug("do request")
+ if err := r.authorize(ctx, req); err != nil {
+ return nil, fmt.Errorf("failed to authorize: %w", err)
+ }
+
+ client := &http.Client{}
+ if r.host.Client != nil {
+ *client = *r.host.Client
+ }
+ if client.CheckRedirect == nil {
+ client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ if len(via) >= 10 {
+ return errors.New("stopped after 10 redirects")
+ }
+ if err := r.authorize(ctx, req); err != nil {
+ return fmt.Errorf("failed to authorize redirect: %w", err)
+ }
+ return nil
+ }
+ }
+
+ tracing.UpdateHTTPClient(client, tracing.Name("remotes.docker.resolver", "HTTPRequest"))
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to do request: %w", err)
+ }
+ log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received")
+ return resp, nil
+}
+
+func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) {
+ resp, err := r.do(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ responses = append(responses, resp)
+ retry, err := r.retryRequest(ctx, responses)
+ if err != nil {
+ resp.Body.Close()
+ return nil, err
+ }
+ if retry {
+ resp.Body.Close()
+ return r.doWithRetries(ctx, responses)
+ }
+ return resp, err
+}
+
+func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) {
+ if len(responses) > 5 {
+ return false, nil
+ }
+ last := responses[len(responses)-1]
+ switch last.StatusCode {
+ case http.StatusUnauthorized:
+ log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized")
+ if r.host.Authorizer != nil {
+ if err := r.host.Authorizer.AddResponses(ctx, responses); err == nil {
+ return true, nil
+ } else if !errdefs.IsNotImplemented(err) {
+ return false, err
+ }
+ }
+
+ return false, nil
+ case http.StatusMethodNotAllowed:
+ // Support registries which have not properly implemented the HEAD method for
+ // manifests endpoint
+ if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") {
+ r.method = http.MethodGet
+ return true, nil
+ }
+ case http.StatusRequestTimeout, http.StatusTooManyRequests:
+ return true, nil
+ }
+
+ // TODO: Handle 50x errors accounting for attempt history
+ return false, nil
+}
+
+func (r *request) String() string {
+ return r.host.Scheme + "://" + r.host.Host + r.path
+}
+
+func requestFields(req *http.Request) log.Fields {
+ fields := map[string]interface{}{
+ "request.method": req.Method,
+ }
+ for k, vals := range req.Header {
+ k = strings.ToLower(k)
+ if k == "authorization" {
+ continue
+ }
+ for i, v := range vals {
+ field := "request.header." + k
+ if i > 0 {
+ field = fmt.Sprintf("%s.%d", field, i)
+ }
+ fields[field] = v
+ }
+ }
+
+ return fields
+}
+
+func responseFields(resp *http.Response) log.Fields {
+ fields := map[string]interface{}{
+ "response.status": resp.Status,
+ }
+ for k, vals := range resp.Header {
+ k = strings.ToLower(k)
+ for i, v := range vals {
+ field := "response.header." + k
+ if i > 0 {
+ field = fmt.Sprintf("%s.%d", field, i)
+ }
+ fields[field] = v
+ }
+ }
+
+ return fields
+}
+
+// IsLocalhost checks if the registry host is local.
+func IsLocalhost(host string) bool {
+ if h, _, err := net.SplitHostPort(host); err == nil {
+ host = h
+ }
+
+ if host == "localhost" {
+ return true
+ }
+
+ ip := net.ParseIP(host)
+ return ip.IsLoopback()
+}
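+
+// For example (illustrative):
+//
+//	IsLocalhost("localhost:5000") // true
+//	IsLocalhost("127.0.0.1")      // true
+//	IsLocalhost("[::1]:5000")     // true
+//	IsLocalhost("registry.local") // false (not a loopback IP)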
+
+// NewHTTPFallback returns http.RoundTripper which allows fallback from https to
+// http for registry endpoints with configurations for both http and TLS,
+// such as defaulted localhost endpoints.
+func NewHTTPFallback(transport http.RoundTripper) http.RoundTripper {
+ return &httpFallback{
+ super: transport,
+ }
+}
+
+type httpFallback struct {
+ super http.RoundTripper
+ host string
+ mu sync.Mutex
+}
+
+func (f *httpFallback) RoundTrip(r *http.Request) (*http.Response, error) {
+ f.mu.Lock()
+ fallback := f.host == r.URL.Host
+ f.mu.Unlock()
+
+ // only fall back immediately if the same host has previously fallen back
+ if !fallback {
+ resp, err := f.super.RoundTrip(r)
+ if !isTLSError(err) && !isPortError(err, r.URL.Host) {
+ return resp, err
+ }
+ }
+
+ plainHTTPUrl := *r.URL
+ plainHTTPUrl.Scheme = "http"
+
+ plainHTTPRequest := *r
+ plainHTTPRequest.URL = &plainHTTPUrl
+
+ if !fallback {
+ f.mu.Lock()
+ if f.host != r.URL.Host {
+ f.host = r.URL.Host
+ }
+ f.mu.Unlock()
+
+ // update body on the second attempt
+ if r.Body != nil && r.GetBody != nil {
+ body, err := r.GetBody()
+ if err != nil {
+ return nil, err
+ }
+ plainHTTPRequest.Body = body
+ }
+ }
+
+ return f.super.RoundTrip(&plainHTTPRequest)
+}
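+
+// Usage sketch (illustrative): wrap the default transport so that
+// endpoints configured for both HTTP and TLS, such as defaulted localhost
+// endpoints, can fall back transparently.
+//
+//	client := &http.Client{
+//		Transport: NewHTTPFallback(http.DefaultTransport),
+//	}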
+
+func isTLSError(err error) bool {
+ if err == nil {
+ return false
+ }
+ var tlsErr tls.RecordHeaderError
+ if errors.As(err, &tlsErr) && string(tlsErr.RecordHeader[:]) == "HTTP/" {
+ return true
+ }
+ if strings.Contains(err.Error(), "TLS handshake timeout") {
+ return true
+ }
+
+ return false
+}
+
+func isPortError(err error, host string) bool {
+ if isConnError(err) || os.IsTimeout(err) {
+ if _, port, _ := net.SplitHostPort(host); port != "" {
+ // Port is specified, will not retry on different port with scheme change
+ return false
+ }
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver_unix.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver_unix.go
new file mode 100644
index 000000000000..4ef0e0062a3a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver_unix.go
@@ -0,0 +1,28 @@
+//go:build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "errors"
+ "syscall"
+)
+
+func isConnError(err error) bool {
+ return errors.Is(err, syscall.ECONNREFUSED)
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver_windows.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver_windows.go
new file mode 100644
index 000000000000..9c98df04bb52
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver_windows.go
@@ -0,0 +1,30 @@
+//go:build windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "errors"
+ "syscall"
+
+ "golang.org/x/sys/windows"
+)
+
+func isConnError(err error) bool {
+ return errors.Is(err, syscall.ECONNREFUSED) || errors.Is(err, windows.WSAECONNREFUSED)
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/schema1/converter.go
new file mode 100644
index 000000000000..e724e4e55abc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/schema1/converter.go
@@ -0,0 +1,626 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package schema1 provides a converter to fetch an image formatted in Docker Image Manifest v2, Schema 1.
+//
+// Deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1.
+package schema1
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/core/remotes"
+ "github.com/containerd/containerd/v2/pkg/archive/compression"
+ "github.com/containerd/containerd/v2/pkg/deprecation"
+ "github.com/containerd/containerd/v2/pkg/labels"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+ digest "github.com/opencontainers/go-digest"
+ specs "github.com/opencontainers/image-spec/specs-go"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/sync/errgroup"
+)
+
+const (
+ manifestSizeLimit = 8e6 // 8MB
+ labelDockerSchema1EmptyLayer = "containerd.io/docker.schema1.empty-layer"
+)
+
+type blobState struct {
+ diffID digest.Digest
+ empty bool
+}
+
+// Converter converts schema1 manifests to schema2 on fetch
+type Converter struct {
+ contentStore content.Store
+ fetcher remotes.Fetcher
+
+ pulledManifest *manifest
+
+ mu sync.Mutex
+ blobMap map[digest.Digest]blobState
+ layerBlobs map[digest.Digest]ocispec.Descriptor
+}
+
+var ErrDisabled = fmt.Errorf("Pulling Schema 1 images has been deprecated and disabled by default since containerd v2.0. "+
+ "As a workaround you may set an environment variable `%s=1`, but this will be completely removed in containerd v2.1.",
+ deprecation.EnvPullSchema1Image)
+
+// NewConverter returns a new converter
+func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) (*Converter, error) {
+ s := os.Getenv(deprecation.EnvPullSchema1Image)
+ if s == "" {
+ return nil, ErrDisabled
+ }
+ enable, err := strconv.ParseBool(s)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse `%s=%s`: %w", deprecation.EnvPullSchema1Image, s, err)
+ }
+ if !enable {
+ return nil, ErrDisabled
+ }
+ log.L.Warn(ErrDisabled)
+ return &Converter{
+ contentStore: contentStore,
+ fetcher: fetcher,
+ blobMap: map[digest.Digest]blobState{},
+ layerBlobs: map[digest.Digest]ocispec.Descriptor{},
+ }, nil
+}
+
+// Handle fetching descriptors for a docker media type
+func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ switch desc.MediaType {
+ case images.MediaTypeDockerSchema1Manifest:
+ if err := c.fetchManifest(ctx, desc); err != nil {
+ return nil, err
+ }
+
+ m := c.pulledManifest
+ if len(m.FSLayers) != len(m.History) {
+ return nil, errors.New("invalid schema 1 manifest, history and layer mismatch")
+ }
+ descs := make([]ocispec.Descriptor, 0, len(c.pulledManifest.FSLayers))
+
+ for i := range m.FSLayers {
+ if _, ok := c.blobMap[c.pulledManifest.FSLayers[i].BlobSum]; !ok {
+ empty, err := isEmptyLayer([]byte(m.History[i].V1Compatibility))
+ if err != nil {
+ return nil, err
+ }
+
+ // Do not attempt to download a known empty blob
+ if !empty {
+ descs = append([]ocispec.Descriptor{
+ {
+ MediaType: images.MediaTypeDockerSchema2LayerGzip,
+ Digest: c.pulledManifest.FSLayers[i].BlobSum,
+ Size: -1,
+ },
+ }, descs...)
+ }
+ c.blobMap[c.pulledManifest.FSLayers[i].BlobSum] = blobState{
+ empty: empty,
+ }
+ }
+ }
+ return descs, nil
+ case images.MediaTypeDockerSchema2LayerGzip:
+ if c.pulledManifest == nil {
+ return nil, errors.New("manifest required for schema 1 blob pull")
+ }
+ return nil, c.fetchBlob(ctx, desc)
+ default:
+ return nil, fmt.Errorf("%v not support for schema 1 manifests", desc.MediaType)
+ }
+}
+
+// ConvertOptions provides options on converting a docker schema1 manifest.
+type ConvertOptions struct {
+ // ManifestMediaType specifies the media type of the manifest OCI descriptor.
+ ManifestMediaType string
+
+ // ConfigMediaType specifies the media type of the manifest config OCI
+ // descriptor.
+ ConfigMediaType string
+}
+
+// ConvertOpt allows configuring a convert operation.
+type ConvertOpt func(context.Context, *ConvertOptions) error
+
+// UseDockerSchema2 is used to indicate that a schema1 manifest should be
+// converted into the media types for a docker schema2 manifest.
+func UseDockerSchema2() ConvertOpt {
+ return func(ctx context.Context, o *ConvertOptions) error {
+ o.ManifestMediaType = images.MediaTypeDockerSchema2Manifest
+ o.ConfigMediaType = images.MediaTypeDockerSchema2Config
+ return nil
+ }
+}
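+
+// Usage sketch (illustrative, assumes a populated Converter c and a ctx):
+//
+//	desc, err := c.Convert(ctx, UseDockerSchema2())
+//	// on success, desc describes a Docker schema2 manifest rather than
+//	// the default OCI manifest media type.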
+
+// Convert a docker manifest to an OCI descriptor
+func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.Descriptor, error) {
+ co := ConvertOptions{
+ ManifestMediaType: ocispec.MediaTypeImageManifest,
+ ConfigMediaType: ocispec.MediaTypeImageConfig,
+ }
+ for _, opt := range opts {
+ if err := opt(ctx, &co); err != nil {
+ return ocispec.Descriptor{}, err
+ }
+ }
+
+ history, diffIDs, err := c.schema1ManifestHistory()
+ if err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("schema 1 conversion failed: %w", err)
+ }
+
+ var img ocispec.Image
+ if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal image from schema 1 history: %w", err)
+ }
+
+ img.History = history
+ img.RootFS = ocispec.RootFS{
+ Type: "layers",
+ DiffIDs: diffIDs,
+ }
+
+ b, err := json.MarshalIndent(img, "", " ")
+ if err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err)
+ }
+
+ config := ocispec.Descriptor{
+ MediaType: co.ConfigMediaType,
+ Digest: digest.Canonical.FromBytes(b),
+ Size: int64(len(b)),
+ }
+
+ layers := make([]ocispec.Descriptor, len(diffIDs))
+ for i, diffID := range diffIDs {
+ layers[i] = c.layerBlobs[diffID]
+ }
+
+ manifest := ocispec.Manifest{
+ Versioned: specs.Versioned{
+ SchemaVersion: 2,
+ },
+ Config: config,
+ Layers: layers,
+ }
+
+ mb, err := json.MarshalIndent(manifest, "", " ")
+ if err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err)
+ }
+
+ desc := ocispec.Descriptor{
+ MediaType: co.ManifestMediaType,
+ Digest: digest.Canonical.FromBytes(mb),
+ Size: int64(len(mb)),
+ }
+
+ labels := map[string]string{}
+ labels["containerd.io/gc.ref.content.0"] = manifest.Config.Digest.String()
+ for i, ch := range manifest.Layers {
+ labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = ch.Digest.String()
+ }
+
+ ref := remotes.MakeRefKey(ctx, desc)
+ if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to write image manifest: %w", err)
+ }
+
+ ref = remotes.MakeRefKey(ctx, config)
+ if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil {
+ return ocispec.Descriptor{}, fmt.Errorf("failed to write image config: %w", err)
+ }
+
+ return desc, nil
+}
+
+// ReadStripSignature reads in a schema1 manifest and returns a byte array
+// with the "signatures" field stripped
+func ReadStripSignature(schema1Blob io.Reader) ([]byte, error) {
+ b, err := io.ReadAll(io.LimitReader(schema1Blob, manifestSizeLimit)) // limit to 8MB
+ if err != nil {
+ return nil, err
+ }
+
+ return stripSignature(b)
+}
+
+func (c *Converter) fetchManifest(ctx context.Context, desc ocispec.Descriptor) error {
+ log.G(ctx).Debug("fetch schema 1")
+
+ rc, err := c.fetcher.Fetch(ctx, desc)
+ if err != nil {
+ return err
+ }
+
+ b, err := ReadStripSignature(rc)
+ rc.Close()
+ if err != nil {
+ return err
+ }
+
+ var m manifest
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ if len(m.Manifests) != 0 || len(m.Layers) != 0 {
+ return errors.New("converter: expected schema1 document but found extra keys")
+ }
+ c.pulledManifest = &m
+
+ return nil
+}
+
+func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) error {
+ log.G(ctx).Debug("fetch blob")
+
+ var (
+ ref = remotes.MakeRefKey(ctx, desc)
+ calc = newBlobStateCalculator()
+ compressMethod = compression.Gzip
+ )
+
+ // size may be unknown, set to zero for content ingest
+ ingestDesc := desc
+ if ingestDesc.Size == -1 {
+ ingestDesc.Size = 0
+ }
+
+ cw, err := content.OpenWriter(ctx, c.contentStore, content.WithRef(ref), content.WithDescriptor(ingestDesc))
+ if err != nil {
+ if !errdefs.IsAlreadyExists(err) {
+ return err
+ }
+
+ reuse, err := c.reuseLabelBlobState(ctx, desc)
+ if err != nil {
+ return err
+ }
+
+ if reuse {
+ return nil
+ }
+
+ ra, err := c.contentStore.ReaderAt(ctx, desc)
+ if err != nil {
+ return err
+ }
+ defer ra.Close()
+
+ r, err := compression.DecompressStream(content.NewReader(ra))
+ if err != nil {
+ return err
+ }
+
+ compressMethod = r.GetCompression()
+ _, err = io.Copy(calc, r)
+ r.Close()
+ if err != nil {
+ return err
+ }
+ } else {
+ defer cw.Close()
+
+ rc, err := c.fetcher.Fetch(ctx, desc)
+ if err != nil {
+ return err
+ }
+ defer rc.Close()
+
+ eg, _ := errgroup.WithContext(ctx)
+ pr, pw := io.Pipe()
+
+ eg.Go(func() error {
+ r, err := compression.DecompressStream(pr)
+ if err != nil {
+ return err
+ }
+
+ compressMethod = r.GetCompression()
+ _, err = io.Copy(calc, r)
+ r.Close()
+ pr.CloseWithError(err)
+ return err
+ })
+
+ eg.Go(func() error {
+ defer pw.Close()
+
+ return content.Copy(ctx, cw, io.TeeReader(rc, pw), ingestDesc.Size, ingestDesc.Digest)
+ })
+
+ if err := eg.Wait(); err != nil {
+ return err
+ }
+ }
+
+ if desc.Size == -1 {
+ info, err := c.contentStore.Info(ctx, desc.Digest)
+ if err != nil {
+ return fmt.Errorf("failed to get blob info: %w", err)
+ }
+ desc.Size = info.Size
+ }
+
+ if compressMethod == compression.Uncompressed {
+ log.G(ctx).WithField("id", desc.Digest).Debugf("changed media type for uncompressed schema1 layer blob")
+ desc.MediaType = images.MediaTypeDockerSchema2Layer
+ }
+
+ state := calc.State()
+
+ cinfo := content.Info{
+ Digest: desc.Digest,
+ Labels: map[string]string{
+ labels.LabelUncompressed: state.diffID.String(),
+ labelDockerSchema1EmptyLayer: strconv.FormatBool(state.empty),
+ },
+ }
+
+ if _, err := c.contentStore.Update(ctx, cinfo, "labels."+labels.LabelUncompressed, fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil {
+ return fmt.Errorf("failed to update uncompressed label: %w", err)
+ }
+
+ c.mu.Lock()
+ c.blobMap[desc.Digest] = state
+ c.layerBlobs[state.diffID] = desc
+ c.mu.Unlock()
+
+ return nil
+}
+
+func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) {
+ cinfo, err := c.contentStore.Info(ctx, desc.Digest)
+ if err != nil {
+ return false, fmt.Errorf("failed to get blob info: %w", err)
+ }
+ desc.Size = cinfo.Size
+
+ diffID, ok := cinfo.Labels[labels.LabelUncompressed]
+ if !ok {
+ return false, nil
+ }
+
+ emptyVal, ok := cinfo.Labels[labelDockerSchema1EmptyLayer]
+ if !ok {
+ return false, nil
+ }
+
+ isEmpty, err := strconv.ParseBool(emptyVal)
+ if err != nil {
+ log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse bool from label %s: %v", labelDockerSchema1EmptyLayer, isEmpty)
+ return false, nil
+ }
+
+ bState := blobState{empty: isEmpty}
+
+ if bState.diffID, err = digest.Parse(diffID); err != nil {
+ log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label %s: %v", labels.LabelUncompressed, diffID)
+ return false, nil
+ }
+
+ // NOTE: there is no need to read header to get compression method
+ // because there are only two kinds of methods.
+ if bState.diffID == desc.Digest {
+ desc.MediaType = images.MediaTypeDockerSchema2Layer
+ } else {
+ desc.MediaType = images.MediaTypeDockerSchema2LayerGzip
+ }
+
+ c.mu.Lock()
+ c.blobMap[desc.Digest] = bState
+ c.layerBlobs[bState.diffID] = desc
+ c.mu.Unlock()
+ return true, nil
+}
+
+func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest, error) {
+ if c.pulledManifest == nil {
+ return nil, nil, errors.New("missing schema 1 manifest for conversion")
+ }
+ m := *c.pulledManifest
+
+ if len(m.History) == 0 {
+ return nil, nil, errors.New("no history")
+ }
+
+ history := make([]ocispec.History, len(m.History))
+ diffIDs := []digest.Digest{}
+ for i := range m.History {
+ var h v1History
+ if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil {
+ return nil, nil, fmt.Errorf("failed to unmarshal history: %w", err)
+ }
+
+ blobSum := m.FSLayers[i].BlobSum
+
+ state := c.blobMap[blobSum]
+
+ history[len(history)-i-1] = ocispec.History{
+ Author: h.Author,
+ Comment: h.Comment,
+ Created: &h.Created,
+ CreatedBy: strings.Join(h.ContainerConfig.Cmd, " "),
+ EmptyLayer: state.empty,
+ }
+
+ if !state.empty {
+ diffIDs = append([]digest.Digest{state.diffID}, diffIDs...)
+ }
+ }
+
+ return history, diffIDs, nil
+}
+
+type fsLayer struct {
+ BlobSum digest.Digest `json:"blobSum"`
+}
+
+type history struct {
+ V1Compatibility string `json:"v1Compatibility"`
+}
+
+type manifest struct {
+ FSLayers []fsLayer `json:"fsLayers"`
+ History []history `json:"history"`
+ Layers json.RawMessage `json:"layers,omitempty"` // OCI manifest
+ Manifests json.RawMessage `json:"manifests,omitempty"` // OCI index
+}
+
+type v1History struct {
+ Author string `json:"author,omitempty"`
+ Created time.Time `json:"created"`
+ Comment string `json:"comment,omitempty"`
+ ThrowAway *bool `json:"throwaway,omitempty"`
+ Size *int `json:"Size,omitempty"` // used before ThrowAway field
+ ContainerConfig struct {
+ Cmd []string `json:"Cmd,omitempty"`
+ } `json:"container_config,omitempty"`
+}
+
+// isEmptyLayer returns whether the v1 compatibility history describes an
+// empty layer. A return value of true indicates the layer is empty;
+// however, false does not guarantee that the layer is non-empty.
+func isEmptyLayer(compatHistory []byte) (bool, error) {
+ var h v1History
+ if err := json.Unmarshal(compatHistory, &h); err != nil {
+ return false, err
+ }
+
+ if h.ThrowAway != nil {
+ return *h.ThrowAway, nil
+ }
+ if h.Size != nil {
+ return *h.Size == 0, nil
+ }
+
+ // If no `Size` or `throwaway` field is given, then
+ // it cannot be determined from the history whether the
+ // layer is empty, so return false.
+ return false, nil
+}
+
+type signature struct {
+ Signatures []jsParsedSignature `json:"signatures"`
+}
+
+type jsParsedSignature struct {
+ Protected string `json:"protected"`
+}
+
+type protectedBlock struct {
+ Length int `json:"formatLength"`
+ Tail string `json:"formatTail"`
+}
+
+// joseBase64UrlDecode decodes the given string using the standard base64 url
+// decoder but first adds the appropriate number of trailing '=' characters in
+// accordance with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlDecode(s string) ([]byte, error) {
+ switch len(s) % 4 {
+ case 0:
+ case 2:
+ s += "=="
+ case 3:
+ s += "="
+ default:
+ return nil, errors.New("illegal base64url string")
+ }
+ return base64.URLEncoding.DecodeString(s)
+}
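+
+// For example (illustrative): jose base64url omits padding, so an input
+// whose length is 2 (mod 4) gains "==" before decoding.
+//
+//	b, _ := joseBase64UrlDecode("eyJhIjoxfQ") // padded to "eyJhIjoxfQ==", decodes to {"a":1}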
+
+func stripSignature(b []byte) ([]byte, error) {
+ var sig signature
+ if err := json.Unmarshal(b, &sig); err != nil {
+ return nil, err
+ }
+ if len(sig.Signatures) == 0 {
+ return nil, errors.New("no signatures")
+ }
+ pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected)
+ if err != nil {
+ return nil, fmt.Errorf("could not decode %s: %w", sig.Signatures[0].Protected, err)
+ }
+
+ var protected protectedBlock
+ if err := json.Unmarshal(pb, &protected); err != nil {
+ return nil, err
+ }
+
+ if protected.Length > len(b) {
+ return nil, errors.New("invalid protected length block")
+ }
+
+ tail, err := joseBase64UrlDecode(protected.Tail)
+ if err != nil {
+ return nil, fmt.Errorf("invalid tail base 64 value: %w", err)
+ }
+
+ return append(b[:protected.Length], tail...), nil
+}
+
+type blobStateCalculator struct {
+ empty bool
+ digester digest.Digester
+}
+
+func newBlobStateCalculator() *blobStateCalculator {
+ return &blobStateCalculator{
+ empty: true,
+ digester: digest.Canonical.Digester(),
+ }
+}
+
+func (c *blobStateCalculator) Write(p []byte) (int, error) {
+ if c.empty {
+ for _, b := range p {
+ if b != 0x00 {
+ c.empty = false
+ break
+ }
+ }
+ }
+ return c.digester.Hash().Write(p)
+}
+
+func (c *blobStateCalculator) State() blobState {
+ return blobState{
+ empty: c.empty,
+ diffID: c.digester.Digest(),
+ }
+}
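+
+// Usage sketch (illustrative): stream a decompressed layer through the
+// calculator to learn its diffID and whether it is all-zero ("empty").
+//
+//	calc := newBlobStateCalculator()
+//	_, _ = io.Copy(calc, strings.NewReader("layer bytes"))
+//	st := calc.State() // st.empty == false; st.diffID is the canonical digest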
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/scope.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/scope.go
new file mode 100644
index 000000000000..8135498bd9eb
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/scope.go
@@ -0,0 +1,101 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "sort"
+ "strings"
+
+ "github.com/containerd/containerd/v2/pkg/reference"
+)
+
+// RepositoryScope returns a repository scope string such as "repository:foo/bar:pull"
+// for "host/foo/bar:baz".
+// When push is true, both pull and push are added to the scope.
+func RepositoryScope(refspec reference.Spec, push bool) (string, error) {
+ u, err := url.Parse("dummy://" + refspec.Locator)
+ if err != nil {
+ return "", err
+ }
+ s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull"
+ if push {
+ s += ",push"
+ }
+ return s, nil
+}
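+
+// For example (illustrative, assuming refspec was parsed from
+// "host/foo/bar:baz"):
+//
+//	s, _ := RepositoryScope(refspec, false) // "repository:foo/bar:pull"
+//	s, _ = RepositoryScope(refspec, true)   // "repository:foo/bar:pull,push"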
+
+// tokenScopesKey is used for the key for context.WithValue().
+// value: []string (e.g. {"registry:foo/bar:pull"})
+type tokenScopesKey struct{}
+
+// ContextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value.
+func ContextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) {
+ s, err := RepositoryScope(refspec, push)
+ if err != nil {
+ return nil, err
+ }
+ return WithScope(ctx, s), nil
+}
+
+// WithScope appends a custom registry auth scope to the context.
+func WithScope(ctx context.Context, scope string) context.Context {
+ var scopes []string
+ if v := ctx.Value(tokenScopesKey{}); v != nil {
+ scopes = v.([]string)
+ scopes = append(scopes, scope)
+ } else {
+ scopes = []string{scope}
+ }
+ return context.WithValue(ctx, tokenScopesKey{}, scopes)
+}
+
+// ContextWithAppendPullRepositoryScope is used to append repository pull
+// scope into existing scopes indexed by the tokenScopesKey{}.
+func ContextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context {
+ return WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo))
+}
+
+// GetTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes.
+func GetTokenScopes(ctx context.Context, common []string) []string {
+ scopes := []string{}
+ if x := ctx.Value(tokenScopesKey{}); x != nil {
+ scopes = append(scopes, x.([]string)...)
+ }
+
+ scopes = append(scopes, common...)
+ sort.Strings(scopes)
+
+ if len(scopes) == 0 {
+ return scopes
+ }
+
+ l := 0
+ for idx := 1; idx < len(scopes); idx++ {
+ // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/)
+ // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal.
+ if scopes[l] == scopes[idx] {
+ continue
+ }
+
+ l++
+ scopes[l] = scopes[idx]
+ }
+ return scopes[:l+1]
+}
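+
+// For example (illustrative): scopes are sorted and textually identical
+// entries collapse to one, but semantically equal scopes with a different
+// action order would not.
+//
+//	ctx := WithScope(context.Background(), "repository:foo/bar:pull")
+//	GetTokenScopes(ctx, []string{"repository:foo/bar:pull"})
+//	// => []string{"repository:foo/bar:pull"}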
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/status.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/status.go
new file mode 100644
index 000000000000..9835525ab503
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/status.go
@@ -0,0 +1,101 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/errdefs"
+ "github.com/moby/locker"
+)
+
+// Status of a content operation
+type Status struct {
+ content.Status
+
+ Committed bool
+
+ // ErrClosed contains error encountered on close.
+ ErrClosed error
+
+ // UploadUUID is used by the Docker registry to reference blob uploads
+ UploadUUID string
+
+ // PushStatus contains status related to push.
+ PushStatus
+}
+
+type PushStatus struct {
+ // MountedFrom is the source that the content was cross-repo mounted from (empty if no cross-repo mount was performed).
+ MountedFrom string
+
+ // Exists indicates whether content already exists in the repository and wasn't uploaded.
+ Exists bool
+}
+
+// StatusTracker to track status of operations
+type StatusTracker interface {
+ GetStatus(string) (Status, error)
+ SetStatus(string, Status)
+}
+
+// StatusTrackLocker to track status of operations with lock
+type StatusTrackLocker interface {
+ StatusTracker
+ Lock(string)
+ Unlock(string)
+}
+
+type memoryStatusTracker struct {
+ statuses map[string]Status
+ m sync.Mutex
+ locker *locker.Locker
+}
+
+// NewInMemoryTracker returns a StatusTracker that tracks content status in-memory
+func NewInMemoryTracker() StatusTrackLocker {
+ return &memoryStatusTracker{
+ statuses: map[string]Status{},
+ locker: locker.New(),
+ }
+}
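+
+// Usage sketch (illustrative): statuses are keyed by the same ref strings
+// produced by remotes.MakeRefKey.
+//
+//	t := NewInMemoryTracker()
+//	t.SetStatus("manifest-sha256:abc", Status{UploadUUID: "uuid-1"})
+//	s, err := t.GetStatus("manifest-sha256:abc") // err == nil, s.UploadUUID == "uuid-1"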
+
+func (t *memoryStatusTracker) GetStatus(ref string) (Status, error) {
+ t.m.Lock()
+ defer t.m.Unlock()
+ status, ok := t.statuses[ref]
+ if !ok {
+ return Status{}, fmt.Errorf("status for ref %v: %w", ref, errdefs.ErrNotFound)
+ }
+ return status, nil
+}
+
+func (t *memoryStatusTracker) SetStatus(ref string, status Status) {
+ t.m.Lock()
+ t.statuses[ref] = status
+ t.m.Unlock()
+}
+
+func (t *memoryStatusTracker) Lock(ref string) {
+ t.locker.Lock(ref)
+}
+
+func (t *memoryStatusTracker) Unlock(ref string) {
+ t.locker.Unlock(ref)
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/errors/errors.go b/vendor/github.com/containerd/containerd/v2/core/remotes/errors/errors.go
new file mode 100644
index 000000000000..f60ff0fc286c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/errors/errors.go
@@ -0,0 +1,55 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package errors
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+)
+
+var _ error = ErrUnexpectedStatus{}
+
+// ErrUnexpectedStatus is returned if a registry API request returned an unexpected HTTP status
+type ErrUnexpectedStatus struct {
+ Status string
+ StatusCode int
+ Body []byte
+ RequestURL, RequestMethod string
+}
+
+func (e ErrUnexpectedStatus) Error() string {
+ return fmt.Sprintf("unexpected status from %s request to %s: %s", e.RequestMethod, e.RequestURL, e.Status)
+}
+
+// NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response
+func NewUnexpectedStatusErr(resp *http.Response) error {
+ var b []byte
+ if resp.Body != nil {
+ b, _ = io.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB
+ }
+ err := ErrUnexpectedStatus{
+ Body: b,
+ Status: resp.Status,
+ StatusCode: resp.StatusCode,
+ RequestMethod: resp.Request.Method,
+ }
+ if resp.Request.URL != nil {
+ err.RequestURL = resp.Request.URL.String()
+ }
+ return err
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/handlers.go b/vendor/github.com/containerd/containerd/v2/core/remotes/handlers.go
new file mode 100644
index 000000000000..16fcdbf84dc0
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/handlers.go
@@ -0,0 +1,394 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package remotes
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/pkg/labels"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+ "github.com/containerd/platforms"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/sync/semaphore"
+)
+
+type refKeyPrefix struct{}
+
+// WithMediaTypeKeyPrefix adds a custom key prefix for a media type which is used when storing
+// data in the content store from the FetchHandler.
+//
+// Used in `MakeRefKey` to determine what the key prefix should be.
+func WithMediaTypeKeyPrefix(ctx context.Context, mediaType, prefix string) context.Context {
+ var values map[string]string
+ if v := ctx.Value(refKeyPrefix{}); v != nil {
+ values = v.(map[string]string)
+ } else {
+ values = make(map[string]string)
+ }
+
+ values[mediaType] = prefix
+ return context.WithValue(ctx, refKeyPrefix{}, values)
+}
+
+// MakeRefKey returns a unique reference for the descriptor. This reference can be
+// used to lookup ongoing processes related to the descriptor. This function
+// may look to the context to namespace the reference appropriately.
+func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string {
+ key := desc.Digest.String()
+ if desc.Annotations != nil {
+ if name, ok := desc.Annotations[ocispec.AnnotationRefName]; ok {
+ key = fmt.Sprintf("%s@%s", name, desc.Digest.String())
+ }
+ }
+
+ if v := ctx.Value(refKeyPrefix{}); v != nil {
+ values := v.(map[string]string)
+ if prefix := values[desc.MediaType]; prefix != "" {
+ return prefix + "-" + key
+ }
+ }
+
+ switch {
+ case images.IsManifestType(desc.MediaType):
+ return "manifest-" + key
+ case images.IsIndexType(desc.MediaType):
+ return "index-" + key
+ case images.IsLayerType(desc.MediaType):
+ return "layer-" + key
+ case images.IsKnownConfig(desc.MediaType):
+ return "config-" + key
+ default:
+ log.G(ctx).Warnf("reference for unknown type: %s", desc.MediaType)
+ return "unknown-" + key
+ }
+}
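+
+// For example (illustrative, assuming a ctx and a digest dgst in scope):
+// an OCI image manifest descriptor yields a "manifest-" prefixed key,
+// while a layer descriptor would yield a "layer-" prefixed key.
+//
+//	key := MakeRefKey(ctx, ocispec.Descriptor{
+//		MediaType: ocispec.MediaTypeImageManifest,
+//		Digest:    dgst,
+//	})
+//	// key == "manifest-" + dgst.String()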
+
+// FetchHandler returns a handler that will fetch all content into the ingester
+// discovered in a call to Dispatch. Use with ChildrenHandler to do a full
+// recursive fetch.
+func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc {
+ return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{
+ "digest": desc.Digest,
+ "mediatype": desc.MediaType,
+ "size": desc.Size,
+ }))
+
+ if desc.MediaType == images.MediaTypeDockerSchema1Manifest {
+ return nil, fmt.Errorf("%v not supported", desc.MediaType)
+ }
+ err := Fetch(ctx, ingester, fetcher, desc)
+ if errdefs.IsAlreadyExists(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+}
+
+// Fetch fetches the given digest into the provided ingester
+func Fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error {
+ log.G(ctx).Debug("fetch")
+
+ cw, err := content.OpenWriter(ctx, ingester, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc))
+ if err != nil {
+ return err
+ }
+ defer cw.Close()
+
+ ws, err := cw.Status()
+ if err != nil {
+ return err
+ }
+
+ if desc.Size == 0 {
+ // most likely a poorly configured registry/web front end which responded with no
+ // Content-Length header; unable (not to mention useless) to commit a 0-length entry
+ // into the content store. Error out here; otherwise the error sent back is confusing
+ return fmt.Errorf("unable to fetch descriptor (%s) which reports content size of zero: %w", desc.Digest, errdefs.ErrInvalidArgument)
+ }
+ if ws.Offset == desc.Size {
+ // If writer is already complete, commit and return
+ err := cw.Commit(ctx, desc.Size, desc.Digest)
+ if err != nil && !errdefs.IsAlreadyExists(err) {
+ return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err)
+ }
+ return err
+ }
+
+ if desc.Size == int64(len(desc.Data)) {
+ return content.Copy(ctx, cw, bytes.NewReader(desc.Data), desc.Size, desc.Digest)
+ }
+
+ rc, err := fetcher.Fetch(ctx, desc)
+ if err != nil {
+ return err
+ }
+ defer rc.Close()
+
+ return content.Copy(ctx, cw, rc, desc.Size, desc.Digest)
+}
+
+// PushHandler returns a handler that will push all content from the provider
+// using a writer from the pusher.
+func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc {
+ return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{
+ "digest": desc.Digest,
+ "mediatype": desc.MediaType,
+ "size": desc.Size,
+ }))
+
+ err := push(ctx, provider, pusher, desc)
+ return nil, err
+ }
+}
+
+func push(ctx context.Context, provider content.Provider, pusher Pusher, desc ocispec.Descriptor) error {
+ log.G(ctx).Debug("push")
+
+ var (
+ cw content.Writer
+ err error
+ )
+ if cs, ok := pusher.(content.Ingester); ok {
+ cw, err = content.OpenWriter(ctx, cs, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc))
+ } else {
+ cw, err = pusher.Push(ctx, desc)
+ }
+ if err != nil {
+ if !errdefs.IsAlreadyExists(err) {
+ return err
+ }
+
+ return nil
+ }
+ defer cw.Close()
+
+ ra, err := provider.ReaderAt(ctx, desc)
+ if err != nil {
+ return err
+ }
+ defer ra.Close()
+
+ rd := io.NewSectionReader(ra, 0, desc.Size)
+ return content.Copy(ctx, cw, rd, desc.Size, desc.Digest)
+}
+
+// PushContent pushes content specified by the descriptor from the provider.
+//
+// Base handlers can be provided which will be called before any push specific
+// handlers.
+//
+// If the passed in content.Provider is also a content.InfoProvider (such as
+// content.Manager) then this will also annotate the distribution sources using
+// labels prefixed with "containerd.io/distribution.source".
+func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Provider, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error {
+
+ var m sync.Mutex
+ manifests := []ocispec.Descriptor{}
+ indexStack := []ocispec.Descriptor{}
+
+ filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ if images.IsManifestType(desc.MediaType) {
+ m.Lock()
+ manifests = append(manifests, desc)
+ m.Unlock()
+ return nil, images.ErrStopHandler
+ } else if images.IsIndexType(desc.MediaType) {
+ m.Lock()
+ indexStack = append(indexStack, desc)
+ m.Unlock()
+ return nil, images.ErrStopHandler
+ }
+ return nil, nil
+ })
+
+ pushHandler := PushHandler(pusher, store)
+
+ platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform)
+
+ var handler images.Handler
+ if m, ok := store.(content.InfoProvider); ok {
+ annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, m)
+ handler = images.Handlers(annotateHandler, filterHandler, pushHandler)
+ } else {
+ handler = images.Handlers(platformFilterhandler, filterHandler, pushHandler)
+ }
+
+ if wrapper != nil {
+ handler = wrapper(handler)
+ }
+
+ if err := images.Dispatch(ctx, handler, limiter, desc); err != nil {
+ return err
+ }
+
+ if err := images.Dispatch(ctx, pushHandler, limiter, manifests...); err != nil {
+ return err
+ }
+
+ // Iterate in reverse order as seen, parent always uploaded after child
+ for i := len(indexStack) - 1; i >= 0; i-- {
+ err := images.Dispatch(ctx, pushHandler, limiter, indexStack[i])
+ if err != nil {
+ // TODO(estesp): until we have a more complete method for index push, we need to report
+ // missing dependencies in an index/manifest list by sensing the "400 Bad Request"
+ // as a marker for this problem
+ if errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") {
+ return fmt.Errorf("manifest list/index references to blobs and/or manifests are missing in your target registry: %w", err)
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// SkipNonDistributableBlobs returns a handler that skips blobs that have a media type that is "non-distributable".
+// An example of this kind of content would be a Windows base layer, which is not supposed to be redistributed.
+//
+// This is based on the media type of the content:
+// - application/vnd.oci.image.layer.nondistributable
+// - application/vnd.docker.image.rootfs.foreign
+func SkipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc {
+ return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ if images.IsNonDistributable(desc.MediaType) {
+ log.G(ctx).WithField("digest", desc.Digest).WithField("mediatype", desc.MediaType).Debug("Skipping non-distributable blob")
+ return nil, images.ErrSkipDesc
+ }
+
+ children, err := f(ctx, desc)
+ if err != nil {
+ return nil, err
+ }
+ if len(children) == 0 {
+ return nil, nil
+ }
+
+ out := make([]ocispec.Descriptor, 0, len(children))
+ for _, child := range children {
+ if !images.IsNonDistributable(child.MediaType) {
+ out = append(out, child)
+ } else {
+ log.G(ctx).WithField("digest", child.Digest).WithField("mediatype", child.MediaType).Debug("Skipping non-distributable blob")
+ }
+ }
+ return out, nil
+ }
+}
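+
+// A handler chain can opt out of pushing foreign layers by wrapping the
+// dispatch handler, e.g. via the wrapper argument of PushContent (an
+// illustrative sketch; pusher, desc, and store are assumed values):
+//
+//	wrapper := func(h images.Handler) images.Handler {
+//		return SkipNonDistributableBlobs(images.HandlerFunc(h.Handle))
+//	}
+//	err := PushContent(ctx, pusher, desc, store, nil, platforms.All, wrapper)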
+
+// FilterManifestByPlatformHandler returns a handler that limits the traversal
+// of manifests which do not match the target platform to their configuration
+// data, skipping their layers.
+func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc {
+ return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ children, err := f(ctx, desc)
+ if err != nil {
+ return nil, err
+ }
+
+ // no platform information
+ if desc.Platform == nil || m == nil {
+ return children, nil
+ }
+
+ if images.IsManifestType(desc.MediaType) && !m.Match(*desc.Platform) {
+ var descs []ocispec.Descriptor
+ for _, child := range children {
+ if images.IsConfigType(child.MediaType) {
+ descs = append(descs, child)
+ }
+ }
+ return descs, nil
+ }
+ return children, nil
+ }
+}
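+
+// For example, to keep walking manifests for other platforms while only
+// descending into their config descriptors (an illustrative sketch; h is an
+// assumed handler value):
+//
+//	m := platforms.Only(ocispec.Platform{OS: "linux", Architecture: "amd64"})
+//	h = FilterManifestByPlatformHandler(h, m)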
+
+// annotateDistributionSourceHandler adds distribution source labels to the
+// annotations of config or blob descriptors.
+func annotateDistributionSourceHandler(f images.HandlerFunc, provider content.InfoProvider) images.HandlerFunc {
+ return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ children, err := f(ctx, desc)
+ if err != nil {
+ return nil, err
+ }
+
+ // The distribution source is only used for config or blob descriptors, but
+ // may be inherited from a manifest or manifest list.
+ if !images.IsManifestType(desc.MediaType) && !images.IsIndexType(desc.MediaType) {
+ return children, nil
+ }
+
+ parentSourceAnnotations := desc.Annotations
+ var parentLabels map[string]string
+ if pi, err := provider.Info(ctx, desc.Digest); err != nil {
+ if !errdefs.IsNotFound(err) {
+ return nil, err
+ }
+ } else {
+ parentLabels = pi.Labels
+ }
+
+ for i := range children {
+ child := children[i]
+
+ info, err := provider.Info(ctx, child.Digest)
+ if err != nil {
+ if !errdefs.IsNotFound(err) {
+ return nil, err
+ }
+ }
+ copyDistributionSourceLabels(info.Labels, &child)
+
+ // Annotate with parent labels for cross repo mount or fetch.
+ // Parent sources may apply to all children since most registries
+ // enforce that children exist before the manifests.
+ copyDistributionSourceLabels(parentSourceAnnotations, &child)
+ copyDistributionSourceLabels(parentLabels, &child)
+
+ children[i] = child
+ }
+ return children, nil
+ }
+}
+
+func copyDistributionSourceLabels(from map[string]string, to *ocispec.Descriptor) {
+ for k, v := range from {
+ if !strings.HasPrefix(k, labels.LabelDistributionSource+".") {
+ continue
+ }
+
+ if to.Annotations == nil {
+ to.Annotations = make(map[string]string)
+ } else {
+ // Only propagate the parent label if the child doesn't already have it.
+ if _, has := to.Annotations[k]; has {
+ continue
+ }
+ }
+ to.Annotations[k] = v
+ }
+}
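+
+// For reference, distribution source labels take the form (example values):
+//
+//	containerd.io/distribution.source.docker.io = library/ubuntu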
diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/resolver.go b/vendor/github.com/containerd/containerd/v2/core/remotes/resolver.go
new file mode 100644
index 000000000000..c39b93785e9b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/remotes/resolver.go
@@ -0,0 +1,111 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package remotes
+
+import (
+ "context"
+ "io"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Resolver provides remotes based on a locator.
+type Resolver interface {
+ // Resolve attempts to resolve the reference into a name and descriptor.
+ //
+ // The argument `ref` should be a scheme-less URI representing the remote.
+ // Structurally, it has a host and path. The "host" can be used to directly
+ // reference a specific host or be matched against a specific handler.
+ //
+ // The returned name should be used to identify the referenced entity.
+ // Depending on the remote namespace, this may be immutable or mutable.
+ // While the name may differ from ref, it should itself be a valid ref.
+ //
+ // If the resolution fails, an error will be returned.
+ Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error)
+
+ // Fetcher returns a new fetcher for the provided reference.
+ // All content fetched from the returned fetcher will be
+ // from the namespace referred to by ref.
+ Fetcher(ctx context.Context, ref string) (Fetcher, error)
+
+ // Pusher returns a new pusher for the provided reference.
+ // The returned Pusher should satisfy content.Ingester and concurrent attempts
+ // to push the same blob using the Ingester API should result in ErrUnavailable.
+ Pusher(ctx context.Context, ref string) (Pusher, error)
+}
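+
+// A typical resolve-then-fetch flow (an illustrative sketch; the resolver
+// value and reference are assumptions):
+//
+//	name, desc, err := resolver.Resolve(ctx, "docker.io/library/ubuntu:latest")
+//	if err != nil { ... }
+//	fetcher, err := resolver.Fetcher(ctx, name)
+//	if err != nil { ... }
+//	rc, err := fetcher.Fetch(ctx, desc)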
+
+// Fetcher fetches content.
+// A Fetcher implementation may also implement the FetcherByDigest interface.
+type Fetcher interface {
+ // Fetch the resource identified by the descriptor.
+ Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
+}
+
+// FetcherByDigest fetches content by the digest.
+type FetcherByDigest interface {
+ // FetchByDigest fetches the resource identified by the digest.
+ //
+ // FetcherByDigest usually returns an incomplete descriptor.
+// Typically, the media type is set to "application/octet-stream",
+ // and the annotations are unset.
+ FetchByDigest(ctx context.Context, dgst digest.Digest, opts ...FetchByDigestOpts) (io.ReadCloser, ocispec.Descriptor, error)
+}
+
+// Pusher pushes content
+type Pusher interface {
+ // Push returns a content writer for the given resource identified
+ // by the descriptor.
+ Push(ctx context.Context, d ocispec.Descriptor) (content.Writer, error)
+}
+
+// FetcherFunc allows package users to implement a Fetcher with just a
+// function.
+type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
+
+// Fetch content
+func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
+ return fn(ctx, desc)
+}
+
+// PusherFunc allows package users to implement a Pusher with just a
+// function.
+type PusherFunc func(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error)
+
+// Push content
+func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
+ return fn(ctx, desc)
+}
+
+// FetchByDigestConfig provides configuration for fetching content by digest
+type FetchByDigestConfig struct {
+ // Mediatype specifies the media type header to append to the fetch request.
+ Mediatype string
+}
+
+// FetchByDigestOpts allows callers to set options for fetch object
+type FetchByDigestOpts func(context.Context, *FetchByDigestConfig) error
+
+// WithMediaType sets the media type header for fetch request
+func WithMediaType(mediatype string) FetchByDigestOpts {
+ return func(ctx context.Context, cfg *FetchByDigestConfig) error {
+ cfg.Mediatype = mediatype
+ return nil
+ }
+}
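+
+// Illustrative use of FetcherByDigest with a media type option (a sketch
+// assuming the fetcher implements the optional interface; dgst is an assumed
+// digest value):
+//
+//	if fbd, ok := fetcher.(FetcherByDigest); ok {
+//		rc, desc, err := fbd.FetchByDigest(ctx, dgst, WithMediaType(ocispec.MediaTypeImageManifest))
+//		...
+//	}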
diff --git a/vendor/github.com/containerd/containerd/v2/core/snapshots/snapshotter.go b/vendor/github.com/containerd/containerd/v2/core/snapshots/snapshotter.go
new file mode 100644
index 000000000000..09a2bfc57b2b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/snapshots/snapshotter.go
@@ -0,0 +1,466 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package snapshots
+
+import (
+ "context"
+ "encoding/json"
+ "strings"
+ "time"
+
+ snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1"
+ "github.com/containerd/containerd/v2/core/mount"
+ "github.com/containerd/containerd/v2/pkg/protobuf"
+)
+
+const (
+ // UnpackKeyPrefix is the beginning of the key format used for snapshots that will have
+ // image content unpacked into them.
+ UnpackKeyPrefix = "extract"
+ // UnpackKeyFormat is the format for the snapshotter keys used for extraction
+ UnpackKeyFormat = UnpackKeyPrefix + "-%s %s"
+ inheritedLabelsPrefix = "containerd.io/snapshot/"
+ labelSnapshotRef = "containerd.io/snapshot.ref"
+
+ // LabelSnapshotUIDMapping is the label used for UID mappings
+ LabelSnapshotUIDMapping = "containerd.io/snapshot/uidmapping"
+ // LabelSnapshotGIDMapping is the label used for GID mappings
+ LabelSnapshotGIDMapping = "containerd.io/snapshot/gidmapping"
+)
+
+// Kind identifies the kind of snapshot.
+type Kind uint8
+
+// definitions of snapshot kinds
+const (
+ KindUnknown Kind = iota
+ KindView
+ KindActive
+ KindCommitted
+)
+
+// ParseKind parses the provided string into a Kind
+//
+// If the string cannot be parsed, KindUnknown is returned.
+func ParseKind(s string) Kind {
+ s = strings.ToLower(s)
+ switch s {
+ case "view":
+ return KindView
+ case "active":
+ return KindActive
+ case "committed":
+ return KindCommitted
+ default:
+ return KindUnknown
+ }
+}
+
+// String returns the string representation of the Kind
+func (k Kind) String() string {
+ switch k {
+ case KindView:
+ return "View"
+ case KindActive:
+ return "Active"
+ case KindCommitted:
+ return "Committed"
+ default:
+ return "Unknown"
+ }
+}
+
+// MarshalJSON marshals the Kind to JSON.
+func (k Kind) MarshalJSON() ([]byte, error) {
+ return json.Marshal(k.String())
+}
+
+// UnmarshalJSON unmarshals the Kind from JSON.
+func (k *Kind) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+
+ *k = ParseKind(s)
+ return nil
+}
+
+// KindToProto converts from [Kind] to the protobuf definition [snapshots.Kind].
+func KindToProto(kind Kind) snapshotsapi.Kind {
+ switch kind {
+ case KindActive:
+ return snapshotsapi.Kind_ACTIVE
+ case KindView:
+ return snapshotsapi.Kind_VIEW
+ default:
+ return snapshotsapi.Kind_COMMITTED
+ }
+}
+
+// KindFromProto converts from the protobuf definition [snapshots.Kind] to
+// [Kind].
+func KindFromProto(kind snapshotsapi.Kind) Kind {
+ switch kind {
+ case snapshotsapi.Kind_ACTIVE:
+ return KindActive
+ case snapshotsapi.Kind_VIEW:
+ return KindView
+ default:
+ return KindCommitted
+ }
+}
+
+// Info provides information about a particular snapshot.
+// JSON marshalling is supported for interacting with tools like ctr.
+type Info struct {
+ Kind Kind // active or committed snapshot
+ Name string // name or key of snapshot
+ Parent string `json:",omitempty"` // name of parent snapshot
+
+ // Labels for a snapshot.
+ //
+ // Note: only labels prefixed with `containerd.io/snapshot/` will be inherited
+ // by the snapshotter's `Prepare`, `View`, or `Commit` calls.
+ Labels map[string]string `json:",omitempty"`
+ Created time.Time `json:",omitempty"` // Created time
+ Updated time.Time `json:",omitempty"` // Last update time
+}
+
+// InfoToProto converts from [Info] to the protobuf definition [snapshots.Info].
+func InfoToProto(info Info) *snapshotsapi.Info {
+ return &snapshotsapi.Info{
+ Name: info.Name,
+ Parent: info.Parent,
+ Kind: KindToProto(info.Kind),
+ CreatedAt: protobuf.ToTimestamp(info.Created),
+ UpdatedAt: protobuf.ToTimestamp(info.Updated),
+ Labels: info.Labels,
+ }
+}
+
+// InfoFromProto converts from the protobuf definition [snapshots.Info] to
+// [Info].
+func InfoFromProto(info *snapshotsapi.Info) Info {
+ return Info{
+ Name: info.Name,
+ Parent: info.Parent,
+ Kind: KindFromProto(info.Kind),
+ Created: protobuf.FromTimestamp(info.CreatedAt),
+ Updated: protobuf.FromTimestamp(info.UpdatedAt),
+ Labels: info.Labels,
+ }
+}
+
+// Usage defines statistics for disk resources consumed by the snapshot.
+//
+// These statistics only include the resources consumed by the snapshot itself
+// and do not include resource usage by the parent.
+type Usage struct {
+ Inodes int64 // number of inodes in use.
+ Size int64 // provides usage, in bytes, of snapshot
+}
+
+// Add the provided usage to the current usage
+func (u *Usage) Add(other Usage) {
+ u.Size += other.Size
+
+ // TODO(stevvooe): assumes independent inodes, but provides an upper
+ // bound. This should be pretty close, assuming the inodes for a
+ // snapshot are roughly unique to it. Don't trust this assumption.
+ u.Inodes += other.Inodes
+}
+
+// UsageFromProto converts from the protobuf definition [snapshots.Usage] to
+// [Usage].
+func UsageFromProto(resp *snapshotsapi.UsageResponse) Usage {
+ return Usage{
+ Inodes: resp.Inodes,
+ Size: resp.Size,
+ }
+}
+
+// UsageToProto converts from [Usage] to the protobuf definition [snapshots.Usage].
+func UsageToProto(usage Usage) *snapshotsapi.UsageResponse {
+ return &snapshotsapi.UsageResponse{
+ Inodes: usage.Inodes,
+ Size: usage.Size,
+ }
+}
+
+// WalkFunc defines the callback for a snapshot walk.
+type WalkFunc func(context.Context, Info) error
+
+// Snapshotter defines the methods required to implement a snapshotter for
+// allocating, snapshotting and mounting filesystem changesets. The model works
+// by building up sets of changes with parent-child relationships.
+//
+// A snapshot represents a filesystem state. Every snapshot has a parent, where
+// the empty parent is represented by the empty string. A diff can be taken
+// between a parent and its snapshot to generate a classic layer.
+//
+// An active snapshot is created by calling `Prepare`. After mounting, changes
+// can be made to the snapshot. The act of committing creates a committed
+// snapshot. The committed snapshot inherits the parent of the active snapshot. The
+// committed snapshot can then be used as a parent. Active snapshots can never
+// act as a parent.
+//
+// Snapshots are best understood by their lifecycle. Active snapshots are
+// always created with Prepare or View. Committed snapshots are always created
+// with Commit. Active snapshots never become committed snapshots and vice
+// versa. All snapshots may be removed.
+//
+// For consistency, we define the following terms to be used throughout this
+// interface for snapshotter implementations:
+//
+// `ctx` - refers to a context.Context
+// `key` - refers to an active snapshot
+// `name` - refers to a committed snapshot
+// `parent` - refers to the parent in relation
+//
+// Most methods take various combinations of these identifiers. Typically,
+// `name` and `parent` will be used in cases where a method *only* takes
+// committed snapshots. `key` will be used to refer to active snapshots in most
+// cases, except where noted. All variables used to access snapshots use the
+// same key space. For example, an active snapshot may not share the same key
+// with a committed snapshot.
+//
+// We cover several examples below to demonstrate the utility of the snapshotter.
+//
+// # Importing a Layer
+//
+// To import a layer, we simply have the snapshotter provide a list of
+// mounts to be applied such that our dst will capture a changeset. We start
+// out by getting a path to the layer tar file and creating a temp location to
+// unpack it to:
+//
+// layerPath, tmpDir := getLayerPath(), mkTmpDir() // just a path to layer tar file.
+//
+// We start by using the snapshotter to Prepare a new snapshot transaction, using a
+// key and descending from the empty parent "". To prevent our layer from being
+// garbage collected during unpacking, we add the `containerd.io/gc.root` label:
+//
+// noGcOpt := snapshots.WithLabels(map[string]string{
+// "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
+// })
+// mounts, err := snapshotter.Prepare(ctx, key, "", noGcOpt)
+// if err != nil { ... }
+//
+// We get back a list of mounts from snapshotter.Prepare(), with the key identifying
+// the active snapshot. Mount this to the temporary location with the
+// following:
+//
+// if err := mount.All(mounts, tmpDir); err != nil { ... }
+//
+// Once the mounts are performed, our temporary location is ready to capture
+// a diff. In practice, this works similarly to a filesystem transaction. The
+// next step is to unpack the layer. We have a special function unpackLayer
+// that applies the contents of the layer to the target location and calculates
+// the DiffID of the unpacked layer (a requirement for the docker
+// implementation):
+//
+// layer, err := os.Open(layerPath)
+// if err != nil { ... }
+// digest, err := unpackLayer(tmpLocation, layer) // unpack into layer location
+// if err != nil { ... }
+//
+// When the above completes, we should have a filesystem that represents the
+// contents of the layer. Careful implementations should verify that digest
+// matches the expected DiffID. When completed, we unmount the mounts:
+//
+// unmount(mounts) // optional, for now
+//
+// Now that we've verified and unpacked our layer, we commit the active
+// snapshot to a name. For this example, we are just going to use the layer
+// digest, but in practice, this will probably be the ChainID. This also removes
+// the active snapshot:
+//
+// if err := snapshotter.Commit(ctx, digest.String(), key, noGcOpt); err != nil { ... }
+//
+// Now, we have a layer in the snapshotter that can be accessed with the digest
+// provided during commit.
+//
+// # Importing the Next Layer
+//
+// Making a layer depend on the above is identical to the process described
+// above except that the parent is provided as parent when calling
+// snapshotter.Prepare(), assuming a clean, unique key identifier:
+//
+// mounts, err := snapshotter.Prepare(ctx, key, parentDigest, noGcOpt)
+//
+// We then mount, apply and commit, as we did above. The new snapshot will be
+// based on the content of the previous one.
+//
+// # Running a Container
+//
+// To run a container, we simply provide snapshotter.Prepare() the committed image
+// snapshot as the parent. After mounting, the prepared path can
+// be used directly as the container's filesystem:
+//
+// mounts, err := snapshotter.Prepare(ctx, containerKey, imageRootFSChainID)
+//
+// The returned mounts can then be passed directly to the container runtime. If
+// one would like to create a new image from the filesystem, snapshotter.Commit() is
+// called:
+//
+// if err := snapshotter.Commit(ctx, newImageSnapshot, containerKey); err != nil { ... }
+//
+// Alternatively, for most container runs, snapshotter.Remove() will be called to
+// signal the snapshotter to abandon the changes.
+type Snapshotter interface {
+ // Stat returns the info for an active or committed snapshot by name or
+ // key.
+ //
+ // Should be used for parent resolution, existence checks and to discern
+ // the kind of snapshot.
+ Stat(ctx context.Context, key string) (Info, error)
+
+ // Update updates the info for a snapshot.
+ //
+ // Only mutable properties of a snapshot may be updated.
+ Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error)
+
+ // Usage returns the resource usage of an active or committed snapshot
+ // excluding the usage of parent snapshots.
+ //
+ // The running time of this call for active snapshots is dependent on
+ // implementation, but may be proportional to the size of the resource.
+ // Callers should take this into consideration. Implementations should
+ // attempt to honor context cancellation and avoid taking locks when making
+ // the calculation.
+ Usage(ctx context.Context, key string) (Usage, error)
+
+ // Mounts returns the mounts for the active snapshot transaction identified
+ // by key. Can be called on a read-write or readonly transaction. This is
+ // available only for active snapshots.
+ //
+ // This can be used to recover mounts after calling View or Prepare.
+ Mounts(ctx context.Context, key string) ([]mount.Mount, error)
+
+ // Prepare creates an active snapshot identified by key descending from the
+ // provided parent. The returned mounts can be used to mount the snapshot
+ // to capture changes.
+ //
+ // If a parent is provided, after performing the mounts, the destination
+ // will start with the content of the parent. The parent must be a
+ // committed snapshot. Changes to the mounted destination will be captured
+ // in relation to the parent. The default parent, "", is an empty
+ // directory.
+ //
+ // The changes may be saved to a committed snapshot by calling Commit. When
+ // one is done with the transaction, Remove should be called on the key.
+ //
+ // Multiple calls to Prepare or View with the same key should fail.
+ Prepare(ctx context.Context, key, parent string, opts ...Opt) ([]mount.Mount, error)
+
+ // View behaves identically to Prepare except the result may not be
+ // committed back to the snapshotter. View returns a readonly view on
+ // the parent, with the active snapshot being tracked by the given key.
+ //
+ // This method operates identically to Prepare, except the mounts returned
+ // may have the readonly flag set. Any modifications to the underlying
+ // filesystem will be ignored. Implementations may perform this in a more
+ // efficient manner that differs from what would be attempted with
+ // `Prepare`.
+ //
+ // Commit may not be called on the provided key and will return an error.
+ // To collect the resources associated with key, Remove must be called with
+ // key as the argument.
+ View(ctx context.Context, key, parent string, opts ...Opt) ([]mount.Mount, error)
+
+ // Commit captures the changes between key and its parent into a snapshot
+ // identified by name. The name can then be used with the snapshotter's other
+ // methods to create subsequent snapshots.
+ //
+ // A committed snapshot will be created under name with the parent of the
+ // active snapshot.
+ //
+ // After commit, the snapshot identified by key is removed.
+ Commit(ctx context.Context, name, key string, opts ...Opt) error
+
+ // Remove the committed or active snapshot by the provided key.
+ //
+ // All resources associated with the key will be removed.
+ //
+ // If the snapshot is a parent of another snapshot, its children must be
+ // removed before proceeding.
+ Remove(ctx context.Context, key string) error
+
+ // Walk will call the provided function for each snapshot in the
+ // snapshotter which matches the provided filters. If no filters are
+ // given, all items will be walked.
+ // Filters:
+ // name
+ // parent
+ // kind (active,view,committed)
+ // labels.(label)
+ Walk(ctx context.Context, fn WalkFunc, filters ...string) error
+
+ // Close releases the internal resources.
+ //
+ // Close is expected to be called at the end of the lifecycle of the snapshotter,
+ // but is not mandatory.
+ //
+ // Close returns nil when it is already closed.
+ Close() error
+}
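+
+// For example, a caller might walk only committed snapshots (an illustrative
+// sketch; sn is an assumed Snapshotter value):
+//
+//	err := sn.Walk(ctx, func(ctx context.Context, info snapshots.Info) error {
+//		fmt.Println(info.Name, info.Parent)
+//		return nil
+//	}, "kind==committed")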
+
+// Cleaner defines a type capable of performing asynchronous resource cleanup.
+// The Cleaner interface should be used by snapshotters which implement fast
+// removal and deferred resource cleanup. This prevents snapshot removal from
+// blocking on lengthy resource cleanup before acknowledging that a snapshot
+// key has been removed and is available for re-use. This is also useful when
+// performing multi-key removal with the intent of cleaning up all the
+// resources after each snapshot key has been removed.
+type Cleaner interface {
+ Cleanup(ctx context.Context) error
+}
+
+// Opt allows setting mutable snapshot properties on creation
+type Opt func(info *Info) error
+
+// WithLabels appends labels to a created snapshot
+func WithLabels(labels map[string]string) Opt {
+ return func(info *Info) error {
+ if info.Labels == nil {
+ info.Labels = make(map[string]string)
+ }
+
+ for k, v := range labels {
+ info.Labels[k] = v
+ }
+
+ return nil
+ }
+}
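+
+// For example (an illustrative sketch; sn and key are assumed values):
+//
+//	mounts, err := sn.Prepare(ctx, key, "",
+//		WithLabels(map[string]string{
+//			"containerd.io/snapshot/mylabel": "myvalue",
+//		}))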
+
+// FilterInheritedLabels filters the provided labels by removing any key which
+// isn't a snapshot label. Snapshot labels have a prefix of "containerd.io/snapshot/"
+// or are the "containerd.io/snapshot.ref" label.
+func FilterInheritedLabels(labels map[string]string) map[string]string {
+ if labels == nil {
+ return nil
+ }
+
+ filtered := make(map[string]string)
+ for k, v := range labels {
+ if k == labelSnapshotRef || strings.HasPrefix(k, inheritedLabelsPrefix) {
+ filtered[k] = v
+ }
+ }
+ return filtered
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/snapshots/storage/bolt.go b/vendor/github.com/containerd/containerd/v2/core/snapshots/storage/bolt.go
new file mode 100644
index 000000000000..728fb0df7c61
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/snapshots/storage/bolt.go
@@ -0,0 +1,649 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package storage
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/containerd/containerd/v2/core/metadata/boltutil"
+ "github.com/containerd/containerd/v2/core/snapshots"
+ "github.com/containerd/containerd/v2/pkg/filters"
+ "github.com/containerd/errdefs"
+ bolt "go.etcd.io/bbolt"
+)
+
+var (
+ bucketKeyStorageVersion = []byte("v1")
+ bucketKeySnapshot = []byte("snapshots")
+ bucketKeyParents = []byte("parents")
+
+ bucketKeyID = []byte("id")
+ bucketKeyParent = []byte("parent")
+ bucketKeyKind = []byte("kind")
+ bucketKeyInodes = []byte("inodes")
+ bucketKeySize = []byte("size")
+
+ // ErrNoTransaction is returned when an operation is attempted with
+ // a context which is not inside of a transaction.
+ ErrNoTransaction = errors.New("no transaction in context")
+)
+
+// parentKey returns a composite key of the parent and child identifiers. The
+// parts of the key are separated by a zero byte.
+func parentKey(parent, child uint64) []byte {
+ b := make([]byte, binary.Size([]uint64{parent, child})+1)
+ i := binary.PutUvarint(b, parent)
+ j := binary.PutUvarint(b[i+1:], child)
+ return b[0 : i+j+1]
+}
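+
+// For example, parentKey(1, 2) yields []byte{0x01, 0x00, 0x02}: the uvarint
+// encoding of the parent, a zero separator byte, then the uvarint of the child.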
+
+// parentPrefixKey returns the parent part of the composite key with the
+// zero byte separator.
+func parentPrefixKey(parent uint64) []byte {
+ b := make([]byte, binary.Size(parent)+1)
+ i := binary.PutUvarint(b, parent)
+ return b[0 : i+1]
+}
+
+// getParentPrefix returns the first part of the composite key which
+// represents the parent identifier.
+func getParentPrefix(b []byte) uint64 {
+ parent, _ := binary.Uvarint(b)
+ return parent
+}
+
+// GetInfo returns the snapshot Info directly from the metadata. Requires a
+// context with a storage transaction.
+func GetInfo(ctx context.Context, key string) (string, snapshots.Info, snapshots.Usage, error) {
+ var (
+ id uint64
+ su snapshots.Usage
+ si = snapshots.Info{
+ Name: key,
+ }
+ )
+ err := withSnapshotBucket(ctx, key, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+ getUsage(bkt, &su)
+ return readSnapshot(bkt, &id, &si)
+ })
+ if err != nil {
+ return "", snapshots.Info{}, snapshots.Usage{}, err
+ }
+
+ return strconv.FormatUint(id, 10), si, su, nil
+}
+
+// UpdateInfo updates an existing snapshot info's data
+func UpdateInfo(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
+ updated := snapshots.Info{
+ Name: info.Name,
+ }
+ err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+ sbkt := bkt.Bucket([]byte(info.Name))
+ if sbkt == nil {
+ return fmt.Errorf("snapshot does not exist: %w", errdefs.ErrNotFound)
+ }
+ if err := readSnapshot(sbkt, nil, &updated); err != nil {
+ return err
+ }
+
+ if len(fieldpaths) > 0 {
+ for _, path := range fieldpaths {
+ if strings.HasPrefix(path, "labels.") {
+ if updated.Labels == nil {
+ updated.Labels = map[string]string{}
+ }
+
+ key := strings.TrimPrefix(path, "labels.")
+ updated.Labels[key] = info.Labels[key]
+ continue
+ }
+
+ switch path {
+ case "labels":
+ updated.Labels = info.Labels
+ default:
+ return fmt.Errorf("cannot update %q field on snapshot %q: %w", path, info.Name, errdefs.ErrInvalidArgument)
+ }
+ }
+ } else {
+ // Set mutable fields
+ updated.Labels = info.Labels
+ }
+ updated.Updated = time.Now().UTC()
+ if err := boltutil.WriteTimestamps(sbkt, updated.Created, updated.Updated); err != nil {
+ return err
+ }
+
+ return boltutil.WriteLabels(sbkt, updated.Labels)
+ })
+ if err != nil {
+ return snapshots.Info{}, err
+ }
+ return updated, nil
+}
+
+// WalkInfo iterates through all metadata Info for the stored snapshots and
+// calls the provided function for each. Requires a context with a storage
+// transaction.
+func WalkInfo(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error {
+ filter, err := filters.ParseAll(fs...)
+ if err != nil {
+ return err
+ }
+ // TODO: allow indexes (name, parent, specific labels)
+ return withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+ return bkt.ForEach(func(k, v []byte) error {
+ // skip non buckets
+ if v != nil {
+ return nil
+ }
+ var (
+ sbkt = bkt.Bucket(k)
+ si = snapshots.Info{
+ Name: string(k),
+ }
+ )
+ if err := readSnapshot(sbkt, nil, &si); err != nil {
+ return err
+ }
+ if !filter.Match(adaptSnapshot(si)) {
+ return nil
+ }
+
+ return fn(ctx, si)
+ })
+ })
+}
+
+// GetSnapshot returns the metadata for the active or view snapshot transaction
+// referenced by the given key. Requires a context with a storage transaction.
+func GetSnapshot(ctx context.Context, key string) (s Snapshot, err error) {
+ err = withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+ sbkt := bkt.Bucket([]byte(key))
+ if sbkt == nil {
+ return fmt.Errorf("snapshot does not exist: %w", errdefs.ErrNotFound)
+ }
+
+ s.ID = strconv.FormatUint(readID(sbkt), 10)
+ s.Kind = readKind(sbkt)
+
+ if s.Kind != snapshots.KindActive && s.Kind != snapshots.KindView {
+ return fmt.Errorf("requested snapshot %v not active or view: %w", key, errdefs.ErrFailedPrecondition)
+ }
+
+ if parentKey := sbkt.Get(bucketKeyParent); len(parentKey) > 0 {
+ spbkt := bkt.Bucket(parentKey)
+ if spbkt == nil {
+ return fmt.Errorf("parent does not exist: %w", errdefs.ErrNotFound)
+ }
+
+ s.ParentIDs, err = parents(bkt, spbkt, readID(spbkt))
+ if err != nil {
+ return fmt.Errorf("failed to get parent chain: %w", err)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return Snapshot{}, err
+ }
+
+ return
+}
+
+// CreateSnapshot inserts a record for an active or view snapshot with the provided parent.
+func CreateSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts ...snapshots.Opt) (s Snapshot, err error) {
+ switch kind {
+ case snapshots.KindActive, snapshots.KindView:
+ default:
+ return Snapshot{}, fmt.Errorf("snapshot type %v invalid; only snapshots of type Active or View can be created: %w", kind, errdefs.ErrInvalidArgument)
+ }
+ var base snapshots.Info
+ for _, opt := range opts {
+ if err := opt(&base); err != nil {
+ return Snapshot{}, err
+ }
+ }
+
+ err = createBucketIfNotExists(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+ var (
+ spbkt *bolt.Bucket
+ )
+ if parent != "" {
+ spbkt = bkt.Bucket([]byte(parent))
+ if spbkt == nil {
+ return fmt.Errorf("missing parent %q bucket: %w", parent, errdefs.ErrNotFound)
+ }
+
+ if readKind(spbkt) != snapshots.KindCommitted {
+ return fmt.Errorf("parent %q is not committed snapshot: %w", parent, errdefs.ErrInvalidArgument)
+ }
+ }
+ sbkt, err := bkt.CreateBucket([]byte(key))
+ if err != nil {
+ if err == bolt.ErrBucketExists {
+ err = fmt.Errorf("snapshot %v: %w", key, errdefs.ErrAlreadyExists)
+ }
+ return err
+ }
+
+ id, err := bkt.NextSequence()
+ if err != nil {
+ return fmt.Errorf("unable to get identifier for snapshot %q: %w", key, err)
+ }
+
+ t := time.Now().UTC()
+ si := snapshots.Info{
+ Parent: parent,
+ Kind: kind,
+ Labels: base.Labels,
+ Created: t,
+ Updated: t,
+ }
+ if err := putSnapshot(sbkt, id, si); err != nil {
+ return err
+ }
+
+ if spbkt != nil {
+ pid := readID(spbkt)
+
+ // Store a backlink from the key to the parent. Store the snapshot name
+ // as the value to allow following the backlink to the snapshot value.
+ if err := pbkt.Put(parentKey(pid, id), []byte(key)); err != nil {
+ return fmt.Errorf("failed to write parent link for snapshot %q: %w", key, err)
+ }
+
+ s.ParentIDs, err = parents(bkt, spbkt, pid)
+ if err != nil {
+ return fmt.Errorf("failed to get parent chain for snapshot %q: %w", key, err)
+ }
+ }
+
+ s.ID = strconv.FormatUint(id, 10)
+ s.Kind = kind
+ return nil
+ })
+ if err != nil {
+ return Snapshot{}, err
+ }
+
+ return
+}
+
+// Remove removes a snapshot from the metastore. The string identifier for the
+// snapshot is returned as well as the kind. The provided context must contain a
+// writable transaction.
+func Remove(ctx context.Context, key string) (string, snapshots.Kind, error) {
+ var (
+ id uint64
+ si snapshots.Info
+ )
+
+ if err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+ sbkt := bkt.Bucket([]byte(key))
+ if sbkt == nil {
+ return fmt.Errorf("snapshot %v: %w", key, errdefs.ErrNotFound)
+ }
+
+ if err := readSnapshot(sbkt, &id, &si); err != nil {
+ return fmt.Errorf("failed to read snapshot %s: %w", key, err)
+ }
+
+ if pbkt != nil {
+ k, _ := pbkt.Cursor().Seek(parentPrefixKey(id))
+ if getParentPrefix(k) == id {
+ return fmt.Errorf("cannot remove snapshot with child: %w", errdefs.ErrFailedPrecondition)
+ }
+
+ if si.Parent != "" {
+ spbkt := bkt.Bucket([]byte(si.Parent))
+ if spbkt == nil {
+ return fmt.Errorf("snapshot %v: %w", key, errdefs.ErrNotFound)
+ }
+
+ if err := pbkt.Delete(parentKey(readID(spbkt), id)); err != nil {
+ return fmt.Errorf("failed to delete parent link: %w", err)
+ }
+ }
+ }
+
+ if err := bkt.DeleteBucket([]byte(key)); err != nil {
+ return fmt.Errorf("failed to delete snapshot: %w", err)
+ }
+
+ return nil
+ }); err != nil {
+ return "", 0, err
+ }
+
+ return strconv.FormatUint(id, 10), si.Kind, nil
+}
+
+// CommitActive renames the active snapshot transaction referenced by `key`
+// as a committed snapshot referenced by `name`. The resulting snapshot will be
+// committed and readonly. The `key` reference will no longer be available for
+// lookup or removal. The returned string identifier for the committed snapshot
+// is the same as the identifier of the original active snapshot. The provided
+// context must contain a writable transaction.
+func CommitActive(ctx context.Context, key, name string, usage snapshots.Usage, opts ...snapshots.Opt) (string, error) {
+ var (
+ id uint64
+ base snapshots.Info
+ )
+ for _, opt := range opts {
+ if err := opt(&base); err != nil {
+ return "", err
+ }
+ }
+
+ if err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+ dbkt, err := bkt.CreateBucket([]byte(name))
+ if err != nil {
+ if err == bolt.ErrBucketExists {
+ err = errdefs.ErrAlreadyExists
+ }
+ return fmt.Errorf("committed snapshot %v: %w", name, err)
+ }
+ sbkt := bkt.Bucket([]byte(key))
+ if sbkt == nil {
+ return fmt.Errorf("failed to get active snapshot %q: %w", key, errdefs.ErrNotFound)
+ }
+
+ var si snapshots.Info
+ if err := readSnapshot(sbkt, &id, &si); err != nil {
+ return fmt.Errorf("failed to read active snapshot %q: %w", key, err)
+ }
+
+ if si.Kind != snapshots.KindActive {
+ return fmt.Errorf("snapshot %q is not active: %w", key, errdefs.ErrFailedPrecondition)
+ }
+ si.Kind = snapshots.KindCommitted
+ si.Created = time.Now().UTC()
+ si.Updated = si.Created
+
+ // Replace labels, do not inherit
+ si.Labels = base.Labels
+
+ if err := putSnapshot(dbkt, id, si); err != nil {
+ return err
+ }
+ if err := putUsage(dbkt, usage); err != nil {
+ return err
+ }
+ if err := bkt.DeleteBucket([]byte(key)); err != nil {
+ return fmt.Errorf("failed to delete active snapshot %q: %w", key, err)
+ }
+ if si.Parent != "" {
+ spbkt := bkt.Bucket([]byte(si.Parent))
+ if spbkt == nil {
+ return fmt.Errorf("missing parent %q of snapshot %q: %w", si.Parent, key, errdefs.ErrNotFound)
+ }
+ pid := readID(spbkt)
+
+ // Update the parent backlink to reference the new name
+ if err := pbkt.Put(parentKey(pid, id), []byte(name)); err != nil {
+ return fmt.Errorf("failed to update parent link %q from %q to %q: %w", pid, key, name, err)
+ }
+ }
+
+ return nil
+ }); err != nil {
+ return "", err
+ }
+
+ return strconv.FormatUint(id, 10), nil
+}
+
+// IDMap returns all the IDs mapped to their key
+func IDMap(ctx context.Context) (map[string]string, error) {
+ m := map[string]string{}
+ if err := withBucket(ctx, func(ctx context.Context, bkt, _ *bolt.Bucket) error {
+ return bkt.ForEach(func(k, v []byte) error {
+ // skip non buckets
+ if v != nil {
+ return nil
+ }
+ id := readID(bkt.Bucket(k))
+ m[strconv.FormatUint(id, 10)] = string(k)
+ return nil
+ })
+ }); err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
+
+func withSnapshotBucket(ctx context.Context, key string, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error {
+ tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx)
+ if !ok {
+ return ErrNoTransaction
+ }
+ vbkt := tx.Bucket(bucketKeyStorageVersion)
+ if vbkt == nil {
+ return fmt.Errorf("bucket does not exist: %w", errdefs.ErrNotFound)
+ }
+ bkt := vbkt.Bucket(bucketKeySnapshot)
+ if bkt == nil {
+ return fmt.Errorf("snapshots bucket does not exist: %w", errdefs.ErrNotFound)
+ }
+ bkt = bkt.Bucket([]byte(key))
+ if bkt == nil {
+ return fmt.Errorf("snapshot does not exist: %w", errdefs.ErrNotFound)
+ }
+
+ return fn(ctx, bkt, vbkt.Bucket(bucketKeyParents))
+}
+
+func withBucket(ctx context.Context, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error {
+ tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx)
+ if !ok {
+ return ErrNoTransaction
+ }
+ bkt := tx.Bucket(bucketKeyStorageVersion)
+ if bkt == nil {
+ return fmt.Errorf("bucket does not exist: %w", errdefs.ErrNotFound)
+ }
+ return fn(ctx, bkt.Bucket(bucketKeySnapshot), bkt.Bucket(bucketKeyParents))
+}
+
+func createBucketIfNotExists(ctx context.Context, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error {
+ tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx)
+ if !ok {
+ return ErrNoTransaction
+ }
+
+ bkt, err := tx.CreateBucketIfNotExists(bucketKeyStorageVersion)
+ if err != nil {
+ return fmt.Errorf("failed to create version bucket: %w", err)
+ }
+ sbkt, err := bkt.CreateBucketIfNotExists(bucketKeySnapshot)
+ if err != nil {
+ return fmt.Errorf("failed to create snapshots bucket: %w", err)
+ }
+ pbkt, err := bkt.CreateBucketIfNotExists(bucketKeyParents)
+ if err != nil {
+ return fmt.Errorf("failed to create parents bucket: %w", err)
+ }
+ return fn(ctx, sbkt, pbkt)
+}
+
+func parents(bkt, pbkt *bolt.Bucket, parent uint64) (parents []string, err error) {
+ for {
+ parents = append(parents, strconv.FormatUint(parent, 10))
+
+ parentKey := pbkt.Get(bucketKeyParent)
+ if len(parentKey) == 0 {
+ return
+ }
+ pbkt = bkt.Bucket(parentKey)
+ if pbkt == nil {
+ return nil, fmt.Errorf("missing parent: %w", errdefs.ErrNotFound)
+ }
+
+ parent = readID(pbkt)
+ }
+}
+
+func readKind(bkt *bolt.Bucket) (k snapshots.Kind) {
+ kind := bkt.Get(bucketKeyKind)
+ if len(kind) == 1 {
+ k = snapshots.Kind(kind[0])
+ }
+ return
+}
+
+func readID(bkt *bolt.Bucket) uint64 {
+ id, _ := binary.Uvarint(bkt.Get(bucketKeyID))
+ return id
+}
+
+func readSnapshot(bkt *bolt.Bucket, id *uint64, si *snapshots.Info) error {
+ if id != nil {
+ *id = readID(bkt)
+ }
+ if si != nil {
+ si.Kind = readKind(bkt)
+ si.Parent = string(bkt.Get(bucketKeyParent))
+
+ if err := boltutil.ReadTimestamps(bkt, &si.Created, &si.Updated); err != nil {
+ return err
+ }
+
+ labels, err := boltutil.ReadLabels(bkt)
+ if err != nil {
+ return err
+ }
+ si.Labels = labels
+ }
+
+ return nil
+}
+
+func putSnapshot(bkt *bolt.Bucket, id uint64, si snapshots.Info) error {
+ idEncoded, err := encodeID(id)
+ if err != nil {
+ return err
+ }
+
+ updates := [][2][]byte{
+ {bucketKeyID, idEncoded},
+ {bucketKeyKind, []byte{byte(si.Kind)}},
+ }
+ if si.Parent != "" {
+ updates = append(updates, [2][]byte{bucketKeyParent, []byte(si.Parent)})
+ }
+ for _, v := range updates {
+ if err := bkt.Put(v[0], v[1]); err != nil {
+ return err
+ }
+ }
+ if err := boltutil.WriteTimestamps(bkt, si.Created, si.Updated); err != nil {
+ return err
+ }
+ return boltutil.WriteLabels(bkt, si.Labels)
+}
+
+func getUsage(bkt *bolt.Bucket, usage *snapshots.Usage) {
+ usage.Inodes, _ = binary.Varint(bkt.Get(bucketKeyInodes))
+ usage.Size, _ = binary.Varint(bkt.Get(bucketKeySize))
+}
+
+func putUsage(bkt *bolt.Bucket, usage snapshots.Usage) error {
+ for _, v := range []struct {
+ key []byte
+ value int64
+ }{
+ {bucketKeyInodes, usage.Inodes},
+ {bucketKeySize, usage.Size},
+ } {
+ e, err := encodeSize(v.value)
+ if err != nil {
+ return err
+ }
+ if err := bkt.Put(v.key, e); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func encodeSize(size int64) ([]byte, error) {
+ var (
+ buf [binary.MaxVarintLen64]byte
+ sizeEncoded = buf[:]
+ )
+ sizeEncoded = sizeEncoded[:binary.PutVarint(sizeEncoded, size)]
+
+ if len(sizeEncoded) == 0 {
+ return nil, fmt.Errorf("failed encoding size = %v", size)
+ }
+ return sizeEncoded, nil
+}
+
+func encodeID(id uint64) ([]byte, error) {
+ var (
+ buf [binary.MaxVarintLen64]byte
+ idEncoded = buf[:]
+ )
+ idEncoded = idEncoded[:binary.PutUvarint(idEncoded, id)]
+
+ if len(idEncoded) == 0 {
+ return nil, fmt.Errorf("failed encoding id = %v", id)
+ }
+ return idEncoded, nil
+}
+
+func adaptSnapshot(info snapshots.Info) filters.Adaptor {
+ return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
+ if len(fieldpath) == 0 {
+ return "", false
+ }
+
+ switch fieldpath[0] {
+ case "kind":
+ switch info.Kind {
+ case snapshots.KindActive:
+ return "active", true
+ case snapshots.KindView:
+ return "view", true
+ case snapshots.KindCommitted:
+ return "committed", true
+ }
+ case "name":
+ return info.Name, true
+ case "parent":
+ return info.Parent, true
+ case "labels":
+ if len(info.Labels) == 0 {
+ return "", false
+ }
+
+ v, ok := info.Labels[strings.Join(fieldpath[1:], ".")]
+ return v, ok
+ }
+
+ return "", false
+ })
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/snapshots/storage/metastore.go b/vendor/github.com/containerd/containerd/v2/core/snapshots/storage/metastore.go
new file mode 100644
index 000000000000..bbfc091ff39a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/snapshots/storage/metastore.go
@@ -0,0 +1,157 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package storage provides a metadata storage implementation for snapshot
+// drivers. Driver implementations are responsible for starting and managing
+// transactions using the defined context creator. This storage package uses
+// BoltDB for storing metadata. Access to the raw boltdb transaction is not
+// provided, but the stored object is provided by the proto subpackage.
+package storage
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/containerd/containerd/v2/core/snapshots"
+ "github.com/containerd/log"
+ bolt "go.etcd.io/bbolt"
+)
+
+// Transactor is used to finalize an active transaction.
+type Transactor interface {
+ // Commit commits any changes made during the transaction. On error a
+ // caller is expected to clean up any resources which would have relied
+ // on data mutated as part of this transaction. Only writable
+ // transactions can commit, non-writable must call Rollback.
+ Commit() error
+
+ // Rollback rolls back any changes made during the transaction. This
+ // must be called on all non-writable transactions and aborted writable
+ // transaction.
+ Rollback() error
+}
+
+// Snapshot holds the metadata for an active or view snapshot transaction. The
+// ParentIDs hold the snapshot identifiers for the committed snapshots this
+// active or view is based on. The ParentIDs are ordered from the highest to the
+// lowest base, meaning they should be applied in order from the last index to
+// the first index. The first index should always be considered the active
+// snapshot's immediate parent.
+type Snapshot struct {
+ Kind snapshots.Kind
+ ID string
+ ParentIDs []string
+}
+
+// MetaStore is used to store metadata related to a snapshot driver. The
+// MetaStore is intended to store metadata related to name, state and
+// parentage. Using the MetaStore is not required to implement a snapshot
+// driver but can be used to handle the persistence and transactional
+// complexities of a driver implementation.
+type MetaStore struct {
+ dbfile string
+
+ dbL sync.Mutex
+ db *bolt.DB
+}
+
+// NewMetaStore returns a snapshot MetaStore for storage of metadata related to
+// a snapshot driver backed by a bolt file database. This implementation is
+// strongly consistent and performs all metadata changes in a transaction to
+// prevent process crashes from causing inconsistent metadata state.
+func NewMetaStore(dbfile string) (*MetaStore, error) {
+ return &MetaStore{
+ dbfile: dbfile,
+ }, nil
+}
+
+type transactionKey struct{}
+
+// TransactionContext creates a new transaction context. The writable value
+// should be set to true for transactions which are expected to mutate data.
+func (ms *MetaStore) TransactionContext(ctx context.Context, writable bool) (context.Context, Transactor, error) {
+ ms.dbL.Lock()
+ if ms.db == nil {
+ db, err := bolt.Open(ms.dbfile, 0600, nil)
+ if err != nil {
+ ms.dbL.Unlock()
+ return ctx, nil, fmt.Errorf("failed to open database file: %w", err)
+ }
+ ms.db = db
+ }
+ ms.dbL.Unlock()
+
+ tx, err := ms.db.Begin(writable)
+ if err != nil {
+ return ctx, nil, fmt.Errorf("failed to start transaction: %w", err)
+ }
+
+ ctx = context.WithValue(ctx, transactionKey{}, tx)
+
+ return ctx, tx, nil
+}
+
+// TransactionCallback represents a callback to be invoked while under a metastore transaction.
+type TransactionCallback func(ctx context.Context) error
+
+// WithTransaction is a convenience method to run a function `fn` while holding a meta store transaction.
+// If the callback `fn` returns an error or the transaction is not writable, the database transaction will be discarded.
+func (ms *MetaStore) WithTransaction(ctx context.Context, writable bool, fn TransactionCallback) error {
+ ctx, trans, err := ms.TransactionContext(ctx, writable)
+ if err != nil {
+ return err
+ }
+
+ var result []error
+ err = fn(ctx)
+ if err != nil {
+ result = append(result, err)
+ }
+
+ // Always rollback if the transaction is not writable or the callback failed
+ if err != nil || !writable {
+ if terr := trans.Rollback(); terr != nil {
+ log.G(ctx).WithError(terr).Error("failed to rollback transaction")
+
+ result = append(result, fmt.Errorf("rollback failed: %w", terr))
+ }
+ } else {
+ if terr := trans.Commit(); terr != nil {
+ log.G(ctx).WithError(terr).Error("failed to commit transaction")
+
+ result = append(result, fmt.Errorf("commit failed: %w", terr))
+ }
+ }
+
+ if err := errors.Join(result...); err != nil {
+ log.G(ctx).WithError(err).Debug("snapshotter error")
+ return err
+ }
+
+ return nil
+}
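+
+// Illustrative driver-side usage (a sketch only; the database path and key
+// are assumptions):
+//
+//	ms, _ := NewMetaStore("/var/lib/example/metadata.db")
+//	err := ms.WithTransaction(ctx, false, func(ctx context.Context) error {
+//		_, info, usage, err := GetInfo(ctx, key)
+//		...
+//		return err
+//	})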
+
+// Close closes the metastore and any underlying database connections
+func (ms *MetaStore) Close() error {
+ ms.dbL.Lock()
+ defer ms.dbL.Unlock()
+ if ms.db == nil {
+ return nil
+ }
+ return ms.db.Close()
+}
diff --git a/vendor/github.com/containerd/containerd/v2/defaults/defaults.go b/vendor/github.com/containerd/containerd/v2/defaults/defaults.go
new file mode 100644
index 000000000000..6f5b122ecf93
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/defaults/defaults.go
@@ -0,0 +1,32 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package defaults
+
+const (
+ // DefaultMaxRecvMsgSize defines the default maximum message size for
+ // receiving protobufs passed over the GRPC API.
+ DefaultMaxRecvMsgSize = 16 << 20
+ // DefaultMaxSendMsgSize defines the default maximum message size for
+ // sending protobufs passed over the GRPC API.
+ DefaultMaxSendMsgSize = 16 << 20
+ // DefaultRuntimeNSLabel defines the namespace label to check for the
+ // default runtime
+ DefaultRuntimeNSLabel = "containerd.io/defaults/runtime"
+ // DefaultSnapshotterNSLabel defines the namespace label to check for the
+ // default snapshotter
+ DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/defaults/defaults_darwin.go b/vendor/github.com/containerd/containerd/v2/defaults/defaults_darwin.go
new file mode 100644
index 000000000000..1391884cde7f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/defaults/defaults_darwin.go
@@ -0,0 +1,37 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package defaults
+
+const (
+ // DefaultRootDir is the default location used by containerd to store
+ // persistent data
+ DefaultRootDir = "/var/lib/containerd"
+ // DefaultStateDir is the default location used by containerd to store
+ // transient data
+ DefaultStateDir = "/var/run/containerd"
+ // DefaultAddress is the default unix socket address
+ DefaultAddress = "/var/run/containerd/containerd.sock"
+ // DefaultDebugAddress is the default unix socket address for pprof data
+ DefaultDebugAddress = "/var/run/containerd/debug.sock"
+ // DefaultFIFODir is the default location used by client-side cio library
+ // to store FIFOs.
+ DefaultFIFODir = "/var/run/containerd/fifo"
+ // DefaultRuntime is left empty, as there are multiple possible choices
+ DefaultRuntime = ""
+ // DefaultConfigDir is the default location for config files.
+ DefaultConfigDir = "/etc/containerd"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/defaults/defaults_differ_windows.go b/vendor/github.com/containerd/containerd/v2/defaults/defaults_differ_windows.go
new file mode 100644
index 000000000000..0762d259eeec
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/defaults/defaults_differ_windows.go
@@ -0,0 +1,23 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package defaults
+
+const (
+ // DefaultDiffer will set the default differ for the platform.
+ // This differ should be compatible with the windows snapshotter.
+ DefaultDiffer = "windows"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/defaults/defaults_freebsd.go b/vendor/github.com/containerd/containerd/v2/defaults/defaults_freebsd.go
new file mode 100644
index 000000000000..d74133012a5a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/defaults/defaults_freebsd.go
@@ -0,0 +1,22 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package defaults
+
+const (
+ // DefaultRuntime is the default freebsd runtime
+ DefaultRuntime = "wtf.sbk.runj.v1"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/defaults/defaults_linux.go b/vendor/github.com/containerd/containerd/v2/defaults/defaults_linux.go
new file mode 100644
index 000000000000..7187c5a76739
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/defaults/defaults_linux.go
@@ -0,0 +1,22 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package defaults
+
+const (
+ // DefaultRuntime is the default linux runtime
+ DefaultRuntime = "io.containerd.runc.v2"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_linux.go b/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_linux.go
new file mode 100644
index 000000000000..ade194717749
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_linux.go
@@ -0,0 +1,24 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package defaults
+
+const (
+ // DefaultSnapshotter will set the default snapshotter for the platform.
+ // This will be based on the client compilation target, so take that into
+ // account when choosing this value.
+ DefaultSnapshotter = "overlayfs"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_unix.go b/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_unix.go
new file mode 100644
index 000000000000..fc8b081ff8b3
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_unix.go
@@ -0,0 +1,26 @@
+//go:build darwin || freebsd || solaris
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package defaults
+
+const (
+ // DefaultSnapshotter will set the default snapshotter for the platform.
+ // This will be based on the client compilation target, so take that into
+ // account when choosing this value.
+ DefaultSnapshotter = "native"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_windows.go b/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_windows.go
new file mode 100644
index 000000000000..37e5f7574d6e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_windows.go
@@ -0,0 +1,24 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package defaults
+
+const (
+ // DefaultSnapshotter will set the default snapshotter for the platform.
+ // This will be based on the client compilation target, so take that into
+ // account when choosing this value.
+ DefaultSnapshotter = "windows"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/v2/defaults/defaults_unix.go
new file mode 100644
index 000000000000..e68a522cadd6
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/defaults/defaults_unix.go
@@ -0,0 +1,37 @@
+//go:build !windows && !darwin
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package defaults
+
+const (
+ // DefaultRootDir is the default location used by containerd to store
+ // persistent data
+ DefaultRootDir = "/var/lib/containerd"
+ // DefaultStateDir is the default location used by containerd to store
+ // transient data
+ DefaultStateDir = "/run/containerd"
+ // DefaultAddress is the default unix socket address
+ DefaultAddress = "/run/containerd/containerd.sock"
+ // DefaultDebugAddress is the default unix socket address for pprof data
+ DefaultDebugAddress = "/run/containerd/debug.sock"
+ // DefaultFIFODir is the default location used by client-side cio library
+ // to store FIFOs.
+ DefaultFIFODir = "/run/containerd/fifo"
+ // DefaultConfigDir is the default location for config files.
+ DefaultConfigDir = "/etc/containerd"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/v2/defaults/defaults_windows.go
new file mode 100644
index 000000000000..9f4bed8b077e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/defaults/defaults_windows.go
@@ -0,0 +1,46 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package defaults
+
+import (
+ "os"
+ "path/filepath"
+)
+
+var (
+ // DefaultRootDir is the default location used by containerd to store
+ // persistent data
+ DefaultRootDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "root")
+ // DefaultStateDir is the default location used by containerd to store
+ // transient data
+ DefaultStateDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "state")
+
+ // DefaultConfigDir is the default location for config files.
+ DefaultConfigDir = filepath.Join(os.Getenv("programfiles"), "containerd")
+)
+
+const (
+ // DefaultAddress is the default winpipe address
+ DefaultAddress = `\\.\pipe\containerd-containerd`
+ // DefaultDebugAddress is the default winpipe address for pprof data
+ DefaultDebugAddress = `\\.\pipe\containerd-debug`
+ // DefaultFIFODir is the default location used by client-side cio library
+ // to store FIFOs. Unused on Windows.
+ DefaultFIFODir = ""
+ // DefaultRuntime is the default Windows runtime
+ DefaultRuntime = "io.containerd.runhcs.v1"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/defaults/doc.go b/vendor/github.com/containerd/containerd/v2/defaults/doc.go
new file mode 100644
index 000000000000..6da863ce2e89
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/defaults/doc.go
@@ -0,0 +1,19 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package defaults provides several common defaults for interacting with
+// containerd. These can be used on the client-side or server-side.
+package defaults
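The defaults package above is what lets client code stay platform-agnostic. A minimal client-side sketch, assuming the v2 client package at github.com/containerd/containerd/v2/client (the constructor name is an assumption of this sketch, not something established by this diff):

package main

import (
	"fmt"
	"log"

	containerd "github.com/containerd/containerd/v2/client"
	"github.com/containerd/containerd/v2/defaults"
)

func main() {
	// Dial the platform-appropriate socket or named pipe rather than
	// hardcoding a path; the build target selects the right constant.
	c, err := containerd.New(defaults.DefaultAddress)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	fmt.Println("default snapshotter:", defaults.DefaultSnapshotter)
}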
diff --git a/vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_linux.go b/vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_linux.go
new file mode 100644
index 000000000000..f19f0e9ce4dd
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_linux.go
@@ -0,0 +1,130 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fsverity
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+ "unsafe"
+
+ "github.com/containerd/containerd/v2/pkg/kernelversion"
+ "golang.org/x/sys/unix"
+)
+
+type fsverityEnableArg struct {
+ version uint32
+ hashAlgorithm uint32
+ blockSize uint32
+ saltSize uint32
+ saltPtr uint64
+ sigSize uint32
+ reserved1 uint32
+ sigPtr uint64
+ reserved2 [11]uint64
+}
+
+const (
+ defaultBlockSize int = 4096
+ maxDigestSize uint16 = 64
+)
+
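+// IsSupported probes whether fsverity can be enabled on the filesystem
+// backing rootPath by enabling verity on a throwaway file (kernel >= 5.4).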
+func IsSupported(rootPath string) (bool, error) {
+ minKernelVersion := kernelversion.KernelVersion{Kernel: 5, Major: 4}
+ s, err := kernelversion.GreaterEqualThan(minKernelVersion)
+ if err != nil {
+ return s, err
+ }
+
+ integrityDir, err := os.MkdirTemp(rootPath, ".fsverity-check-*")
+ if err != nil {
+ return false, err
+ }
+ defer os.RemoveAll(integrityDir)
+
+ digestPath := filepath.Join(integrityDir, "supported")
+ digestFile, err := os.Create(digestPath)
+ if err != nil {
+ return false, err
+ }
+
+ digestFile.Close()
+
+ eerr := Enable(digestPath)
+ if eerr != nil {
+ return false, eerr
+ }
+
+ return true, nil
+}
+
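+// IsEnabled reports whether the file at path has the FS_VERITY_FL inode
+// flag set, i.e. whether fsverity is already enabled on it.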
+func IsEnabled(path string) (bool, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+
+ var attr int
+
+ _, _, flagErr := unix.Syscall(syscall.SYS_IOCTL, f.Fd(), uintptr(unix.FS_IOC_GETFLAGS), uintptr(unsafe.Pointer(&attr)))
+ if flagErr != 0 {
+ return false, fmt.Errorf("error getting inode flags: %w", flagErr)
+ }
+
+ if attr&unix.FS_VERITY_FL == unix.FS_VERITY_FL {
+ return true, nil
+ }
+
+ return false, nil
+}
+
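+// Enable enables fsverity on the file at path via the FS_IOC_ENABLE_VERITY
+// ioctl, using hash algorithm 1 (SHA-256) and a best-effort block size.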
+func Enable(path string) error {
+ f, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ var args = &fsverityEnableArg{}
+ args.version = 1
+ args.hashAlgorithm = 1
+
+ // The fsverity block size should be the minimum of the page size and
+ // the filesystem block size. If that minimum cannot be determined to
+ // be a sensible value, fall back to the default block size.
+ blockSize := unix.Getpagesize()
+
+ s := unix.Stat_t{}
+ serr := unix.Stat(path, &s)
+ if serr == nil && int(s.Blksize) < blockSize {
+ blockSize = int(s.Blksize)
+ }
+
+ if blockSize <= 0 {
+ blockSize = defaultBlockSize
+ }
+
+ args.blockSize = uint32(blockSize)
+
+ _, _, errno := unix.Syscall(syscall.SYS_IOCTL, f.Fd(), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(args)))
+ if errno != 0 {
+ return fmt.Errorf("enable fsverity failed: %w", errno)
+ }
+
+ return nil
+}
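The three functions above are meant to be used together: probe once per root, then enable and verify per file. A sketch of that call pattern (paths are hypothetical, and since this is an internal package the import only resolves inside the containerd module):

package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/v2/internal/fsverity"
)

func main() {
	root, blob := "/var/lib/containerd", "/var/lib/containerd/blob"

	// IsSupported exercises Enable on a throwaway file, so a false result
	// also covers filesystems that lack verity support entirely.
	supported, err := fsverity.IsSupported(root)
	if err != nil || !supported {
		log.Fatalf("fsverity unavailable: %v", err)
	}
	if err := fsverity.Enable(blob); err != nil {
		log.Fatal(err)
	}
	enabled, err := fsverity.IsEnabled(blob)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("verity enabled:", enabled) // the file is now immutable
}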
diff --git a/vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_other.go b/vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_other.go
new file mode 100644
index 000000000000..f50789adabbb
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_other.go
@@ -0,0 +1,33 @@
+//go:build !linux
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fsverity
+
+import "fmt"
+
+func IsSupported(rootPath string) (bool, error) {
+ return false, fmt.Errorf("fsverity is only supported on Linux systems")
+}
+
+func IsEnabled(path string) (bool, error) {
+ return false, fmt.Errorf("fsverity is only supported on Linux systems")
+}
+
+func Enable(_ string) error {
+ return fmt.Errorf("fsverity is only supported on Linux systems")
+}
diff --git a/vendor/github.com/containerd/containerd/v2/internal/randutil/randutil.go b/vendor/github.com/containerd/containerd/v2/internal/randutil/randutil.go
new file mode 100644
index 000000000000..f4b657d7dd87
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/internal/randutil/randutil.go
@@ -0,0 +1,48 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package randutil provides utilities for [crypto/rand].
+package randutil
+
+import (
+ "crypto/rand"
+ "math"
+ "math/big"
+)
+
+// Int63n is similar to [math/rand.Int63n] but uses [crypto/rand.Reader] under the hood.
+func Int63n(n int64) int64 {
+ b, err := rand.Int(rand.Reader, big.NewInt(n))
+ if err != nil {
+ panic(err)
+ }
+ return b.Int64()
+}
+
+// Int63 is similar to [math/rand.Int63] but uses [crypto/rand.Reader] under the hood.
+func Int63() int64 {
+ return Int63n(math.MaxInt64)
+}
+
+// Intn is similar to [math/rand.Intn] but uses [crypto/rand.Reader] under the hood.
+func Intn(n int) int {
+ return int(Int63n(int64(n)))
+}
+
+// Int is similar to [math/rand.Int] but uses [crypto/rand.Reader] under the hood.
+func Int() int {
+ return int(Int63())
+}
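One use for these helpers is jittering retry delays without seeding math/rand; a sketch (randutil is internal, so this only compiles inside the containerd module):

package main

import (
	"fmt"
	"time"

	"github.com/containerd/containerd/v2/internal/randutil"
)

func main() {
	// Draws come from crypto/rand, so there is no seed to manage and no
	// shared global generator to lock.
	base := 100 * time.Millisecond
	jitter := time.Duration(randutil.Int63n(int64(base)))
	fmt.Println("sleeping for", base+jitter)
}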
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression.go b/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression.go
new file mode 100644
index 000000000000..e5c0a18b9738
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression.go
@@ -0,0 +1,326 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compression
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "strconv"
+ "sync"
+
+ "github.com/containerd/log"
+ "github.com/klauspost/compress/zstd"
+)
+
+type (
+ // Compression represents whether a stream is compressed, and with which algorithm.
+ Compression int
+)
+
+const (
+ // Uncompressed represents an uncompressed stream.
+ Uncompressed Compression = iota
+ // Gzip is gzip compression algorithm.
+ Gzip
+ // Zstd is zstd compression algorithm.
+ Zstd
+)
+
+const (
+ disablePigzEnv = "CONTAINERD_DISABLE_PIGZ"
+ disableIgzipEnv = "CONTAINERD_DISABLE_IGZIP"
+)
+
+var (
+ initGzip sync.Once
+ gzipPath string
+)
+
+var (
+ bufioReader32KPool = &sync.Pool{
+ New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
+ }
+)
+
+// DecompressReadCloser includes the decompressed stream and the detected compression method.
+type DecompressReadCloser interface {
+ io.ReadCloser
+ // GetCompression returns the compression method that was detected before decompressing
+ GetCompression() Compression
+}
+
+type readCloserWrapper struct {
+ io.Reader
+ compression Compression
+ closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+ if r.closer != nil {
+ return r.closer()
+ }
+ return nil
+}
+
+func (r *readCloserWrapper) GetCompression() Compression {
+ return r.compression
+}
+
+type writeCloserWrapper struct {
+ io.Writer
+ closer func() error
+}
+
+func (w *writeCloserWrapper) Close() error {
+ if w.closer != nil {
+ return w.closer()
+ }
+ return nil
+}
+
+type bufferedReader struct {
+ buf *bufio.Reader
+}
+
+func newBufferedReader(r io.Reader) *bufferedReader {
+ buf := bufioReader32KPool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return &bufferedReader{buf}
+}
+
+func (r *bufferedReader) Read(p []byte) (n int, err error) {
+ if r.buf == nil {
+ return 0, io.EOF
+ }
+ n, err = r.buf.Read(p)
+ if err == io.EOF {
+ r.buf.Reset(nil)
+ bufioReader32KPool.Put(r.buf)
+ r.buf = nil
+ }
+ return
+}
+
+func (r *bufferedReader) Peek(n int) ([]byte, error) {
+ if r.buf == nil {
+ return nil, io.EOF
+ }
+ return r.buf.Peek(n)
+}
+
+const (
+ zstdMagicSkippableStart = 0x184D2A50
+ zstdMagicSkippableMask = 0xFFFFFFF0
+)
+
+var (
+ gzipMagic = []byte{0x1F, 0x8B, 0x08}
+ zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
+)
+
+type matcher = func([]byte) bool
+
+func magicNumberMatcher(m []byte) matcher {
+ return func(source []byte) bool {
+ return bytes.HasPrefix(source, m)
+ }
+}
+
+// zstdMatcher detects zstd compression algorithm.
+// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
+// See https://datatracker.ietf.org/doc/html/rfc8878#section-3 for more details.
+func zstdMatcher() matcher {
+ return func(source []byte) bool {
+ if bytes.HasPrefix(source, zstdMagic) {
+ // Zstandard frame
+ return true
+ }
+ // skippable frame
+ if len(source) < 8 {
+ return false
+ }
+ // magic number from 0x184D2A50 to 0x184D2A5F.
+ if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart {
+ return true
+ }
+ return false
+ }
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+ for compression, fn := range map[Compression]matcher{
+ Gzip: magicNumberMatcher(gzipMagic),
+ Zstd: zstdMatcher(),
+ } {
+ if fn(source) {
+ return compression
+ }
+ }
+ return Uncompressed
+}
+
+// DecompressStream detects the compression of archive and returns a DecompressReadCloser over the decompressed contents.
+func DecompressStream(archive io.Reader) (DecompressReadCloser, error) {
+ buf := newBufferedReader(archive)
+ bs, err := buf.Peek(10)
+ if err != nil && err != io.EOF {
+ // Note: we'll ignore any io.EOF error because there are some odd
+ // cases where the layer.tar file will be empty (zero bytes) and
+ // that results in an io.EOF from the Peek() call. So, in those
+ // cases we'll just treat it as a non-compressed stream and
+ // that means just create an empty layer.
+ // See Issue docker/docker#18170
+ return nil, err
+ }
+
+ switch compression := DetectCompression(bs); compression {
+ case Uncompressed:
+ return &readCloserWrapper{
+ Reader: buf,
+ compression: compression,
+ }, nil
+ case Gzip:
+ ctx, cancel := context.WithCancel(context.Background())
+ gzReader, err := gzipDecompress(ctx, buf)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+
+ return &readCloserWrapper{
+ Reader: gzReader,
+ compression: compression,
+ closer: func() error {
+ cancel()
+ return gzReader.Close()
+ },
+ }, nil
+ case Zstd:
+ zstdReader, err := zstd.NewReader(buf)
+ if err != nil {
+ return nil, err
+ }
+ return &readCloserWrapper{
+ Reader: zstdReader,
+ compression: compression,
+ closer: func() error {
+ zstdReader.Close()
+ return nil
+ },
+ }, nil
+
+ default:
+ return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// CompressStream returns a WriteCloser that compresses data written to it into dest using the specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+ switch compression {
+ case Uncompressed:
+ return &writeCloserWrapper{dest, nil}, nil
+ case Gzip:
+ return gzip.NewWriter(dest), nil
+ case Zstd:
+ return zstd.NewWriter(dest)
+ default:
+ return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Gzip:
+ return "gz"
+ case Zstd:
+ return "zst"
+ }
+ return ""
+}
+
+func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
+ initGzip.Do(func() {
+ if gzipPath = detectCommand("igzip", disableIgzipEnv); gzipPath != "" {
+ log.L.Debug("using igzip for decompression")
+ return
+ }
+ if gzipPath = detectCommand("unpigz", disablePigzEnv); gzipPath != "" {
+ log.L.Debug("using unpigz for decompression")
+ }
+ })
+
+ if gzipPath == "" {
+ return gzip.NewReader(buf)
+ }
+ return cmdStream(exec.CommandContext(ctx, gzipPath, "-d", "-c"), buf)
+}
+
+func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) {
+ reader, writer := io.Pipe()
+
+ cmd.Stdin = in
+ cmd.Stdout = writer
+
+ var errBuf bytes.Buffer
+ cmd.Stderr = &errBuf
+
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
+
+ go func() {
+ if err := cmd.Wait(); err != nil {
+ writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+ } else {
+ writer.Close()
+ }
+ }()
+
+ return reader, nil
+}
+
+func detectCommand(path, disableEnvName string) string {
+ // Check if this command is disabled via the env variable
+ value := os.Getenv(disableEnvName)
+ if value != "" {
+ disable, err := strconv.ParseBool(value)
+ if err != nil {
+ log.L.WithError(err).Warnf("could not parse %s: %s", disableEnvName, value)
+ }
+
+ if disable {
+ return ""
+ }
+ }
+
+ path, err := exec.LookPath(path)
+ if err != nil {
+ log.L.WithError(err).Debugf("%s not found", path)
+ return ""
+ }
+
+ return path
+}
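A round-trip sketch of the API above: compress with an explicit algorithm, then let DecompressStream sniff the magic bytes on the way back (error handling shortened for brevity):

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/containerd/containerd/v2/pkg/archive/compression"
)

func main() {
	var buf bytes.Buffer

	w, err := compression.CompressStream(&buf, compression.Gzip)
	if err != nil {
		log.Fatal(err)
	}
	io.WriteString(w, "hello layer")
	w.Close()

	// DetectCompression runs on the peeked header, so the caller never has
	// to say what the stream was compressed with.
	r, err := compression.DecompressStream(&buf)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	out, _ := io.ReadAll(r)
	fmt.Printf("%s (gzip detected: %v)\n", out, r.GetCompression() == compression.Gzip)
}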
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression_fuzzer.go b/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression_fuzzer.go
new file mode 100644
index 000000000000..3516494ac0bd
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression_fuzzer.go
@@ -0,0 +1,28 @@
+//go:build gofuzz
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compression
+
+import (
+ "bytes"
+)
+
+func FuzzDecompressStream(data []byte) int {
+ _, _ = DecompressStream(bytes.NewReader(data))
+ return 1
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/deprecation/deprecation.go b/vendor/github.com/containerd/containerd/v2/pkg/deprecation/deprecation.go
new file mode 100644
index 000000000000..603bf41f0602
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/deprecation/deprecation.go
@@ -0,0 +1,72 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package deprecation
+
+type Warning string
+
+const (
+ // Prefix is a standard prefix for all Warnings, used for filtering plugin Exports
+ Prefix = "io.containerd.deprecation/"
+ // PullSchema1Image is a warning for the use of schema 1 images
+ PullSchema1Image Warning = Prefix + "pull-schema-1-image"
+ // GoPluginLibrary is a warning for the use of dynamic library Go plugins
+ GoPluginLibrary Warning = Prefix + "go-plugin-library"
+ // CRIRegistryMirrors is a warning for the use of the `mirrors` property
+ CRIRegistryMirrors Warning = Prefix + "cri-registry-mirrors"
+ // CRIRegistryAuths is a warning for the use of the `auths` property
+ CRIRegistryAuths Warning = Prefix + "cri-registry-auths"
+ // CRIRegistryConfigs is a warning for the use of the `configs` property
+ CRIRegistryConfigs Warning = Prefix + "cri-registry-configs"
+ // TracingOTLPConfig is a warning for the use of the `otlp` property
+ TracingOTLPConfig Warning = Prefix + "tracing-processor-config"
+ // TracingServiceConfig is a warning for the use of the `tracing` property
+ TracingServiceConfig Warning = Prefix + "tracing-service-config"
+)
+
+const (
+ EnvPrefix = "CONTAINERD_ENABLE_DEPRECATED_"
+ EnvPullSchema1Image = EnvPrefix + "PULL_SCHEMA_1_IMAGE"
+)
+
+var messages = map[Warning]string{
+ PullSchema1Image: "Schema 1 images are deprecated since containerd v1.7, disabled in containerd v2.0, and will be removed in containerd v2.1. " +
+ `Since containerd v1.7.8, schema 1 images are identified by the "io.containerd.image/converted-docker-schema1" label.`,
+ GoPluginLibrary: "Dynamically-linked Go plugins as containerd runtimes are deprecated since containerd v2.0 and removed in containerd v2.1.",
+ CRIRegistryMirrors: "The `mirrors` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.1. " +
+ "Use `config_path` instead.",
+ CRIRegistryAuths: "The `auths` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.3 and will be removed in containerd v2.1. " +
+ "Use `ImagePullSecrets` instead.",
+ CRIRegistryConfigs: "The `configs` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.1. " +
+ "Use `config_path` instead.",
+
+ TracingOTLPConfig: "The `otlp` property of `[plugins.\"io.containerd.tracing.processor.v1\".otlp]` is deprecated since containerd v1.6 and will be removed in containerd v2.0. " +
+ "Use OTLP environment variables instead: https://opentelemetry.io/docs/specs/otel/protocol/exporter/",
+ TracingServiceConfig: "The `tracing` property of `[plugins.\"io.containerd.internal.v1\".tracing]` is deprecated since containerd v1.6 and will be removed in containerd v2.0. " +
+ "Use OTEL environment variables instead: https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/",
+}
+
+// Valid checks whether a given Warning is valid
+func Valid(id Warning) bool {
+ _, ok := messages[id]
+ return ok
+}
+
+// Message returns the human-readable message for a given Warning
+func Message(id Warning) (string, bool) {
+ msg, ok := messages[id]
+ return msg, ok
+}
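A sketch of the intended lookup pattern, with the warning routed to standard output (the output side is illustrative, not part of this package):

package main

import (
	"fmt"

	"github.com/containerd/containerd/v2/pkg/deprecation"
)

func main() {
	w := deprecation.PullSchema1Image
	// Valid guards against emitting a warning that has no registered message.
	if deprecation.Valid(w) {
		msg, _ := deprecation.Message(w)
		fmt.Printf("%s: %s\n", w, msg)
	}
}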
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/dialer/dialer.go b/vendor/github.com/containerd/containerd/v2/pkg/dialer/dialer.go
new file mode 100644
index 000000000000..125edf69220b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/dialer/dialer.go
@@ -0,0 +1,74 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package dialer
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+)
+
+type dialResult struct {
+ c net.Conn
+ err error
+}
+
+// ContextDialer returns a net.Conn connected to the provided address, honoring any deadline set on ctx. It is suitable for use as a gRPC context dialer.
+func ContextDialer(ctx context.Context, address string) (net.Conn, error) {
+ if deadline, ok := ctx.Deadline(); ok {
+ return timeoutDialer(address, time.Until(deadline))
+ }
+ return timeoutDialer(address, 0)
+}
+
+func timeoutDialer(address string, timeout time.Duration) (net.Conn, error) {
+ var (
+ stopC = make(chan struct{})
+ synC = make(chan *dialResult)
+ )
+ go func() {
+ defer close(synC)
+ for {
+ select {
+ case <-stopC:
+ return
+ default:
+ c, err := dialer(address, timeout)
+ if isNoent(err) {
+ <-time.After(10 * time.Millisecond)
+ continue
+ }
+ synC <- &dialResult{c, err}
+ return
+ }
+ }
+ }()
+ select {
+ case dr := <-synC:
+ return dr.c, dr.err
+ case <-time.After(timeout):
+ close(stopC)
+ go func() {
+ dr := <-synC
+ if dr != nil && dr.c != nil {
+ dr.c.Close()
+ }
+ }()
+ return nil, fmt.Errorf("dial %s: timeout", address)
+ }
+}
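ContextDialer matches the signature gRPC expects from a custom dialer, and the retry-on-ENOENT loop above smooths over daemon startup races. A wiring sketch, assuming a recent grpc-go (grpc.NewClient and the credentials/insecure package):

package main

import (
	"log"

	"github.com/containerd/containerd/v2/defaults"
	"github.com/containerd/containerd/v2/pkg/dialer"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// DialAddress prepends the platform scheme (unix:// or npipe://).
	conn, err := grpc.NewClient(
		dialer.DialAddress(defaults.DefaultAddress),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(dialer.ContextDialer),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}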
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/dialer/dialer_unix.go b/vendor/github.com/containerd/containerd/v2/pkg/dialer/dialer_unix.go
new file mode 100644
index 000000000000..1c58d16b6ee2
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/dialer/dialer_unix.go
@@ -0,0 +1,43 @@
+//go:build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package dialer
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+ "syscall"
+ "time"
+)
+
+// DialAddress returns the address with unix:// prepended to the
+// provided address
+func DialAddress(address string) string {
+ return fmt.Sprintf("unix://%s", address)
+}
+
+func isNoent(err error) bool {
+ return errors.Is(err, syscall.ENOENT)
+}
+
+func dialer(address string, timeout time.Duration) (net.Conn, error) {
+ address = strings.TrimPrefix(address, "unix://")
+ return net.DialTimeout("unix", address, timeout)
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/dialer/dialer_windows.go b/vendor/github.com/containerd/containerd/v2/pkg/dialer/dialer_windows.go
new file mode 100644
index 000000000000..cf74cc0d82e5
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/dialer/dialer_windows.go
@@ -0,0 +1,47 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package dialer
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ winio "github.com/Microsoft/go-winio"
+)
+
+func isNoent(err error) bool {
+ return os.IsNotExist(err)
+}
+
+func dialer(address string, timeout time.Duration) (net.Conn, error) {
+ address = strings.TrimPrefix(filepath.ToSlash(address), "npipe://")
+ return winio.DialPipe(address, &timeout)
+}
+
+// DialAddress returns the dial address with npipe:// prepended to the
+// provided address
+func DialAddress(address string) string {
+ address = filepath.ToSlash(address)
+ if !strings.HasPrefix(address, "npipe://") {
+ address = fmt.Sprintf("npipe://%s", address)
+ }
+ return address
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/filters/adaptor.go b/vendor/github.com/containerd/containerd/v2/pkg/filters/adaptor.go
new file mode 100644
index 000000000000..5a9c559c1e0c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/filters/adaptor.go
@@ -0,0 +1,33 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package filters
+
+// Adaptor specifies the mapping of fieldpaths to a type. For the given field
+// path, the value and whether it is present should be returned. The mapping of
+// the fieldpath to a field is deferred to the adaptor implementation, but
+// should generally follow protobuf field path/mask semantics.
+type Adaptor interface {
+ Field(fieldpath []string) (value string, present bool)
+}
+
+// AdapterFunc allows implementation specific matching of fieldpaths
+type AdapterFunc func(fieldpath []string) (string, bool)
+
+// Field returns the field name and true if it exists
+func (fn AdapterFunc) Field(fieldpath []string) (string, bool) {
+ return fn(fieldpath)
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/filters/filter.go b/vendor/github.com/containerd/containerd/v2/pkg/filters/filter.go
new file mode 100644
index 000000000000..dcc569a4b7d2
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/filters/filter.go
@@ -0,0 +1,178 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package filters defines a syntax and parser that can be used for the
+// filtration of items across the containerd API. The core is built on the
+// concept of protobuf field paths, with quoting. Several operators allow the
+// user to flexibly select items based on field presence, equality, inequality
+// and regular expressions. Flexible adaptors support working with any type.
+//
+// The syntax is fairly familiar, if you've used container ecosystem
+// projects. At the core, we base it on the concept of protobuf field
+// paths, augmenting with the ability to quote portions of the field path
+// to match arbitrary labels. These "selectors" come in the following
+// syntax:
+//
+// ```
+// <fieldpath>[<operator><value>]
+// ```
+//
+// A basic example is as follows:
+//
+// ```
+// name==foo
+// ```
+//
+// This would match all objects that have a field `name` with the value
+// `foo`. If we only want to test if the field is present, we can omit the
+// operator. This is most useful for matching labels in containerd. The
+// following will match objects that have the field "labels" and have the
+// label "foo" defined:
+//
+// ```
+// labels.foo
+// ```
+//
+// We also allow for quoting of parts of the field path to allow matching
+// of arbitrary items:
+//
+// ```
+// labels."very complex label"==something
+// ```
+//
+// We also define `!=` and `~=` as operators. The `!=` will match all
+// objects that don't match the value for a field and `~=` will compile the
+// target value as a regular expression and match the field value against that.
+//
+// Selectors can be combined using a comma, such that the resulting
+// selector will require all selectors are matched for the object to match.
+// The following example will match objects that are named `foo` and have
+// the label `bar`:
+//
+// ```
+// name==foo,labels.bar
+// ```
+package filters
+
+import (
+ "regexp"
+
+ "github.com/containerd/log"
+)
+
+// Filter matches specific resources based on the provided filter
+type Filter interface {
+ Match(adaptor Adaptor) bool
+}
+
+// FilterFunc is a function that handles matching with an adaptor
+type FilterFunc func(Adaptor) bool
+
+// Match matches the FilterFunc returning true if the object matches the filter
+func (fn FilterFunc) Match(adaptor Adaptor) bool {
+ return fn(adaptor)
+}
+
+// Always is a filter that always returns true for any type of object
+var Always FilterFunc = func(adaptor Adaptor) bool {
+ return true
+}
+
+// Any allows multiple filters to be matched against the object
+type Any []Filter
+
+// Match returns true if any of the provided filters are true
+func (m Any) Match(adaptor Adaptor) bool {
+ for _, m := range m {
+ if m.Match(adaptor) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// All combines multiple filters, all of which must match the object
+type All []Filter
+
+// Match only returns true if all filters match the object
+func (m All) Match(adaptor Adaptor) bool {
+ for _, m := range m {
+ if !m.Match(adaptor) {
+ return false
+ }
+ }
+
+ return true
+}
+
+type operator int
+
+const (
+ operatorPresent = iota
+ operatorEqual
+ operatorNotEqual
+ operatorMatches
+)
+
+func (op operator) String() string {
+ switch op {
+ case operatorPresent:
+ return "?"
+ case operatorEqual:
+ return "=="
+ case operatorNotEqual:
+ return "!="
+ case operatorMatches:
+ return "~="
+ }
+
+ return "unknown"
+}
+
+type selector struct {
+ fieldpath []string
+ operator operator
+ value string
+ re *regexp.Regexp
+}
+
+func (m selector) Match(adaptor Adaptor) bool {
+ value, present := adaptor.Field(m.fieldpath)
+
+ switch m.operator {
+ case operatorPresent:
+ return present
+ case operatorEqual:
+ return present && value == m.value
+ case operatorNotEqual:
+ return value != m.value
+ case operatorMatches:
+ if m.re == nil {
+ r, err := regexp.Compile(m.value)
+ if err != nil {
+ log.L.Errorf("error compiling regexp %q", m.value)
+ return false
+ }
+
+ m.re = r
+ }
+
+ return m.re.MatchString(value)
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/filters/parser.go b/vendor/github.com/containerd/containerd/v2/pkg/filters/parser.go
new file mode 100644
index 000000000000..e86ed8eb4626
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/filters/parser.go
@@ -0,0 +1,290 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package filters
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/containerd/errdefs"
+)
+
+/*
+Parse the strings into a filter that may be used with an adaptor.
+
+The filter is made up of zero or more selectors.
+
+The format is a comma separated list of expressions, in the form of
+`<fieldpath><operator><value>`, known as selectors. All selectors must match the
+target object for the filter to be true.
+
+We define the operators "==" for equality, "!=" for not equal and "~=" for a
+regular expression. If the operator and value are not present, the matcher will
+test for the presence of a value, as defined by the target object.
+
+The formal grammar is as follows:
+
+selectors := selector ("," selector)*
+selector := fieldpath (operator value)
+fieldpath := field ('.' field)*
+field := quoted | [A-Za-z] [A-Za-z0-9_]+
+operator := "==" | "!=" | "~="
+value := quoted | [^\s,]+
+quoted := <go string syntax>
+*/
+func Parse(s string) (Filter, error) {
+ // special case empty to match all
+ if s == "" {
+ return Always, nil
+ }
+
+ p := parser{input: s}
+ return p.parse()
+}
+
+// ParseAll parses each filter in ss and returns a filter that will return true
+// if any filter matches the expression.
+//
+// If no filters are provided, the filter will match anything.
+func ParseAll(ss ...string) (Filter, error) {
+ if len(ss) == 0 {
+ return Always, nil
+ }
+
+ var fs []Filter
+ for _, s := range ss {
+ f, err := Parse(s)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument)
+ }
+
+ fs = append(fs, f)
+ }
+
+ return Any(fs), nil
+}
+
+type parser struct {
+ input string
+ scanner scanner
+}
+
+func (p *parser) parse() (Filter, error) {
+ p.scanner.init(p.input)
+
+ ss, err := p.selectors()
+ if err != nil {
+ return nil, fmt.Errorf("filters: %w", err)
+ }
+
+ return ss, nil
+}
+
+func (p *parser) selectors() (Filter, error) {
+ s, err := p.selector()
+ if err != nil {
+ return nil, err
+ }
+
+ ss := All{s}
+
+loop:
+ for {
+ tok := p.scanner.peek()
+ switch tok {
+ case ',':
+ pos, tok, _ := p.scanner.scan()
+ if tok != tokenSeparator {
+ return nil, p.mkerr(pos, "expected a separator")
+ }
+
+ s, err := p.selector()
+ if err != nil {
+ return nil, err
+ }
+
+ ss = append(ss, s)
+ case tokenEOF:
+ break loop
+ default:
+ return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok))
+ }
+ }
+
+ return ss, nil
+}
+
+func (p *parser) selector() (selector, error) {
+ fieldpath, err := p.fieldpath()
+ if err != nil {
+ return selector{}, err
+ }
+
+ switch p.scanner.peek() {
+ case ',', tokenSeparator, tokenEOF:
+ return selector{
+ fieldpath: fieldpath,
+ operator: operatorPresent,
+ }, nil
+ }
+
+ op, err := p.operator()
+ if err != nil {
+ return selector{}, err
+ }
+
+ var allowAltQuotes bool
+ if op == operatorMatches {
+ allowAltQuotes = true
+ }
+
+ value, err := p.value(allowAltQuotes)
+ if err != nil {
+ if err == io.EOF {
+ return selector{}, io.ErrUnexpectedEOF
+ }
+ return selector{}, err
+ }
+
+ return selector{
+ fieldpath: fieldpath,
+ value: value,
+ operator: op,
+ }, nil
+}
+
+func (p *parser) fieldpath() ([]string, error) {
+ f, err := p.field()
+ if err != nil {
+ return nil, err
+ }
+
+ fs := []string{f}
+loop:
+ for {
+ tok := p.scanner.peek() // lookahead to consume field separator
+
+ switch tok {
+ case '.':
+ pos, tok, _ := p.scanner.scan() // consume separator
+ if tok != tokenSeparator {
+ return nil, p.mkerr(pos, "expected a field separator (`.`)")
+ }
+
+ f, err := p.field()
+ if err != nil {
+ return nil, err
+ }
+
+ fs = append(fs, f)
+ default:
+ // let the layer above handle the other bad cases.
+ break loop
+ }
+ }
+
+ return fs, nil
+}
+
+func (p *parser) field() (string, error) {
+ pos, tok, s := p.scanner.scan()
+ switch tok {
+ case tokenField:
+ return s, nil
+ case tokenQuoted:
+ return p.unquote(pos, s, false)
+ case tokenIllegal:
+ return "", p.mkerr(pos, "%s", p.scanner.err)
+ }
+
+ return "", p.mkerr(pos, "expected field or quoted")
+}
+
+func (p *parser) operator() (operator, error) {
+ pos, tok, s := p.scanner.scan()
+ switch tok {
+ case tokenOperator:
+ switch s {
+ case "==":
+ return operatorEqual, nil
+ case "!=":
+ return operatorNotEqual, nil
+ case "~=":
+ return operatorMatches, nil
+ default:
+ return 0, p.mkerr(pos, "unsupported operator %q", s)
+ }
+ case tokenIllegal:
+ return 0, p.mkerr(pos, "%s", p.scanner.err)
+ }
+
+ return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`)
+}
+
+func (p *parser) value(allowAltQuotes bool) (string, error) {
+ pos, tok, s := p.scanner.scan()
+
+ switch tok {
+ case tokenValue, tokenField:
+ return s, nil
+ case tokenQuoted:
+ return p.unquote(pos, s, allowAltQuotes)
+ case tokenIllegal:
+ return "", p.mkerr(pos, "%s", p.scanner.err)
+ }
+
+ return "", p.mkerr(pos, "expected value or quoted")
+}
+
+func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) {
+ if !allowAlts && s[0] != '\'' && s[0] != '"' {
+ return "", p.mkerr(pos, "invalid quote encountered")
+ }
+
+ uq, err := unquote(s)
+ if err != nil {
+ return "", p.mkerr(pos, "unquoting failed: %v", err)
+ }
+
+ return uq, nil
+}
+
+type parseError struct {
+ input string
+ pos int
+ msg string
+}
+
+func (pe parseError) Error() string {
+ if pe.pos < len(pe.input) {
+ before := pe.input[:pe.pos]
+ location := pe.input[pe.pos : pe.pos+1] // need to handle end
+ after := pe.input[pe.pos+1:]
+
+ return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg)
+ }
+
+ return fmt.Sprintf("[%s]: %v", pe.input, pe.msg)
+}
+
+func (p *parser) mkerr(pos int, format string, args ...interface{}) error {
+ return fmt.Errorf("parse error: %w", parseError{
+ input: p.input,
+ pos: pos,
+ msg: fmt.Sprintf(format, args...),
+ })
+}
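A self-contained sketch of the parser in use: a toy object is exposed through an AdapterFunc and matched against the selector syntax documented above. (The `/` and `|` quoting from quote.go additionally allows regex values such as name~=/foo.*/ without escaping.)

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/containerd/containerd/v2/pkg/filters"
)

func main() {
	fields := map[string]string{"name": "foo", "labels.bar": "1"}
	adaptor := filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		v, ok := fields[strings.Join(fieldpath, ".")]
		return v, ok
	})

	// name must equal foo AND the label bar must be present.
	f, err := filters.Parse("name==foo,labels.bar")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("matches:", f.Match(adaptor)) // true
}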
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/filters/quote.go b/vendor/github.com/containerd/containerd/v2/pkg/filters/quote.go
new file mode 100644
index 000000000000..5c800ef846cc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/filters/quote.go
@@ -0,0 +1,252 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package filters
+
+import (
+ "errors"
+ "unicode/utf8"
+)
+
+// NOTE(stevvooe): Most of the code in this file is copied from the stdlib
+// strconv package and modified to be able to handle quoting with `/` and `|`
+// as delimiters. The copyright is held by the Go authors.
+
+var errQuoteSyntax = errors.New("quote syntax error")
+
+// UnquoteChar decodes the first character or byte in the escaped string
+// or character literal represented by the string s.
+// It returns four values:
+//
+// 1. value, the decoded Unicode code point or byte value;
+// 2. multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
+// 3. tail, the remainder of the string after the character; and
+// 4. an error that will be nil if the character is syntactically valid.
+//
+// The second argument, quote, specifies the type of literal being parsed
+// and therefore which escaped quote character is permitted.
+// If set to a single quote, it permits the sequence \' and disallows unescaped '.
+// If set to a double quote, it permits \" and disallows unescaped ".
+// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped.
+//
+// This is from Go strconv package, modified to support `|` and `/` as double
+// quotes for use with regular expressions.
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+ // easy cases
+ switch c := s[0]; {
+ case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'):
+ err = errQuoteSyntax
+ return
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // hard case: c is backslash
+ if len(s) <= 1 {
+ err = errQuoteSyntax
+ return
+ }
+ c := s[1]
+ s = s[2:]
+
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case 'x', 'u', 'U':
+ n := 0
+ switch c {
+ case 'x':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ var v rune
+ if len(s) < n {
+ err = errQuoteSyntax
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = errQuoteSyntax
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if c == 'x' {
+ // single-byte string, possibly not UTF-8
+ value = v
+ break
+ }
+ if v > utf8.MaxRune {
+ err = errQuoteSyntax
+ return
+ }
+ value = v
+ multibyte = true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ v := rune(c) - '0'
+ if len(s) < 2 {
+ err = errQuoteSyntax
+ return
+ }
+ for j := 0; j < 2; j++ { // one digit already; two more
+ x := rune(s[j]) - '0'
+ if x < 0 || x > 7 {
+ err = errQuoteSyntax
+ return
+ }
+ v = (v << 3) | x
+ }
+ s = s[2:]
+ if v > 255 {
+ err = errQuoteSyntax
+ return
+ }
+ value = v
+ case '\\':
+ value = '\\'
+ case '\'', '"', '|', '/':
+ if c != quote {
+ err = errQuoteSyntax
+ return
+ }
+ value = rune(c)
+ default:
+ err = errQuoteSyntax
+ return
+ }
+ tail = s
+ return
+}
+
+// unquote interprets s as a single-quoted, double-quoted,
+// or backquoted Go string literal, returning the string value
+// that s quotes. (If s is single-quoted, it would be a Go
+// character literal; Unquote returns the corresponding
+// one-character string.)
+//
+// This is modified from the standard library to support `|` and `/` as quote
+// characters for use with regular expressions.
+func unquote(s string) (string, error) {
+ n := len(s)
+ if n < 2 {
+ return "", errQuoteSyntax
+ }
+ quote := s[0]
+ if quote != s[n-1] {
+ return "", errQuoteSyntax
+ }
+ s = s[1 : n-1]
+
+ if quote == '`' {
+ if contains(s, '`') {
+ return "", errQuoteSyntax
+ }
+ if contains(s, '\r') {
+ // -1 because we know there is at least one \r to remove.
+ buf := make([]byte, 0, len(s)-1)
+ for i := 0; i < len(s); i++ {
+ if s[i] != '\r' {
+ buf = append(buf, s[i])
+ }
+ }
+ return string(buf), nil
+ }
+ return s, nil
+ }
+ if quote != '"' && quote != '\'' && quote != '|' && quote != '/' {
+ return "", errQuoteSyntax
+ }
+ if contains(s, '\n') {
+ return "", errQuoteSyntax
+ }
+
+ // Is it trivial? Avoid allocation.
+ if !contains(s, '\\') && !contains(s, quote) {
+ switch quote {
+ case '"', '/', '|': // pipe and slash are treated like double quote
+ return s, nil
+ case '\'':
+ r, size := utf8.DecodeRuneInString(s)
+ if size == len(s) && (r != utf8.RuneError || size != 1) {
+ return s, nil
+ }
+ }
+ }
+
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+ for len(s) > 0 {
+ c, multibyte, ss, err := unquoteChar(s, quote)
+ if err != nil {
+ return "", err
+ }
+ s = ss
+ if c < utf8.RuneSelf || !multibyte {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ if quote == '\'' && len(s) != 0 {
+ // single-quoted must be single character
+ return "", errQuoteSyntax
+ }
+ }
+ return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return true
+ }
+ }
+ return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/filters/scanner.go b/vendor/github.com/containerd/containerd/v2/pkg/filters/scanner.go
new file mode 100644
index 000000000000..6a485467b8a0
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/filters/scanner.go
@@ -0,0 +1,297 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package filters
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+const (
+ tokenEOF = -(iota + 1)
+ tokenQuoted
+ tokenValue
+ tokenField
+ tokenSeparator
+ tokenOperator
+ tokenIllegal
+)
+
+type token rune
+
+func (t token) String() string {
+ switch t {
+ case tokenEOF:
+ return "EOF"
+ case tokenQuoted:
+ return "Quoted"
+ case tokenValue:
+ return "Value"
+ case tokenField:
+ return "Field"
+ case tokenSeparator:
+ return "Separator"
+ case tokenOperator:
+ return "Operator"
+ case tokenIllegal:
+ return "Illegal"
+ }
+
+ return string(t)
+}
+
+func (t token) GoString() string {
+ return "token" + t.String()
+}
+
+type scanner struct {
+ input string
+ pos int
+ ppos int // bounds the current rune in the string
+ value bool
+ err string
+}
+
+func (s *scanner) init(input string) {
+ s.input = input
+ s.pos = 0
+ s.ppos = 0
+}
+
+func (s *scanner) next() rune {
+ if s.pos >= len(s.input) {
+ return tokenEOF
+ }
+ s.pos = s.ppos
+
+ r, w := utf8.DecodeRuneInString(s.input[s.ppos:])
+ s.ppos += w
+ if r == utf8.RuneError {
+ if w > 0 {
+ s.error("rune error")
+ return tokenIllegal
+ }
+ return tokenEOF
+ }
+
+ if r == 0 {
+ s.error("unexpected null")
+ return tokenIllegal
+ }
+
+ return r
+}
+
+func (s *scanner) peek() rune {
+ pos := s.pos
+ ppos := s.ppos
+ ch := s.next()
+ s.pos = pos
+ s.ppos = ppos
+ return ch
+}
+
+func (s *scanner) scan() (nextp int, tk token, text string) {
+ var (
+ ch = s.next()
+ pos = s.pos
+ )
+
+chomp:
+ switch {
+ case ch == tokenEOF:
+ case ch == tokenIllegal:
+ case isQuoteRune(ch):
+ if !s.scanQuoted(ch) {
+ return pos, tokenIllegal, s.input[pos:s.ppos]
+ }
+ return pos, tokenQuoted, s.input[pos:s.ppos]
+ case isSeparatorRune(ch):
+ s.value = false
+ return pos, tokenSeparator, s.input[pos:s.ppos]
+ case isOperatorRune(ch):
+ s.scanOperator()
+ s.value = true
+ return pos, tokenOperator, s.input[pos:s.ppos]
+ case unicode.IsSpace(ch):
+ // chomp
+ ch = s.next()
+ pos = s.pos
+ goto chomp
+ case s.value:
+ s.scanValue()
+ s.value = false
+ return pos, tokenValue, s.input[pos:s.ppos]
+ case isFieldRune(ch):
+ s.scanField()
+ return pos, tokenField, s.input[pos:s.ppos]
+ }
+
+ return s.pos, token(ch), ""
+}
+
+func (s *scanner) scanField() {
+ for {
+ ch := s.peek()
+ if !isFieldRune(ch) {
+ break
+ }
+ s.next()
+ }
+}
+
+func (s *scanner) scanOperator() {
+ for {
+ ch := s.peek()
+ switch ch {
+ case '=', '!', '~':
+ s.next()
+ default:
+ return
+ }
+ }
+}
+
+func (s *scanner) scanValue() {
+ for {
+ ch := s.peek()
+ if !isValueRune(ch) {
+ break
+ }
+ s.next()
+ }
+}
+
+func (s *scanner) scanQuoted(quote rune) bool {
+ var illegal bool
+ ch := s.next() // read character after quote
+ for ch != quote {
+ if ch == '\n' || ch < 0 {
+ s.error("quoted literal not terminated")
+ return false
+ }
+ if ch == '\\' {
+ var legal bool
+ ch, legal = s.scanEscape(quote)
+ if !legal {
+ illegal = true
+ }
+ } else {
+ ch = s.next()
+ }
+ }
+ return !illegal
+}
+
+func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) {
+ ch = s.next() // read character after '/'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
+ // nothing to do
+ ch = s.next()
+ legal = true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ ch, legal = s.scanDigits(ch, 8, 3)
+ case 'x':
+ ch, legal = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ ch, legal = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ ch, legal = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.error("illegal escape sequence")
+ }
+ return
+}
+
+func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) {
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ n--
+ }
+ if n > 0 {
+ s.error("illegal numeric escape sequence")
+ return ch, false
+ }
+ return ch, true
+}
+
+func (s *scanner) error(msg string) {
+ if s.err == "" {
+ s.err = msg
+ }
+}
+
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
+
+func isFieldRune(r rune) bool {
+ return (r == '_' || isAlphaRune(r) || isDigitRune(r))
+}
+
+func isAlphaRune(r rune) bool {
+ return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z'
+}
+
+func isDigitRune(r rune) bool {
+ return r >= '0' && r <= '9'
+}
+
+func isOperatorRune(r rune) bool {
+ switch r {
+ case '=', '!', '~':
+ return true
+ }
+
+ return false
+}
+
+func isQuoteRune(r rune) bool {
+ switch r {
+ case '/', '|', '"': // maybe add single quoting?
+ return true
+ }
+
+ return false
+}
+
+func isSeparatorRune(r rune) bool {
+ switch r {
+ case ',', '.':
+ return true
+ }
+
+ return false
+}
+
+func isValueRune(r rune) bool {
+ return r != ',' && !unicode.IsSpace(r) &&
+ (unicode.IsLetter(r) ||
+ unicode.IsDigit(r) ||
+ unicode.IsNumber(r) ||
+ unicode.IsGraphic(r) ||
+ unicode.IsPunct(r))
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/kernelversion/kernel_linux.go b/vendor/github.com/containerd/containerd/v2/pkg/kernelversion/kernel_linux.go
new file mode 100644
index 000000000000..a8cec6481f98
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/kernelversion/kernel_linux.go
@@ -0,0 +1,94 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ File copied and customized based on
+ /~https://github.com/moby/moby/tree/v20.10.14/profiles/seccomp/kernel_linux.go
+*/
+
+package kernelversion
+
+import (
+ "bytes"
+ "fmt"
+ "sync"
+
+ "golang.org/x/sys/unix"
+)
+
+// KernelVersion holds information about the kernel.
+type KernelVersion struct {
+ Kernel uint64 // Version of the Kernel (i.e., the "4" in "4.1.2-generic")
+ Major uint64 // Major revision of the Kernel (i.e., the "1" in "4.1.2-generic")
+}
+
+// String implements fmt.Stringer for KernelVersion
+func (k *KernelVersion) String() string {
+ if k.Kernel > 0 || k.Major > 0 {
+ return fmt.Sprintf("%d.%d", k.Kernel, k.Major)
+ }
+ return ""
+}
+
+var (
+ currentKernelVersion *KernelVersion
+ kernelVersionError error
+ once sync.Once
+)
+
+// getKernelVersion gets the current kernel version.
+func getKernelVersion() (*KernelVersion, error) {
+ once.Do(func() {
+ var uts unix.Utsname
+ if err := unix.Uname(&uts); err != nil {
+ return
+ }
+ // Trim the trailing \x00 from the release string so it parses correctly
+ currentKernelVersion, kernelVersionError = parseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)]))
+ })
+ return currentKernelVersion, kernelVersionError
+}
+
+// parseRelease parses a string and creates a KernelVersion based on it.
+func parseRelease(release string) (*KernelVersion, error) {
+ var version = KernelVersion{}
+
+ // We only make sure we get the "kernel" and "major revision". Sometimes we have
+ // 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64.
+ _, err := fmt.Sscanf(release, "%d.%d", &version.Kernel, &version.Major)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse kernel version %q: %w", release, err)
+ }
+ return &version, nil
+}
+
+// GreaterEqualThan checks if the host's kernel version is greater than, or
+// equal to the given kernel version v. Only "kernel version" and "major revision"
+// can be specified (e.g., "3.12") and will be taken into account, which means
+// that 3.12.25-gentoo and 3.12-1-amd64 are considered equal (kernel: 3, major: 12).
+func GreaterEqualThan(minVersion KernelVersion) (bool, error) {
+ kv, err := getKernelVersion()
+ if err != nil {
+ return false, err
+ }
+ if kv.Kernel > minVersion.Kernel {
+ return true, nil
+ }
+ if kv.Kernel == minVersion.Kernel && kv.Major >= minVersion.Major {
+ return true, nil
+ }
+ return false, nil
+}
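Reviewer note: a short usage sketch (vendored import path as above). Because parseRelease only reads "kernel" and "major", `5.10.0-gentoo` and `5.10-1-amd64` compare identically:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/v2/pkg/kernelversion"
)

func main() {
	// True when the host kernel is at least 5.10; patch level and
	// distro suffixes in the release string are ignored.
	ok, err := kernelversion.GreaterEqualThan(kernelversion.KernelVersion{Kernel: 5, Major: 10})
	if err != nil {
		panic(err)
	}
	fmt.Println("kernel >= 5.10:", ok)
}
```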
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/labels/labels.go b/vendor/github.com/containerd/containerd/v2/pkg/labels/labels.go
new file mode 100644
index 000000000000..0f9bab5c5d46
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/labels/labels.go
@@ -0,0 +1,29 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package labels
+
+// LabelUncompressed is added to compressed layer contents.
+// The value is the digest of the uncompressed content.
+const LabelUncompressed = "containerd.io/uncompressed"
+
+// LabelSharedNamespace is added to a namespace to allow that namespace's
+// contents to be shared.
+const LabelSharedNamespace = "containerd.io/namespace.shareable"
+
+// LabelDistributionSource is added to content to indicate its origin.
+// e.g., "containerd.io/distribution.source.docker.io=library/redis"
+const LabelDistributionSource = "containerd.io/distribution.source"
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/labels/validate.go b/vendor/github.com/containerd/containerd/v2/pkg/labels/validate.go
new file mode 100644
index 000000000000..6f23cdd7c65e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/labels/validate.go
@@ -0,0 +1,41 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package labels
+
+import (
+ "fmt"
+
+ "github.com/containerd/errdefs"
+)
+
+const (
+ maxSize = 4096
+ // keyMaxLen caps the key portion of the error message when len(key) + len(value) > maxSize
+ keyMaxLen = 64
+)
+
+// Validate returns an error if the combined length of a label's key and value exceeds 4096 bytes (maxSize)
+func Validate(k, v string) error {
+ total := len(k) + len(v)
+ if total > maxSize {
+ if len(k) > keyMaxLen {
+ k = k[:keyMaxLen]
+ }
+ return fmt.Errorf("label key and value length (%d bytes) greater than maximum size (%d bytes), key: %s: %w", total, maxSize, k, errdefs.ErrInvalidArgument)
+ }
+ return nil
+}
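Reviewer note: a sketch of the validation behavior; the oversized call fails with a wrapped `errdefs.ErrInvalidArgument`, and only the first 64 bytes of the key appear in the message:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/containerd/containerd/v2/pkg/labels"
)

func main() {
	// Within the 4096-byte combined budget: no error.
	fmt.Println(labels.Validate(labels.LabelUncompressed, "sha256:abc")) // <nil>

	// Over budget: rejected.
	err := labels.Validate("mykey", strings.Repeat("x", 5000))
	fmt.Println(err != nil) // true
}
```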
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/protobuf/compare.go b/vendor/github.com/containerd/containerd/v2/pkg/protobuf/compare.go
new file mode 100644
index 000000000000..602a4bcac578
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/protobuf/compare.go
@@ -0,0 +1,41 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package protobuf
+
+import (
+ "github.com/google/go-cmp/cmp"
+ "google.golang.org/protobuf/proto"
+)
+
+var Compare = cmp.FilterValues(
+ func(x, y interface{}) bool {
+ _, xok := x.(proto.Message)
+ _, yok := y.(proto.Message)
+ return xok && yok
+ },
+ cmp.Comparer(func(x, y interface{}) bool {
+ vx, ok := x.(proto.Message)
+ if !ok {
+ return false
+ }
+ vy, ok := y.(proto.Message)
+ if !ok {
+ return false
+ }
+ return proto.Equal(vx, vy)
+ }),
+)
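Reviewer note: the filter/comparer pair makes go-cmp defer to `proto.Equal` for message pairs; without an option like this, `cmp.Equal` panics on the unexported fields in generated protobuf types. A minimal sketch using timestamppb messages:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/go-cmp/cmp"
	"google.golang.org/protobuf/types/known/timestamppb"

	"github.com/containerd/containerd/v2/pkg/protobuf"
)

func main() {
	a := timestamppb.New(time.Unix(1, 0))
	b := timestamppb.New(time.Unix(1, 0))

	// Compared via proto.Equal rather than field-by-field reflection.
	fmt.Println(cmp.Equal(a, b, protobuf.Compare)) // true
}
```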
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/protobuf/proto/proto.go b/vendor/github.com/containerd/containerd/v2/pkg/protobuf/proto/proto.go
new file mode 100644
index 000000000000..6c5e7b75e3ad
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/protobuf/proto/proto.go
@@ -0,0 +1,30 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package proto provides convenient aliases that make google.golang.org/protobuf migration easier.
+package proto
+
+import (
+ google "google.golang.org/protobuf/proto"
+)
+
+func Marshal(input google.Message) ([]byte, error) {
+ return google.Marshal(input)
+}
+
+func Unmarshal(input []byte, output google.Message) error {
+ return google.Unmarshal(input, output)
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/protobuf/timestamp.go b/vendor/github.com/containerd/containerd/v2/pkg/protobuf/timestamp.go
new file mode 100644
index 000000000000..0615f823b04a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/protobuf/timestamp.go
@@ -0,0 +1,36 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package protobuf
+
+import (
+ "time"
+
+ "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Once we migrate off gogo/protobuf, we can use the function below, which doesn't return any errors.
+// /~https://github.com/protocolbuffers/protobuf-go/blob/v1.28.0/types/known/timestamppb/timestamp.pb.go#L200-L208
+
+// ToTimestamp creates protobuf's Timestamp from time.Time.
+func ToTimestamp(from time.Time) *timestamppb.Timestamp {
+ return timestamppb.New(from)
+}
+
+// FromTimestamp creates time.Time from protobuf's Timestamp.
+func FromTimestamp(from *timestamppb.Timestamp) time.Time {
+ return from.AsTime()
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/protobuf/types/types.go b/vendor/github.com/containerd/containerd/v2/pkg/protobuf/types/types.go
new file mode 100644
index 000000000000..30abb89875f9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/protobuf/types/types.go
@@ -0,0 +1,28 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package types provides convenient aliases that make google.golang.org/protobuf migration easier.
+package types
+
+import (
+ "google.golang.org/protobuf/types/known/anypb"
+ "google.golang.org/protobuf/types/known/emptypb"
+ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+type Empty = emptypb.Empty
+type Any = anypb.Any
+type FieldMask = field_mask.FieldMask
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/reference/reference.go b/vendor/github.com/containerd/containerd/v2/pkg/reference/reference.go
new file mode 100644
index 000000000000..d983c4e1356f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/reference/reference.go
@@ -0,0 +1,156 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package reference
+
+import (
+ "errors"
+ "net/url"
+ "path"
+ "regexp"
+ "strings"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+var (
+ // ErrInvalid is returned when there is an invalid reference
+ ErrInvalid = errors.New("invalid reference")
+ // ErrObjectRequired is returned when the object is required but missing
+ ErrObjectRequired = errors.New("object required")
+ // ErrHostnameRequired is returned when the hostname is required but missing
+ ErrHostnameRequired = errors.New("hostname required")
+)
+
+// Spec defines the main components of a reference specification.
+//
+// A reference specification is a schema-less URI parsed into common
+// components. The two main components, locator and object, are required to be
+// supported by remotes. It represents a superset of the naming defined in
+// docker's reference schema. It aims to be compatible but not prescriptive.
+//
+// While the interpretation of the components, locator and object, are up to
+// the remote, we define a few common parts, accessible via helper methods.
+//
+// The first is the hostname, which is part of the locator. This doesn't need
+// to map to a physical resource, but it must parse as a hostname. We refer to
+// this as the namespace.
+//
+// The other component made accessible by helper method is the digest. This is
+// part of the object identifier, always prefixed with an '@'. If present, the
+// remote may use the digest portion directly or resolve it against a prefix.
+// If the object does not include the `@` symbol, the return value for `Digest`
+// will be empty.
+type Spec struct {
+ // Locator is the host and path portion of the specification. The host
+ // portion may refer to an actual host or just a namespace of related
+ // images.
+ //
+ // Typically, the locator may be used to resolve the remote to fetch specific
+ // resources.
+ Locator string
+
+ // Object contains the identifier for the remote resource. Classically,
+ // this is a tag but can refer to anything in a remote. By convention, any
+ // portion that may be a partial or whole digest will be preceded by an
+ // `@`. Anything preceding the `@` will be referred to as the "tag".
+ //
+ // In practice, we will see this broken down into the following formats:
+ //
+ // 1. <tag>
+ // 2. <tag>@<digest spec>
+ // 3. @<digest spec>
+ //
+ // We define the tag to be anything except '@' and ':'. <digest spec> may
+ // be a full valid digest or shortened version, possibly with elided
+ // algorithm.
+ Object string
+}
+
+var splitRe = regexp.MustCompile(`[:@]`)
+
+// Parse parses the string into a structured ref.
+func Parse(s string) (Spec, error) {
+ if strings.Contains(s, "://") {
+ return Spec{}, ErrInvalid
+ }
+
+ u, err := url.Parse("dummy://" + s)
+ if err != nil {
+ return Spec{}, err
+ }
+
+ if u.Scheme != "dummy" {
+ return Spec{}, ErrInvalid
+ }
+
+ if u.Host == "" {
+ return Spec{}, ErrHostnameRequired
+ }
+
+ var object string
+
+ if idx := splitRe.FindStringIndex(u.Path); idx != nil {
+ // This allows us to retain the @ to signify digests or shortened digests in
+ // the object.
+ object = u.Path[idx[0]:]
+ if object[:1] == ":" {
+ object = object[1:]
+ }
+ u.Path = u.Path[:idx[0]]
+ }
+
+ return Spec{
+ Locator: path.Join(u.Host, u.Path),
+ Object: object,
+ }, nil
+}
+
+// Hostname returns the hostname portion of the locator.
+//
+// Remotes are not required to directly access the resources at this host. This
+// method is provided for convenience.
+func (r Spec) Hostname() string {
+ i := strings.Index(r.Locator, "/")
+
+ if i < 0 {
+ return r.Locator
+ }
+ return r.Locator[:i]
+}
+
+// Digest returns the digest portion of the reference spec. This may be a
+// partial or invalid digest, which may be used to look up a complete digest.
+func (r Spec) Digest() digest.Digest {
+ i := strings.Index(r.Object, "@")
+
+ if i < 0 {
+ return ""
+ }
+ return digest.Digest(r.Object[i+1:])
+}
+
+// String returns the normalized string for the ref.
+func (r Spec) String() string {
+ if r.Object == "" {
+ return r.Locator
+ }
+ if r.Object[:1] == "@" {
+ return r.Locator + r.Object
+ }
+
+ return r.Locator + ":" + r.Object
+}
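Reviewer note: a sketch of how Parse decomposes a ref (the digest is a shortened placeholder; `Digest()` deliberately does not validate it):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/v2/pkg/reference"
)

func main() {
	spec, err := reference.Parse("docker.io/library/redis:6.0@sha256:deadbeef")
	if err != nil {
		panic(err)
	}
	fmt.Println(spec.Locator)    // docker.io/library/redis
	fmt.Println(spec.Object)     // 6.0@sha256:deadbeef
	fmt.Println(spec.Hostname()) // docker.io
	fmt.Println(spec.Digest())   // sha256:deadbeef (possibly partial)
	fmt.Println(spec.String())   // docker.io/library/redis:6.0@sha256:deadbeef
}
```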
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/sys/eintr_unix.go b/vendor/github.com/containerd/containerd/v2/pkg/sys/eintr_unix.go
new file mode 100644
index 000000000000..1106a5dbca4b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/sys/eintr_unix.go
@@ -0,0 +1,69 @@
+//go:build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sys
+
+import "golang.org/x/sys/unix"
+
+// The following contents were copied from Go 1.18.2.
+// Use of this source code is governed by the following
+// BSD-style license:
+//
+// Copyright (c) 2009 The Go Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// IgnoringEINTR makes a function call and repeats it if it returns an
+// EINTR error. This appears to be required even though we install all
+// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846.
+// Also #20400 and #36644 are issues in which a signal handler is
+// installed without setting SA_RESTART. None of these are the common case,
+// but there are enough of them that it seems that we can't avoid
+// an EINTR loop.
+func IgnoringEINTR(fn func() error) error {
+ for {
+ err := fn()
+ if err != unix.EINTR {
+ return err
+ }
+ }
+}
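Reviewer note: a hedged usage sketch wrapping an interruptible syscall such as flock(2); the lock-file path is illustrative only:

```go
package main

import (
	"os"

	"golang.org/x/sys/unix"

	"github.com/containerd/containerd/v2/pkg/sys"
)

func main() {
	f, err := os.Create("/tmp/example.lock") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// flock(2) may fail with EINTR when a signal arrives; the wrapper
	// retries until the call returns anything other than EINTR.
	if err := sys.IgnoringEINTR(func() error {
		return unix.Flock(int(f.Fd()), unix.LOCK_EX)
	}); err != nil {
		panic(err)
	}
}
```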
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/sys/filesys_unix.go b/vendor/github.com/containerd/containerd/v2/pkg/sys/filesys_unix.go
new file mode 100644
index 000000000000..333e85ceb4a8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/sys/filesys_unix.go
@@ -0,0 +1,26 @@
+//go:build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sys
+
+import "os"
+
+// MkdirAllWithACL is a wrapper for os.MkdirAll on Unix systems.
+func MkdirAllWithACL(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/v2/pkg/sys/filesys_windows.go
new file mode 100644
index 000000000000..67fc4048c1d5
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/sys/filesys_windows.go
@@ -0,0 +1,151 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sys
+
+import (
+ "os"
+ "regexp"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System.
+const SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+
+// volumePath is a regular expression to check if a path is a Windows
+// volume path (e.g., "\\?\Volume{4c1b02c1-d990-11dc-99ae-806e6f6e6963}"
+// or "\\?\Volume{4c1b02c1-d990-11dc-99ae-806e6f6e6963}\").
+var volumePath = regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}\\?$`)
+
+// MkdirAllWithACL is a custom version of os.MkdirAll modified for use on Windows
+// so that it is both volume path aware, and creates directories with
+// an appropriate SDDL-defined ACL for Builtin Administrators and Local System.
+func MkdirAllWithACL(path string, _ os.FileMode) error {
+ sa, err := makeSecurityAttributes(SddlAdministratorsLocalSystem)
+ if err != nil {
+ return &os.PathError{Op: "mkdirall", Path: path, Err: err}
+ }
+ return mkdirall(path, sa)
+}
+
+// MkdirAll is a custom version of os.MkdirAll that is volume path aware for
+// Windows. It can be used as a drop-in replacement for os.MkdirAll.
+func MkdirAll(path string, _ os.FileMode) error {
+ return mkdirall(path, nil)
+}
+
+// mkdirall is a custom version of os.MkdirAll modified for use on Windows
+// so that it is both volume path aware, and can create a directory with
+// a DACL.
+func mkdirall(path string, perm *windows.SecurityAttributes) error {
+ if volumePath.MatchString(path) {
+ return nil
+ }
+
+ // The rest of this method is largely copied from os.MkdirAll and should be kept
+ // as-is to ensure compatibility.
+
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := os.Stat(path)
+ if err == nil {
+ if dir.IsDir() {
+ return nil
+ }
+ return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(path)
+ for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent.
+ err = mkdirall(fixRootDirectory(path[:j-1]), perm)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke Mkdir and use its result.
+ err = mkdirWithACL(path, perm)
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := os.Lstat(path)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError.
+//
+// This is a modified and combined version of os.Mkdir and windows.Mkdir
+// in golang to cater for creating a directory with an ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
+func mkdirWithACL(name string, sa *windows.SecurityAttributes) error {
+ if sa == nil {
+ return os.Mkdir(name, 0)
+ }
+
+ namep, err := windows.UTF16PtrFromString(name)
+ if err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+
+ err = windows.CreateDirectory(namep, sa)
+ if err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ return nil
+}
+
+// fixRootDirectory fixes a reference to a drive's root directory to
+// have the required trailing slash.
+func fixRootDirectory(p string) string {
+ if len(p) == len(`\\?\c:`) {
+ if os.IsPathSeparator(p[0]) && os.IsPathSeparator(p[1]) && p[2] == '?' && os.IsPathSeparator(p[3]) && p[5] == ':' {
+ return p + `\`
+ }
+ }
+ return p
+}
+
+func makeSecurityAttributes(sddl string) (*windows.SecurityAttributes, error) {
+ var sa windows.SecurityAttributes
+ sa.Length = uint32(unsafe.Sizeof(sa))
+ sa.InheritHandle = 1
+ var err error
+ sa.SecurityDescriptor, err = windows.SecurityDescriptorFromString(sddl)
+ if err != nil {
+ return nil, err
+ }
+ return &sa, nil
+}
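Reviewer note: a Windows-only usage sketch (path illustrative); the mode argument is ignored and the SDDL above supplies the DACL instead:

```go
package main

import (
	"github.com/containerd/containerd/v2/pkg/sys"
)

func main() {
	// Creates the directory tree with full control, inherited by children,
	// for Builtin Administrators and Local System.
	if err := sys.MkdirAllWithACL(`C:\ProgramData\example`, 0); err != nil {
		panic(err)
	}
}
```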
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/sys/oom_linux.go b/vendor/github.com/containerd/containerd/v2/pkg/sys/oom_linux.go
new file mode 100644
index 000000000000..01ebbebb5960
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/sys/oom_linux.go
@@ -0,0 +1,82 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sys
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/moby/sys/userns"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // OOMScoreAdjMin is from OOM_SCORE_ADJ_MIN /~https://github.com/torvalds/linux/blob/v5.10/include/uapi/linux/oom.h#L9
+ OOMScoreAdjMin = -1000
+ // OOMScoreAdjMax is from OOM_SCORE_ADJ_MAX /~https://github.com/torvalds/linux/blob/v5.10/include/uapi/linux/oom.h#L10
+ OOMScoreAdjMax = 1000
+)
+
+// AdjustOOMScore sets the oom score for the provided pid. If the provided score
+// is out of range (-1000 to 1000), it is clipped to the min/max value.
+func AdjustOOMScore(pid, score int) error {
+ if score > OOMScoreAdjMax {
+ score = OOMScoreAdjMax
+ } else if score < OOMScoreAdjMin {
+ score = OOMScoreAdjMin
+ }
+ return SetOOMScore(pid, score)
+}
+
+// SetOOMScore sets the oom score for the provided pid
+func SetOOMScore(pid, score int) error {
+ if score > OOMScoreAdjMax || score < OOMScoreAdjMin {
+ return fmt.Errorf("value out of range (%d): OOM score must be between %d and %d", score, OOMScoreAdjMin, OOMScoreAdjMax)
+ }
+ path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
+ f, err := os.OpenFile(path, os.O_WRONLY, 0)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if _, err = f.WriteString(strconv.Itoa(score)); err != nil {
+ if os.IsPermission(err) && (!runningPrivileged() || userns.RunningInUserNS()) {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// GetOOMScoreAdj gets the oom score for a process. It returns 0 (zero) if either
+// no oom score is set, or a score is set to 0.
+func GetOOMScoreAdj(pid int) (int, error) {
+ path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.Atoi(strings.TrimSpace(string(data)))
+}
+
+// runningPrivileged returns true if the effective user ID of the
+// calling process is 0
+func runningPrivileged() bool {
+ return unix.Geteuid() == 0
+}
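Reviewer note: a sketch of the clipping behavior; raising one's own oom_score_adj needs no privileges, so this should also work unprivileged:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containerd/containerd/v2/pkg/sys"
)

func main() {
	pid := os.Getpid()

	// 2000 is out of range; AdjustOOMScore clips it to OOMScoreAdjMax (1000).
	if err := sys.AdjustOOMScore(pid, 2000); err != nil {
		panic(err)
	}

	score, err := sys.GetOOMScoreAdj(pid)
	if err != nil {
		panic(err)
	}
	fmt.Println(score) // 1000
}
```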
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/sys/oom_unsupported.go b/vendor/github.com/containerd/containerd/v2/pkg/sys/oom_unsupported.go
new file mode 100644
index 000000000000..f57977466377
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/sys/oom_unsupported.go
@@ -0,0 +1,48 @@
+//go:build !linux
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sys
+
+const (
+ // OOMScoreMaxKillable is not implemented on non-Linux platforms
+ OOMScoreMaxKillable = 0
+ // OOMScoreAdjMax is not implemented on non-Linux platforms
+ OOMScoreAdjMax = 0
+)
+
+// AdjustOOMScore sets the oom score for the provided pid. If the provided score
+// is out of range (-1000 to 1000), it is clipped to the min/max value.
+//
+// Not implemented on non-Linux platforms
+func AdjustOOMScore(pid, score int) error {
+ return nil
+}
+
+// SetOOMScore sets the oom score for the process
+//
+// Not implemented on non-Linux platforms
+func SetOOMScore(pid, score int) error {
+ return nil
+}
+
+// GetOOMScoreAdj gets the oom score for a process
+//
+// Not implemented on non-Linux platforms
+func GetOOMScoreAdj(pid int) (int, error) {
+ return 0, nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/sys/socket_unix.go b/vendor/github.com/containerd/containerd/v2/pkg/sys/socket_unix.go
new file mode 100644
index 000000000000..5ecbeddc9142
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/sys/socket_unix.go
@@ -0,0 +1,80 @@
+//go:build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sys
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+
+ "golang.org/x/sys/unix"
+)
+
+// CreateUnixSocket creates a unix socket and returns the listener
+func CreateUnixSocket(path string) (net.Listener, error) {
+ // BSDs limit the unix socket path to 104 bytes
+ if len(path) > 104 {
+ return nil, fmt.Errorf("%q: unix socket path too long (> 104)", path)
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0660); err != nil {
+ return nil, err
+ }
+ if err := unix.Unlink(path); err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return net.Listen("unix", path)
+}
+
+// GetLocalListener returns a listener out of a unix socket.
+func GetLocalListener(path string, uid, gid int) (net.Listener, error) {
+ // Ensure parent directory is created
+ if err := mkdirAs(filepath.Dir(path), uid, gid); err != nil {
+ return nil, err
+ }
+
+ l, err := CreateUnixSocket(path)
+ if err != nil {
+ return l, err
+ }
+
+ if err := os.Chmod(path, 0660); err != nil {
+ l.Close()
+ return nil, err
+ }
+
+ if err := os.Chown(path, uid, gid); err != nil {
+ l.Close()
+ return nil, err
+ }
+
+ return l, nil
+}
+
+func mkdirAs(path string, uid, gid int) error {
+ if _, err := os.Stat(path); !os.IsNotExist(err) {
+ return err
+ }
+
+ if err := os.MkdirAll(path, 0770); err != nil {
+ return err
+ }
+
+ return os.Chown(path, uid, gid)
+}
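Reviewer note: a usage sketch (socket path illustrative). The ordering baked in above is: mkdir parent 0770, unlink any stale socket, bind, chmod 0660, chown:

```go
package main

import (
	"os"

	"github.com/containerd/containerd/v2/pkg/sys"
)

func main() {
	l, err := sys.GetLocalListener("/tmp/example/example.sock", os.Getuid(), os.Getgid())
	if err != nil {
		panic(err)
	}
	defer l.Close() // callers should also remove the socket file when done
}
```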
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/sys/socket_windows.go b/vendor/github.com/containerd/containerd/v2/pkg/sys/socket_windows.go
new file mode 100644
index 000000000000..1ae12bc51142
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/sys/socket_windows.go
@@ -0,0 +1,30 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sys
+
+import (
+ "net"
+
+ "github.com/Microsoft/go-winio"
+)
+
+// GetLocalListener returns a net.Listener out of a named pipe.
+// `path` must be of the form `\\.\pipe\<pipename>`
+// (see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365150)
+func GetLocalListener(path string, uid, gid int) (net.Listener, error) {
+ return winio.ListenPipe(path, nil)
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/sys/subprocess_unsafe_linux.go b/vendor/github.com/containerd/containerd/v2/pkg/sys/subprocess_unsafe_linux.go
new file mode 100644
index 000000000000..6e40a9c7d7f0
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/sys/subprocess_unsafe_linux.go
@@ -0,0 +1,30 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sys
+
+import (
+ _ "unsafe" // required for go:linkname.
+)
+
+//go:linkname beforeFork syscall.runtime_BeforeFork
+func beforeFork()
+
+//go:linkname afterFork syscall.runtime_AfterFork
+func afterFork()
+
+//go:linkname afterForkInChild syscall.runtime_AfterForkInChild
+func afterForkInChild()
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/sys/userns_unsafe_linux.go b/vendor/github.com/containerd/containerd/v2/pkg/sys/userns_unsafe_linux.go
new file mode 100644
index 000000000000..4ebe46c32aa4
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/sys/userns_unsafe_linux.go
@@ -0,0 +1,94 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sys
+
+import (
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
+// ForkUserns forks a child process in a new user namespace. It returns the
+// child process's pid and a pidfd referencing the child process.
+//
+// Precondition: the runtime OS thread must be locked, which is a Go runtime
+// requirement.
+//
+// Besides this, the child process sets PR_SET_PDEATHSIG to SIGKILL, so the
+// parent process's OS thread must remain locked. Otherwise, the exit of the
+// parent process's OS thread will send a kill signal to the child process,
+// even if the parent process is still running.
+//
+//go:norace
+//go:noinline
+func ForkUserns() (_pid uintptr, _pidfd uintptr, _ syscall.Errno) {
+ var (
+ pidfd uintptr
+ pid, ppid uintptr
+ err syscall.Errno
+ )
+
+ ppid, _, err = syscall.RawSyscall(uintptr(syscall.SYS_GETPID), 0, 0, 0)
+ if err != 0 {
+ return 0, 0, err
+ }
+
+ beforeFork()
+ if runtime.GOARCH == "s390x" {
+ // NOTE:
+ //
+ // On the s390 architectures, the order of the first two
+ // arguments is reversed.
+ //
+ // REF: https://man7.org/linux/man-pages/man2/clone.2.html
+ pid, _, err = syscall.RawSyscall(syscall.SYS_CLONE,
+ 0,
+ uintptr(syscall.CLONE_NEWUSER|syscall.SIGCHLD|syscall.CLONE_PIDFD),
+ uintptr(unsafe.Pointer(&pidfd)),
+ )
+ } else {
+ pid, _, err = syscall.RawSyscall(syscall.SYS_CLONE,
+ uintptr(syscall.CLONE_NEWUSER|syscall.SIGCHLD|syscall.CLONE_PIDFD),
+ 0,
+ uintptr(unsafe.Pointer(&pidfd)),
+ )
+ }
+ if err != 0 || pid != 0 {
+ afterFork()
+ return pid, pidfd, err
+ }
+ afterForkInChild()
+
+ if _, _, err = syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, uintptr(syscall.SIGKILL), 0); err != 0 {
+ goto err
+ }
+
+ pid, _, err = syscall.RawSyscall(syscall.SYS_GETPPID, 0, 0, 0)
+ if err != 0 {
+ goto err
+ }
+
+ // exit if re-parented
+ if pid != ppid {
+ goto err
+ }
+
+ _, _, err = syscall.RawSyscall(syscall.SYS_PPOLL, 0, 0, 0)
+err:
+ syscall.RawSyscall(syscall.SYS_EXIT, uintptr(err), 0, 0)
+ panic("unreachable")
+}
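Reviewer note: a heavily simplified calling sketch. Real callers keep the OS thread locked for as long as the child must survive, since PDEATHSIG is bound to this thread; here the paused child is killed and reaped before unlocking:

```go
package main

import (
	"runtime"
	"syscall"

	"github.com/containerd/containerd/v2/pkg/sys"
)

func main() {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	pid, pidfd, errno := sys.ForkUserns()
	if errno != 0 {
		panic(errno)
	}
	_ = pidfd // could drive pidfd_send_signal(2) or poll(2) for liveness

	// Kill and reap the child before the locked thread is released.
	_ = syscall.Kill(int(pid), syscall.SIGKILL)
	var ws syscall.WaitStatus
	if _, err := syscall.Wait4(int(pid), &ws, 0, nil); err != nil {
		panic(err)
	}
}
```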
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/tracing/helpers.go b/vendor/github.com/containerd/containerd/v2/pkg/tracing/helpers.go
new file mode 100644
index 000000000000..ab1278ef1fcf
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/tracing/helpers.go
@@ -0,0 +1,85 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package tracing
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+func keyValue(k string, v any) attribute.KeyValue {
+ if v == nil {
+ return attribute.String(k, "")
+ }
+
+ switch typed := v.(type) {
+ case bool:
+ return attribute.Bool(k, typed)
+ case []bool:
+ return attribute.BoolSlice(k, typed)
+ case int:
+ return attribute.Int(k, typed)
+ case []int:
+ return attribute.IntSlice(k, typed)
+ case int8:
+ return attribute.Int(k, int(typed))
+ case []int8:
+ ls := make([]int, 0, len(typed))
+ for _, i := range typed {
+ ls = append(ls, int(i))
+ }
+ return attribute.IntSlice(k, ls)
+ case int16:
+ return attribute.Int(k, int(typed))
+ case []int16:
+ ls := make([]int, 0, len(typed))
+ for _, i := range typed {
+ ls = append(ls, int(i))
+ }
+ return attribute.IntSlice(k, ls)
+ case int32:
+ return attribute.Int64(k, int64(typed))
+ case []int32:
+ ls := make([]int64, 0, len(typed))
+ for _, i := range typed {
+ ls = append(ls, int64(i))
+ }
+ return attribute.Int64Slice(k, ls)
+ case int64:
+ return attribute.Int64(k, typed)
+ case []int64:
+ return attribute.Int64Slice(k, typed)
+ case float64:
+ return attribute.Float64(k, typed)
+ case []float64:
+ return attribute.Float64Slice(k, typed)
+ case string:
+ return attribute.String(k, typed)
+ case []string:
+ return attribute.StringSlice(k, typed)
+ }
+
+ if stringer, ok := v.(fmt.Stringer); ok {
+ return attribute.String(k, stringer.String())
+ }
+ if b, err := json.Marshal(v); b != nil && err == nil {
+ return attribute.String(k, string(b))
+ }
+ return attribute.String(k, fmt.Sprintf("%v", v))
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/tracing/log.go b/vendor/github.com/containerd/containerd/v2/pkg/tracing/log.go
new file mode 100644
index 000000000000..3af24a294a50
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/tracing/log.go
@@ -0,0 +1,82 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package tracing
+
+import (
+ "github.com/containerd/log"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// allLevels is the equivalent of [logrus.AllLevels].
+//
+// [logrus.AllLevels]: /~https://github.com/sirupsen/logrus/blob/v1.9.3/logrus.go#L80-L89
+var allLevels = []log.Level{
+ log.PanicLevel,
+ log.FatalLevel,
+ log.ErrorLevel,
+ log.WarnLevel,
+ log.InfoLevel,
+ log.DebugLevel,
+ log.TraceLevel,
+}
+
+// NewLogrusHook creates a new logrus hook
+func NewLogrusHook() *LogrusHook {
+ return &LogrusHook{}
+}
+
+// LogrusHook is a [logrus.Hook] which adds logrus events to active spans.
+// If the span is not recording or the span context is invalid, the hook
+// is a no-op.
+//
+// [logrus.Hook]: /~https://github.com/sirupsen/logrus/blob/v1.9.3/hooks.go#L3-L11
+type LogrusHook struct{}
+
+// Levels returns the logrus levels that this hook is interested in.
+func (h *LogrusHook) Levels() []log.Level {
+ return allLevels
+}
+
+// Fire is called when a log event occurs.
+func (h *LogrusHook) Fire(entry *log.Entry) error {
+ span := trace.SpanFromContext(entry.Context)
+ if span == nil {
+ return nil
+ }
+
+ if !span.IsRecording() || !span.SpanContext().IsValid() {
+ return nil
+ }
+
+ span.AddEvent(
+ entry.Message,
+ trace.WithAttributes(logrusDataToAttrs(entry.Data)...),
+ trace.WithAttributes(attribute.String("level", entry.Level.String())),
+ trace.WithTimestamp(entry.Time),
+ )
+
+ return nil
+}
+
+func logrusDataToAttrs(data map[string]any) []attribute.KeyValue {
+ attrs := make([]attribute.KeyValue, 0, len(data))
+ for k, v := range data {
+ attrs = append(attrs, keyValue(k, v))
+ }
+ return attrs
+}
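Reviewer note: a wiring sketch, assuming `log.L` is the shared `*logrus.Entry` from github.com/containerd/log as upstream. Registered once, any log call carrying a span-bearing context also emits a span event:

```go
package main

import (
	"context"

	"github.com/containerd/log"

	"github.com/containerd/containerd/v2/pkg/tracing"
)

func main() {
	// Register once at startup.
	log.L.Logger.AddHook(tracing.NewLogrusHook())

	// With a recording span in the context, this entry is mirrored onto
	// the span; with no span (or a no-op one), the hook does nothing.
	ctx, span := tracing.StartSpan(context.Background(), "example")
	defer span.End()
	log.G(ctx).WithField("key", "value").Info("hello")
}
```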
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/tracing/tracing.go b/vendor/github.com/containerd/containerd/v2/pkg/tracing/tracing.go
new file mode 100644
index 000000000000..48d760feb8f7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/tracing/tracing.go
@@ -0,0 +1,132 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package tracing
+
+import (
+ "context"
+ "net/http"
+ "strings"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// StartConfig defines configuration for a new span object.
+type StartConfig struct {
+ spanOpts []trace.SpanStartOption
+}
+
+type SpanOpt func(config *StartConfig)
+
+// WithAttribute appends attributes to a newly created span.
+func WithAttribute(k string, v interface{}) SpanOpt {
+ return func(config *StartConfig) {
+ config.spanOpts = append(config.spanOpts,
+ trace.WithAttributes(Attribute(k, v)))
+ }
+}
+
+// UpdateHTTPClient updates the http client with the necessary otel transport
+func UpdateHTTPClient(client *http.Client, name string) {
+ client.Transport = otelhttp.NewTransport(
+ client.Transport,
+ otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
+ return name
+ }),
+ )
+}
+
+// StartSpan starts a child span in a context.
+func StartSpan(ctx context.Context, opName string, opts ...SpanOpt) (context.Context, *Span) {
+ config := StartConfig{}
+ for _, fn := range opts {
+ fn(&config)
+ }
+ tracer := otel.Tracer("")
+ if parent := trace.SpanFromContext(ctx); parent != nil && parent.SpanContext().IsValid() {
+ tracer = parent.TracerProvider().Tracer("")
+ }
+ ctx, span := tracer.Start(ctx, opName, config.spanOpts...)
+ return ctx, &Span{otelSpan: span}
+}
+
+// SpanFromContext returns the current Span from the context.
+func SpanFromContext(ctx context.Context) *Span {
+ return &Span{
+ otelSpan: trace.SpanFromContext(ctx),
+ }
+}
+
+// Span is a wrapper around otel trace.Span.
+// Span is the individual component of a trace. It represents a
+// single named and timed operation of a workflow that is traced.
+type Span struct {
+ otelSpan trace.Span
+}
+
+// End completes the span.
+func (s *Span) End() {
+ s.otelSpan.End()
+}
+
+// AddEvent adds an event with the provided name and attributes.
+func (s *Span) AddEvent(name string, attributes ...attribute.KeyValue) {
+ s.otelSpan.AddEvent(name, trace.WithAttributes(attributes...))
+}
+
+// RecordError will record err as an exception span event for this span
+func (s *Span) RecordError(err error, options ...trace.EventOption) {
+ s.otelSpan.RecordError(err, options...)
+}
+
+// SetStatus sets the status of the current span.
+// If an error is encountered, it records the error and sets span status to Error.
+func (s *Span) SetStatus(err error) {
+ if err != nil {
+ s.otelSpan.RecordError(err)
+ s.otelSpan.SetStatus(codes.Error, err.Error())
+ } else {
+ s.otelSpan.SetStatus(codes.Ok, "")
+ }
+}
+
+// SetAttributes sets kv as attributes of the span.
+func (s *Span) SetAttributes(kv ...attribute.KeyValue) {
+ s.otelSpan.SetAttributes(kv...)
+}
+
+const spanDelimiter = "."
+
+// Name returns a span name built by joining the given strings in dot-separated format.
+func Name(names ...string) string {
+ return strings.Join(names, spanDelimiter)
+}
+
+// Attribute takes a key value pair and returns attribute.KeyValue type.
+func Attribute(k string, v any) attribute.KeyValue {
+ return keyValue(k, v)
+}
+
+// HTTPStatusCodeAttributes generates attributes of the HTTP namespace as specified by the OpenTelemetry
+// specification for a span.
+func HTTPStatusCodeAttributes(code int) []attribute.KeyValue {
+ return []attribute.KeyValue{semconv.HTTPStatusCodeKey.Int(code)}
+}
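Reviewer note: an end-to-end sketch. With no tracer provider registered, otel hands back a no-op tracer, so this runs safely as-is:

```go
package main

import (
	"context"
	"errors"

	"github.com/containerd/containerd/v2/pkg/tracing"
)

func main() {
	ctx, span := tracing.StartSpan(context.Background(),
		tracing.Name("content", "Fetch"), // span named "content.Fetch"
		tracing.WithAttribute("ref", "example.com/repo:tag"),
	)
	defer span.End()

	// Records the error and marks the span Error; a nil error sets Ok.
	span.SetStatus(doWork(ctx))
}

func doWork(ctx context.Context) error {
	_ = ctx
	return errors.New("example failure")
}
```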
diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/content_local_fuzzer.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/content_local_fuzzer.go
new file mode 100644
index 000000000000..39ec84b375b1
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/content_local_fuzzer.go
@@ -0,0 +1,76 @@
+//go:build gofuzz
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package local
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ _ "crypto/sha256"
+ "io"
+ "testing"
+
+ "github.com/opencontainers/go-digest"
+
+ "github.com/containerd/containerd/v2/core/content"
+)
+
+func FuzzContentStoreWriter(data []byte) int {
+ t := &testing.T{}
+ ctx := context.Background()
+ ctx, _, cs, cleanup := contentStoreEnv(t)
+ defer cleanup()
+
+ cw, err := cs.Writer(ctx, content.WithRef("myref"))
+ if err != nil {
+ return 0
+ }
+ if err := cw.Close(); err != nil {
+ return 0
+ }
+
+ // reopen, so we can test things
+ cw, err = cs.Writer(ctx, content.WithRef("myref"))
+ if err != nil {
+ return 0
+ }
+
+ err = checkCopyFuzz(int64(len(data)), cw, bufio.NewReader(io.NopCloser(bytes.NewReader(data))))
+ if err != nil {
+ return 0
+ }
+ expected := digest.FromBytes(data)
+
+ if err = cw.Commit(ctx, int64(len(data)), expected); err != nil {
+ return 0
+ }
+ return 1
+}
+
+func checkCopyFuzz(size int64, dst io.Writer, src io.Reader) error {
+ nn, err := io.Copy(dst, src)
+ if err != nil {
+ return err
+ }
+
+ if nn != size {
+ return fmt.Errorf("short copy: copied %d of %d bytes", nn, size)
+ }
+ return nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/locks.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/locks.go
new file mode 100644
index 000000000000..4caffcc02f46
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/locks.go
@@ -0,0 +1,62 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package local
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/containerd/errdefs"
+)
+
+// Handles locking references
+
+type lock struct {
+ since time.Time
+}
+
+var (
+ // locks lets us lock in process
+ locks = make(map[string]*lock)
+ locksMu sync.Mutex
+)
+
+func tryLock(ref string) error {
+ locksMu.Lock()
+ defer locksMu.Unlock()
+
+ if v, ok := locks[ref]; ok {
+ // Returning the duration may help developers distinguish deadlocks (long duration) from
+ // lock contention (short duration).
+ now := time.Now()
+ return fmt.Errorf(
+ "ref %s locked for %s (since %s): %w", ref, now.Sub(v.since), v.since,
+ errdefs.ErrUnavailable,
+ )
+ }
+
+ locks[ref] = &lock{time.Now()}
+ return nil
+}
+
+func unlock(ref string) {
+ locksMu.Lock()
+ defer locksMu.Unlock()
+
+ delete(locks, ref)
+}
diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/readerat.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/readerat.go
new file mode 100644
index 000000000000..59198fd6e92c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/readerat.go
@@ -0,0 +1,72 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package local
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/errdefs"
+)
+
+// sizeReaderAt implements content.ReaderAt over a file handle opened once,
+// with the size captured at open time.
+type sizeReaderAt struct {
+ size int64
+ fp *os.File
+}
+
+// OpenReader creates a content.ReaderAt from a file
+func OpenReader(p string) (content.ReaderAt, error) {
+ fi, err := os.Stat(p)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, err
+ }
+
+ return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound)
+ }
+
+ fp, err := os.Open(p)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, err
+ }
+
+ return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound)
+ }
+
+ return sizeReaderAt{size: fi.Size(), fp: fp}, nil
+}
+
+func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) {
+ return ra.fp.ReadAt(p, offset)
+}
+
+func (ra sizeReaderAt) Size() int64 {
+ return ra.size
+}
+
+func (ra sizeReaderAt) Close() error {
+ return ra.fp.Close()
+}
+
+func (ra sizeReaderAt) Reader() io.Reader {
+ return io.LimitReader(ra.fp, ra.size)
+}
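Reviewer note: a usage sketch (blob path illustrative), assuming core/content's `NewReader` helper as in upstream. Because the size is captured at open time, the reader caps how much a caller can stream:

```go
package main

import (
	"fmt"
	"io"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/plugins/content/local"
)

func main() {
	ra, err := local.OpenReader("/tmp/example-blob") // illustrative path
	if err != nil {
		panic(err) // wraps errdefs.ErrNotFound when the blob is missing
	}
	defer ra.Close()

	// content.NewReader adapts the ReaderAt into a sequential io.Reader.
	data, err := io.ReadAll(content.NewReader(ra))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data))
}
```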
diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/store.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store.go
new file mode 100644
index 000000000000..bc3102ffea00
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store.go
@@ -0,0 +1,692 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package local
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/internal/fsverity"
+ "github.com/containerd/containerd/v2/pkg/filters"
+
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var bufPool = sync.Pool{
+ New: func() interface{} {
+ buffer := make([]byte, 1<<20)
+ return &buffer
+ },
+}
+
+// LabelStore is used to store mutable labels for digests
+type LabelStore interface {
+ // Get returns all the labels for the given digest
+ Get(digest.Digest) (map[string]string, error)
+
+ // Set sets all the labels for a given digest
+ Set(digest.Digest, map[string]string) error
+
+ // Update replaces the given labels for a digest;
+ // a key with an empty value removes a label.
+ Update(digest.Digest, map[string]string) (map[string]string, error)
+}
+
+// Store is a digest-keyed store for content. All data written into the store is
+// stored under a verifiable digest.
+//
+// Store can generally support multi-reader, single-writer ingest of data,
+// including resumable ingest.
+type store struct {
+ root string
+ ls LabelStore
+ integritySupported bool
+}
+
+// NewStore returns a local content store
+func NewStore(root string) (content.Store, error) {
+ return NewLabeledStore(root, nil)
+}
+
+// NewLabeledStore returns a new content store using the provided label store
+//
+// Note: content stores which are used underneath a metadata store may not
+// require labels and should use `NewStore`. `NewLabeledStore` is primarily
+// useful for tests or standalone implementations.
+func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
+ if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil {
+ return nil, err
+ }
+
+ supported, _ := fsverity.IsSupported(root)
+
+ return &store{
+ root: root,
+ ls: ls,
+ integritySupported: supported,
+ }, nil
+}
+
+func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
+ p, err := s.blobPath(dgst)
+ if err != nil {
+ return content.Info{}, fmt.Errorf("calculating blob info path: %w", err)
+ }
+
+ fi, err := os.Stat(p)
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound)
+ }
+
+ return content.Info{}, err
+ }
+ var labels map[string]string
+ if s.ls != nil {
+ labels, err = s.ls.Get(dgst)
+ if err != nil {
+ return content.Info{}, err
+ }
+ }
+ return s.info(dgst, fi, labels), nil
+}
+
+func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info {
+ return content.Info{
+ Digest: dgst,
+ Size: fi.Size(),
+ CreatedAt: fi.ModTime(),
+ UpdatedAt: getATime(fi),
+ Labels: labels,
+ }
+}
+
+// ReaderAt returns an io.ReaderAt for the blob.
+func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
+ p, err := s.blobPath(desc.Digest)
+ if err != nil {
+ return nil, fmt.Errorf("calculating blob path for ReaderAt: %w", err)
+ }
+
+ reader, err := OpenReader(p)
+ if err != nil {
+ return nil, fmt.Errorf("blob %s expected at %s: %w", desc.Digest, p, err)
+ }
+
+ return reader, nil
+}
+
+// Delete removes a blob by its digest.
+//
+// While this is safe to do concurrently, callers that need existence checks
+// and removal to be atomic must hold some global lock on the store.
+func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
+ bp, err := s.blobPath(dgst)
+ if err != nil {
+ return fmt.Errorf("calculating blob path for delete: %w", err)
+ }
+
+ if err := os.RemoveAll(bp); err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+
+ return fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound)
+ }
+
+ return nil
+}
+
+func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
+ if s.ls == nil {
+ return content.Info{}, fmt.Errorf("update not supported on immutable content store: %w", errdefs.ErrFailedPrecondition)
+ }
+
+ p, err := s.blobPath(info.Digest)
+ if err != nil {
+ return content.Info{}, fmt.Errorf("calculating blob path for update: %w", err)
+ }
+
+ fi, err := os.Stat(p)
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = fmt.Errorf("content %v: %w", info.Digest, errdefs.ErrNotFound)
+ }
+
+ return content.Info{}, err
+ }
+
+ var (
+ all bool
+ labels map[string]string
+ )
+ if len(fieldpaths) > 0 {
+ for _, path := range fieldpaths {
+ if strings.HasPrefix(path, "labels.") {
+ if labels == nil {
+ labels = map[string]string{}
+ }
+
+ key := strings.TrimPrefix(path, "labels.")
+ labels[key] = info.Labels[key]
+ continue
+ }
+
+ switch path {
+ case "labels":
+ all = true
+ labels = info.Labels
+ default:
+ return content.Info{}, fmt.Errorf("cannot update %q field on content info %q: %w", path, info.Digest, errdefs.ErrInvalidArgument)
+ }
+ }
+ } else {
+ all = true
+ labels = info.Labels
+ }
+
+ if all {
+ err = s.ls.Set(info.Digest, labels)
+ } else {
+ labels, err = s.ls.Update(info.Digest, labels)
+ }
+ if err != nil {
+ return content.Info{}, err
+ }
+
+ info = s.info(info.Digest, fi, labels)
+ info.UpdatedAt = time.Now()
+
+ if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil {
+ log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest)
+ }
+
+ return info, nil
+}
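+
+// Illustrative use of Update (the digest, label value, and cs handle are
+// hypothetical): updating only the fieldpath "labels.containerd.io/gc.root"
+// leaves all other labels intact:
+//
+//    info := content.Info{
+//        Digest: dgst,
+//        Labels: map[string]string{"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339)},
+//    }
+//    info, err := cs.Update(ctx, info, "labels.containerd.io/gc.root")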
+
+func (s *store) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error {
+ root := filepath.Join(s.root, "blobs")
+
+ filter, err := filters.ParseAll(fs...)
+ if err != nil {
+ return err
+ }
+
+ var alg digest.Algorithm
+ return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() && !alg.Available() {
+ return nil
+ }
+
+ // TODO(stevvooe): There are a few more cases with subdirs that should be
+ // handled in case the layout gets corrupted. This isn't strict enough
+ // and may spew bad data.
+
+ if path == root {
+ return nil
+ }
+ if filepath.Dir(path) == root {
+ alg = digest.Algorithm(filepath.Base(path))
+
+ if !alg.Available() {
+ alg = ""
+ return filepath.SkipDir
+ }
+
+ // descending into a hash directory
+ return nil
+ }
+
+ dgst := digest.NewDigestFromEncoded(alg, filepath.Base(path))
+ if err := dgst.Validate(); err != nil {
+ // Log the error but don't report it: seeing this could mean some sort of
+ // corruption of the store or extra paths not expected previously.
+ log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")
+ }
+
+ var labels map[string]string
+ if s.ls != nil {
+ labels, err = s.ls.Get(dgst)
+ if err != nil {
+ return err
+ }
+ }
+
+ info := s.info(dgst, fi, labels)
+ if !filter.Match(content.AdaptInfo(info)) {
+ return nil
+ }
+ return fn(info)
+ })
+}
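+
+// Sketch of walking every blob (the optional trailing arguments are filter
+// expressions parsed by the filters package):
+//
+//    err := cs.Walk(ctx, func(info content.Info) error {
+//        fmt.Println(info.Digest, info.Size)
+//        return nil
+//    })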
+
+func (s *store) Status(ctx context.Context, ref string) (content.Status, error) {
+ return s.status(s.ingestRoot(ref))
+}
+
+func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) {
+ fp, err := os.Open(filepath.Join(s.root, "ingest"))
+ if err != nil {
+ return nil, err
+ }
+ defer fp.Close()
+
+ fis, err := fp.Readdirnames(-1)
+ if err != nil {
+ return nil, err
+ }
+
+ filter, err := filters.ParseAll(fs...)
+ if err != nil {
+ return nil, err
+ }
+
+ var active []content.Status
+ for _, fi := range fis {
+ p := filepath.Join(s.root, "ingest", fi)
+ stat, err := s.status(p)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, err
+ }
+
+ // TODO(stevvooe): This is a common error if uploads are being
+ // completed while making this listing. Need to consider taking a
+ // lock on the whole store to coordinate this aspect.
+ //
+ // Another option is to cleanup downloads asynchronously and
+ // coordinate this method with the cleanup process.
+ //
+ // For now, we just skip them, as they really don't exist.
+ continue
+ }
+
+ if filter.Match(adaptStatus(stat)) {
+ active = append(active, stat)
+ }
+ }
+
+ return active, nil
+}
+
+// WalkStatusRefs is used to walk all status references.
+//
+// Failed status reads will be logged and ignored; such errors may be
+// produced if this function is called while references are being altered.
+func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error {
+ fp, err := os.Open(filepath.Join(s.root, "ingest"))
+ if err != nil {
+ return err
+ }
+ defer fp.Close()
+
+ fis, err := fp.Readdirnames(-1)
+ if err != nil {
+ return err
+ }
+
+ for _, fi := range fis {
+ rf := filepath.Join(s.root, "ingest", fi, "ref")
+
+ ref, err := readFileString(rf)
+ if err != nil {
+ log.G(ctx).WithError(err).WithField("path", rf).Error("failed to read ingest ref")
+ continue
+ }
+
+ if err := fn(ref); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// status works like Status above, except it takes the path to the ingest directly.
+func (s *store) status(ingestPath string) (content.Status, error) {
+ dp := filepath.Join(ingestPath, "data")
+ fi, err := os.Stat(dp)
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound)
+ }
+ return content.Status{}, err
+ }
+
+ ref, err := readFileString(filepath.Join(ingestPath, "ref"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound)
+ }
+ return content.Status{}, err
+ }
+
+ startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat"))
+ if err != nil {
+ return content.Status{}, fmt.Errorf("could not read startedat: %w", err)
+ }
+
+ updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat"))
+ if err != nil {
+ return content.Status{}, fmt.Errorf("could not read updatedat: %w", err)
+ }
+
+ // because we don't write updatedat on every write, the mod time may
+ // actually be more up to date.
+ if fi.ModTime().After(updatedAt) {
+ updatedAt = fi.ModTime()
+ }
+
+ return content.Status{
+ Ref: ref,
+ Offset: fi.Size(),
+ Total: s.total(ingestPath),
+ UpdatedAt: updatedAt,
+ StartedAt: startedAt,
+ }, nil
+}
+
+func adaptStatus(status content.Status) filters.Adaptor {
+ return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
+ if len(fieldpath) == 0 {
+ return "", false
+ }
+ switch fieldpath[0] {
+ case "ref":
+ return status.Ref, true
+ }
+
+ return "", false
+ })
+}
+
+// total attempts to resolve the total expected size for the write.
+func (s *store) total(ingestPath string) int64 {
+ totalS, err := readFileString(filepath.Join(ingestPath, "total"))
+ if err != nil {
+ return 0
+ }
+
+ total, err := strconv.ParseInt(totalS, 10, 64)
+ if err != nil {
+ // represents a corrupted file, should probably remove.
+ return 0
+ }
+
+ return total
+}
+
+// Writer begins or resumes the active writer identified by ref. If the writer
+// is already in use, an error is returned. Only one writer may be in use per
+// ref at a time.
+//
+// The argument `ref` is used to uniquely identify a long-lived writer transaction.
+func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
+ var wOpts content.WriterOpts
+ for _, opt := range opts {
+ if err := opt(&wOpts); err != nil {
+ return nil, err
+ }
+ }
+ // TODO(AkihiroSuda): we could create a random string or one calculated based on the context
+ // /~https://github.com/containerd/containerd/issues/2129#issuecomment-380255019
+ if wOpts.Ref == "" {
+ return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument)
+ }
+
+ if err := tryLock(wOpts.Ref); err != nil {
+ return nil, err
+ }
+
+ w, err := s.writer(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest)
+ if err != nil {
+ unlock(wOpts.Ref)
+ return nil, err
+ }
+
+ return w, nil // lock is now held by w.
+}
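+
+// Sketch of obtaining a writer (content.WithRef and content.WithDescriptor are
+// the standard WriterOpts; the ref string is illustrative):
+//
+//    w, err := cs.Writer(ctx, content.WithRef("upload-layer-0"), content.WithDescriptor(desc))
+//    if err != nil {
+//        return err // errdefs.IsAlreadyExists(err) if the blob is already present
+//    }
+//    defer w.Close()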
+
+func (s *store) resumeStatus(ref string, total int64, digester digest.Digester) (content.Status, error) {
+ path, _, data := s.ingestPaths(ref)
+ status, err := s.status(path)
+ if err != nil {
+ return status, fmt.Errorf("failed reading status of resume write: %w", err)
+ }
+ if ref != status.Ref {
+ // NOTE(stevvooe): This is fairly catastrophic. Either we have some
+ // layout corruption or a hash collision for the ref key.
+ return status, fmt.Errorf("ref key does not match: %v != %v", ref, status.Ref)
+ }
+
+ if total > 0 && status.Total > 0 && total != status.Total {
+ return status, fmt.Errorf("provided total differs from status: %v != %v", total, status.Total)
+ }
+
+ //nolint:dupword
+ // TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
+ fp, err := os.Open(data)
+ if err != nil {
+ return status, err
+ }
+
+ p := bufPool.Get().(*[]byte)
+ status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
+ bufPool.Put(p)
+ fp.Close()
+ return status, err
+}
+
+// writer provides the main implementation of the Writer method. The caller
+// must hold the ref lock and is responsible for releasing it on error.
+func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
+ // TODO(stevvooe): Need to actually store expected here. We have
+ // code in the service that shouldn't be dealing with this.
+ if expected != "" {
+ p, err := s.blobPath(expected)
+ if err != nil {
+ return nil, fmt.Errorf("calculating expected blob path for writer: %w", err)
+ }
+ if _, err := os.Stat(p); err == nil {
+ return nil, fmt.Errorf("content %v: %w", expected, errdefs.ErrAlreadyExists)
+ }
+ }
+
+ path, refp, data := s.ingestPaths(ref)
+
+ var (
+ digester = digest.Canonical.Digester()
+ offset int64
+ startedAt time.Time
+ updatedAt time.Time
+ )
+
+ foundValidIngest := false
+ // ensure that the ingest path has been created.
+ if err := os.Mkdir(path, 0755); err != nil {
+ if !os.IsExist(err) {
+ return nil, err
+ }
+ status, err := s.resumeStatus(ref, total, digester)
+ if err == nil {
+ foundValidIngest = true
+ updatedAt = status.UpdatedAt
+ startedAt = status.StartedAt
+ total = status.Total
+ offset = status.Offset
+ } else {
+ log.G(ctx).Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error())
+ }
+ }
+
+ if !foundValidIngest {
+ startedAt = time.Now()
+ updatedAt = startedAt
+
+ // the ingest is new, we need to setup the target location.
+ // write the ref to a file for later use
+ if err := os.WriteFile(refp, []byte(ref), 0666); err != nil {
+ return nil, err
+ }
+
+ if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
+ return nil, err
+ }
+
+ if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
+ return nil, err
+ }
+
+ if total > 0 {
+ if err := os.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open data file: %w", err)
+ }
+
+ if _, err := fp.Seek(offset, io.SeekStart); err != nil {
+ fp.Close()
+ return nil, fmt.Errorf("could not seek to current write offset: %w", err)
+ }
+
+ return &writer{
+ s: s,
+ fp: fp,
+ ref: ref,
+ path: path,
+ offset: offset,
+ total: total,
+ digester: digester,
+ startedAt: startedAt,
+ updatedAt: updatedAt,
+ }, nil
+}
+
+// Abort an active transaction keyed by ref. If the ingest is active, it will
+// be cancelled. Any resources associated with the ingest will be cleaned.
+func (s *store) Abort(ctx context.Context, ref string) error {
+ root := s.ingestRoot(ref)
+ if err := os.RemoveAll(root); err != nil {
+ if os.IsNotExist(err) {
+ return fmt.Errorf("ingest ref %q: %w", ref, errdefs.ErrNotFound)
+ }
+
+ return err
+ }
+
+ return nil
+}
+
+func (s *store) blobPath(dgst digest.Digest) (string, error) {
+ if err := dgst.Validate(); err != nil {
+ return "", fmt.Errorf("cannot calculate blob path from invalid digest: %v: %w", err, errdefs.ErrInvalidArgument)
+ }
+
+ return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Encoded()), nil
+}
+
+func (s *store) ingestRoot(ref string) string {
+ // we take a digest of the ref to keep the ingest paths constant length.
+ // Note that this is not the current or potential digest of incoming content.
+ dgst := digest.FromString(ref)
+ return filepath.Join(s.root, "ingest", dgst.Encoded())
+}
+
+// ingestPaths returns the paths used for an ingest. The paths are the following:
+//
+// - root: entire ingest directory
+// - ref: name of the starting ref, must be unique
+// - data: file where data is written
+func (s *store) ingestPaths(ref string) (string, string, string) {
+ var (
+ fp = s.ingestRoot(ref)
+ rp = filepath.Join(fp, "ref")
+ dp = filepath.Join(fp, "data")
+ )
+
+ return fp, rp, dp
+}
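+
+// For example, with store root "/var/lib/store" and some ref, the returned
+// paths look like the following, where <sha256(ref)> is the digest of the ref
+// string itself, not of any content:
+//
+//    /var/lib/store/ingest/<sha256(ref)>        root
+//    /var/lib/store/ingest/<sha256(ref)>/ref    ref file
+//    /var/lib/store/ingest/<sha256(ref)>/data   data file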
+
+func readFileString(path string) (string, error) {
+ p, err := os.ReadFile(path)
+ return string(p), err
+}
+
+// readFileTimestamp reads a file with just a timestamp present.
+func readFileTimestamp(p string) (time.Time, error) {
+ b, err := os.ReadFile(p)
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound)
+ }
+ return time.Time{}, err
+ }
+
+ var t time.Time
+ if err := t.UnmarshalText(b); err != nil {
+ return time.Time{}, fmt.Errorf("could not parse timestamp file %v: %w", p, err)
+ }
+
+ return t, nil
+}
+
+func writeTimestampFile(p string, t time.Time) error {
+ b, err := t.MarshalText()
+ if err != nil {
+ return err
+ }
+ return writeToCompletion(p, b, 0666)
+}
+
+func writeToCompletion(path string, data []byte, mode os.FileMode) error {
+ tmp := fmt.Sprintf("%s.tmp", path)
+ f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode)
+ if err != nil {
+ return fmt.Errorf("create tmp file: %w", err)
+ }
+ _, err = f.Write(data)
+ f.Close()
+ if err != nil {
+ return fmt.Errorf("write tmp file: %w", err)
+ }
+ err = os.Rename(tmp, path)
+ if err != nil {
+ return fmt.Errorf("rename tmp file: %w", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_bsd.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_bsd.go
new file mode 100644
index 000000000000..7dcc1923270e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_bsd.go
@@ -0,0 +1,33 @@
+//go:build darwin || freebsd || netbsd
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package local
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func getATime(fi os.FileInfo) time.Time {
+ if st, ok := fi.Sys().(*syscall.Stat_t); ok {
+ return time.Unix(st.Atimespec.Unix())
+ }
+
+ return fi.ModTime()
+}
diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_openbsd.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_openbsd.go
new file mode 100644
index 000000000000..45dfa9997e19
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_openbsd.go
@@ -0,0 +1,33 @@
+//go:build openbsd
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package local
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func getATime(fi os.FileInfo) time.Time {
+ if st, ok := fi.Sys().(*syscall.Stat_t); ok {
+ return time.Unix(st.Atim.Unix())
+ }
+
+ return fi.ModTime()
+}
diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_unix.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_unix.go
new file mode 100644
index 000000000000..cb01c91c7fea
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_unix.go
@@ -0,0 +1,33 @@
+//go:build linux || solaris
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package local
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func getATime(fi os.FileInfo) time.Time {
+ if st, ok := fi.Sys().(*syscall.Stat_t); ok {
+ return time.Unix(st.Atim.Unix())
+ }
+
+ return fi.ModTime()
+}
diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_windows.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_windows.go
new file mode 100644
index 000000000000..bce849979099
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_windows.go
@@ -0,0 +1,26 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package local
+
+import (
+ "os"
+ "time"
+)
+
+func getATime(fi os.FileInfo) time.Time {
+ return fi.ModTime()
+}
diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/test_helper.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/test_helper.go
new file mode 100644
index 000000000000..a4c238c47c8f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/test_helper.go
@@ -0,0 +1,38 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package local
+
+import (
+ "context"
+ "testing"
+
+ "github.com/containerd/containerd/v2/core/content"
+)
+
+func contentStoreEnv(t testing.TB) (context.Context, string, content.Store, func()) {
+ tmpdir := t.TempDir()
+
+ cs, err := NewStore(tmpdir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ return ctx, tmpdir, cs, func() {
+ cancel()
+ }
+}
diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/writer.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/writer.go
new file mode 100644
index 000000000000..5058912b7fc9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/writer.go
@@ -0,0 +1,226 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package local
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "time"
+
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+ "github.com/opencontainers/go-digest"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/internal/fsverity"
+)
+
+// writer represents a write transaction against the blob store.
+type writer struct {
+ s *store
+ fp *os.File // opened data file
+ path string // path to writer dir
+ ref string // ref key
+ offset int64
+ total int64
+ digester digest.Digester
+ startedAt time.Time
+ updatedAt time.Time
+}
+
+func (w *writer) Status() (content.Status, error) {
+ return content.Status{
+ Ref: w.ref,
+ Offset: w.offset,
+ Total: w.total,
+ StartedAt: w.startedAt,
+ UpdatedAt: w.updatedAt,
+ }, nil
+}
+
+// Digest returns the current digest of the content, up to the current write.
+//
+// Cannot be called concurrently with `Write`.
+func (w *writer) Digest() digest.Digest {
+ return w.digester.Digest()
+}
+
+// Write p to the transaction.
+//
+// Note that writes are unbuffered to the backing file. When writing, it is
+// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer.
+func (w *writer) Write(p []byte) (n int, err error) {
+ n, err = w.fp.Write(p)
+ w.digester.Hash().Write(p[:n])
+ w.offset += int64(len(p))
+ w.updatedAt = time.Now()
+ return n, err
+}
+
+func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+ // Ensure the ref lock is released even on error
+ defer unlock(w.ref)
+
+ var base content.Info
+ for _, opt := range opts {
+ if err := opt(&base); err != nil {
+ return err
+ }
+ }
+
+ fp := w.fp
+ w.fp = nil
+
+ if fp == nil {
+ return fmt.Errorf("cannot commit on closed writer: %w", errdefs.ErrFailedPrecondition)
+ }
+
+ if err := fp.Sync(); err != nil {
+ fp.Close()
+ return fmt.Errorf("sync failed: %w", err)
+ }
+
+ fi, err := fp.Stat()
+ closeErr := fp.Close()
+ if err != nil {
+ return fmt.Errorf("stat on ingest file failed: %w", err)
+ }
+ if closeErr != nil {
+ return fmt.Errorf("failed to close ingest file: %w", closeErr)
+ }
+
+ if size > 0 && size != fi.Size() {
+ return fmt.Errorf("unexpected commit size %d, expected %d: %w", fi.Size(), size, errdefs.ErrFailedPrecondition)
+ }
+
+ dgst := w.digester.Digest()
+ if expected != "" && expected != dgst {
+ return fmt.Errorf("unexpected commit digest %s, expected %s: %w", dgst, expected, errdefs.ErrFailedPrecondition)
+ }
+
+ var (
+ ingest = filepath.Join(w.path, "data")
+ target, _ = w.s.blobPath(dgst) // ignore error because we calculated this dgst
+ )
+
+ // make sure parent directories of blob exist
+ if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
+ return err
+ }
+
+ if _, err := os.Stat(target); err == nil {
+ // collision with the target file!
+ if err := os.RemoveAll(w.path); err != nil {
+ log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory")
+ }
+ return fmt.Errorf("content %v: %w", dgst, errdefs.ErrAlreadyExists)
+ }
+
+ if err := os.Rename(ingest, target); err != nil {
+ return err
+ }
+
+ // Enable content blob integrity verification if supported
+ if w.s.integritySupported {
+ if err := fsverity.Enable(target); err != nil {
+ log.G(ctx).Warnf("failed to enable integrity for blob %v: %s", target, err.Error())
+ }
+ }
+
+ // Ingest has now been made available in the content store, attempt to complete
+ // setting metadata but errors should only be logged and not returned since
+ // the content store cannot be cleanly rolled back.
+
+ commitTime := time.Now()
+ if err := os.Chtimes(target, commitTime, commitTime); err != nil {
+ log.G(ctx).WithField("digest", dgst).Error("failed to change file time to commit time")
+ }
+
+ // clean up!!
+ if err := os.RemoveAll(w.path); err != nil {
+ log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory")
+ }
+
+ if w.s.ls != nil && base.Labels != nil {
+ if err := w.s.ls.Set(dgst, base.Labels); err != nil {
+ log.G(ctx).WithField("digest", dgst).Error("failed to set labels")
+ }
+ }
+
+ // change to readonly, more important for read, but provides _some_
+ // protection from this point on. We use the existing perms with a mask
+ // only allowing reads honoring the umask on creation.
+ //
+ // This removes write and exec, only allowing read per the creation umask.
+ //
+ // NOTE: Windows does not support this operation
+ if runtime.GOOS != "windows" {
+ if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil {
+ log.G(ctx).WithField("ref", w.ref).Error("failed to make readonly")
+ }
+ }
+
+ return nil
+}
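+
+// Sketch of a complete write-then-commit flow against this writer (blob and
+// cs are hypothetical; on success the data becomes addressable by digest):
+//
+//    if _, err := io.Copy(w, bytes.NewReader(blob)); err != nil {
+//        return err
+//    }
+//    if err := w.Commit(ctx, int64(len(blob)), digest.FromBytes(blob)); err != nil {
+//        // errdefs.IsAlreadyExists(err) means the content was already stored.
+//        return err
+//    }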
+
+// Close the writer, flushing any unwritten data and leaving the progress
+// intact.
+//
+// If one needs to resume the transaction, a new writer can be obtained from
+// `Ingester.Writer` using the same key. The write can then be continued
+// from where it was left off.
+//
+// To abandon a transaction completely, first call Close, then `IngestManager.Abort`
+// to clean up the associated resources.
+func (w *writer) Close() (err error) {
+ if w.fp != nil {
+ w.fp.Sync()
+ err = w.fp.Close()
+ writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt)
+ w.fp = nil
+ unlock(w.ref)
+ return
+ }
+
+ return nil
+}
+
+func (w *writer) Truncate(size int64) error {
+ if size != 0 {
+ return errors.New("Truncate: unsupported size")
+ }
+ w.offset = 0
+ w.digester.Hash().Reset()
+ if _, err := w.fp.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+ return w.fp.Truncate(0)
+}
+
+func (w *writer) Sync() error {
+ if w.fp != nil {
+ return w.fp.Sync()
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/plugins/snapshots/overlay/overlayutils/check.go b/vendor/github.com/containerd/containerd/v2/plugins/snapshots/overlay/overlayutils/check.go
new file mode 100644
index 000000000000..fd943777d282
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/plugins/snapshots/overlay/overlayutils/check.go
@@ -0,0 +1,297 @@
+//go:build linux
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package overlayutils
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/moby/sys/userns"
+ "golang.org/x/sys/unix"
+
+ "github.com/containerd/containerd/v2/core/mount"
+ kernel "github.com/containerd/containerd/v2/pkg/kernelversion"
+ "github.com/containerd/continuity/fs"
+ "github.com/containerd/log"
+)
+
+const (
+ // see https://man7.org/linux/man-pages/man2/statfs.2.html
+ tmpfsMagic = 0x01021994
+)
+
+// SupportsMultipleLowerDir checks if the system supports multiple lowerdirs,
+// which is required for the overlay snapshotter. On 4.x kernels, multiple lowerdirs
+// are always available (so this check isn't needed); support was also backported
+// to RHEL and CentOS 3.x kernels (3.10.0-693.el7.x86_64 and up). This function
+// detects support on those kernels without doing a kernel version compare.
+//
+// Ported from moby overlay2.
+func SupportsMultipleLowerDir(d string) error {
+ td, err := os.MkdirTemp(d, "multiple-lowerdir-check")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := os.RemoveAll(td); err != nil {
+ log.L.WithError(err).Warnf("Failed to remove check directory %v", td)
+ }
+ }()
+
+ for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} {
+ if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil {
+ return err
+ }
+ }
+
+ opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work"))
+ m := mount.Mount{
+ Type: "overlay",
+ Source: "overlay",
+ Options: []string{opts},
+ }
+ dest := filepath.Join(td, "merged")
+ if err := m.Mount(dest); err != nil {
+ return fmt.Errorf("failed to mount overlay: %w", err)
+ }
+ if err := mount.UnmountAll(dest, 0); err != nil {
+ log.L.WithError(err).Warnf("Failed to unmount check directory %v", dest)
+ }
+ return nil
+}
+
+// Supported returns nil when the overlayfs is functional on the system with the root directory.
+// Supported is not called during plugin initialization, but is exposed for
+// downstream projects that use this snapshotter as a library.
+func Supported(root string) error {
+ if err := os.MkdirAll(root, 0700); err != nil {
+ return err
+ }
+ supportsDType, err := fs.SupportsDType(root)
+ if err != nil {
+ return err
+ }
+ if !supportsDType {
+ return fmt.Errorf("%s does not support d_type. If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root)
+ }
+ return SupportsMultipleLowerDir(root)
+}
+
+// IsPathOnTmpfs returns whether the path is on a tmpfs or not.
+//
+// It uses statfs to check if the fs type is TMPFS_MAGIC (0x01021994)
+// see https://man7.org/linux/man-pages/man2/statfs.2.html
+func IsPathOnTmpfs(d string) bool {
+ stat := syscall.Statfs_t{}
+ err := syscall.Statfs(d, &stat)
+ if err != nil {
+ log.L.WithError(err).Warnf("Could not retrieve statfs for %v", d)
+ return false
+ }
+
+ return stat.Type == tmpfsMagic
+}
+
+// NeedsUserXAttr returns whether overlayfs should be mounted with the "userxattr" mount option.
+//
+// The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11.
+//
+// The "userxattr" option is NOT needed for the initial user namespace (aka "the host").
+//
+// Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount
+// the overlayfs in a user namespace without the "userxattr" option.
+//
+// The corresponding kernel commit: /~https://github.com/torvalds/linux/commit/2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1
+// > ovl: user xattr
+// >
+// > Optionally allow using "user.overlay." namespace instead of "trusted.overlay."
+// > ...
+// > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the
+// > "user.overlay.redirect" or "user.overlay.metacopy" xattrs.
+// > ...
+//
+// The "userxattr" support is not exposed in "/sys/module/overlay/parameters".
+func NeedsUserXAttr(d string) (bool, error) {
+ if !userns.RunningInUserNS() {
+ // we are the real root (i.e., the root in the initial user NS),
+ // so we never need the "userxattr" opt.
+ return false, nil
+ }
+
+ // userxattr not permitted on tmpfs https://man7.org/linux/man-pages/man5/tmpfs.5.html
+ if IsPathOnTmpfs(d) {
+ return false, nil
+ }
+
+ // Fast path on kernels >= 5.11
+ //
+ // Keep in mind that distro vendors might backport the patch to older kernels,
+ // so we can't completely remove the "slow path".
+ fiveDotEleven := kernel.KernelVersion{Kernel: 5, Major: 11}
+ if ok, err := kernel.GreaterEqualThan(fiveDotEleven); err == nil && ok {
+ return true, nil
+ }
+
+ tdRoot := filepath.Join(d, "userxattr-check")
+ if err := os.RemoveAll(tdRoot); err != nil {
+ log.L.WithError(err).Warnf("Failed to remove check directory %v", tdRoot)
+ }
+
+ if err := os.MkdirAll(tdRoot, 0700); err != nil {
+ return false, err
+ }
+
+ defer func() {
+ if err := os.RemoveAll(tdRoot); err != nil {
+ log.L.WithError(err).Warnf("Failed to remove check directory %v", tdRoot)
+ }
+ }()
+
+ td, err := os.MkdirTemp(tdRoot, "")
+ if err != nil {
+ return false, err
+ }
+
+ for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} {
+ if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil {
+ return false, err
+ }
+ }
+
+ opts := []string{
+ "ro",
+ fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work")),
+ "userxattr",
+ }
+
+ m := mount.Mount{
+ Type: "overlay",
+ Source: "overlay",
+ Options: opts,
+ }
+
+ dest := filepath.Join(td, "merged")
+ if err := m.Mount(dest); err != nil {
+ // Probably the host is running Ubuntu/Debian kernel (< 5.11) with the userns patch but without the userxattr patch.
+ // Return false without error.
+ log.L.WithError(err).Debugf("cannot mount overlay with \"userxattr\", probably the kernel does not support userxattr")
+ return false, nil
+ }
+ if err := mount.UnmountAll(dest, 0); err != nil {
+ log.L.WithError(err).Warnf("Failed to unmount check directory %v", dest)
+ }
+ return true, nil
+}
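+
+// Sketch of how a caller might apply the result when assembling overlay mount
+// options (lower and snapshotterRoot are illustrative names):
+//
+//    opts := []string{fmt.Sprintf("lowerdir=%s", lower)}
+//    if userXattr, err := NeedsUserXAttr(snapshotterRoot); err == nil && userXattr {
+//        opts = append(opts, "userxattr")
+//    }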
+
+// SupportsIDMappedMounts reports whether this kernel supports idmapped mounts
+// for overlayfs.
+//
+// This function may return an error regardless of whether the kernel supports
+// idmapped mounts for overlayfs (e.g. -ENOSYS as well as -EPERM may be returned),
+// so callers should check for (true, err == nil) and treat any other result as a
+// lack of kernel-side support.
+func SupportsIDMappedMounts() (bool, error) {
+ // Fast path
+ fiveDotNineteen := kernel.KernelVersion{Kernel: 5, Major: 19}
+ if ok, err := kernel.GreaterEqualThan(fiveDotNineteen); err == nil && ok {
+ return true, nil
+ }
+
+ // Do slow path, because idmapped mounts may be backported to older kernels.
+ uidMap := syscall.SysProcIDMap{
+ ContainerID: 0,
+ HostID: 666,
+ Size: 1,
+ }
+ gidMap := syscall.SysProcIDMap{
+ ContainerID: 0,
+ HostID: 666,
+ Size: 1,
+ }
+ td, err := os.MkdirTemp("", "ovl-idmapped-check")
+ if err != nil {
+ return false, fmt.Errorf("failed to create check directory: %w", err)
+ }
+ defer func() {
+ if err := os.RemoveAll(td); err != nil {
+ log.L.WithError(err).Warnf("failed to remove check directory %s", td)
+ }
+ }()
+
+ for _, dir := range []string{"lower", "upper", "work", "merged"} {
+ if err = os.Mkdir(filepath.Join(td, dir), 0755); err != nil {
+ return false, fmt.Errorf("failed to create %s directory: %w", dir, err)
+ }
+ }
+
+ if err = os.Lchown(filepath.Join(td, "upper"), uidMap.HostID, gidMap.HostID); err != nil {
+ return false, fmt.Errorf("failed to chown upper directory %s: %w", filepath.Join(td, "upper"), err)
+ }
+
+ lowerDir := filepath.Join(td, "lower")
+ uidmap := fmt.Sprintf("%d:%d:%d", uidMap.ContainerID, uidMap.HostID, uidMap.Size)
+ gidmap := fmt.Sprintf("%d:%d:%d", gidMap.ContainerID, gidMap.HostID, gidMap.Size)
+
+ usernsFd, err := mount.GetUsernsFD(uidmap, gidmap)
+ if err != nil {
+ return false, err
+ }
+ defer usernsFd.Close()
+
+ if err = mount.IDMapMount(lowerDir, lowerDir, int(usernsFd.Fd())); err != nil {
+ return false, fmt.Errorf("failed to remap lowerdir %s: %w", lowerDir, err)
+ }
+ defer func() {
+ if err = unix.Unmount(lowerDir, 0); err != nil {
+ log.L.WithError(err).Warnf("failed to unmount lowerdir %s", lowerDir)
+ }
+ }()
+
+ opts := fmt.Sprintf("index=off,lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, filepath.Join(td, "upper"), filepath.Join(td, "work"))
+ if err = unix.Mount("", filepath.Join(td, "merged"), "overlay", uintptr(unix.MS_RDONLY), opts); err != nil {
+ return false, fmt.Errorf("failed to mount idmapped overlay to %s: %w", filepath.Join(td, "merged"), err)
+ }
+ defer func() {
+ if err = unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
+ log.L.WithError(err).Warnf("failed to unmount overlay check directory %s", filepath.Join(td, "merged"))
+ }
+ }()
+
+ // NOTE: we can't just return true if the mount didn't fail, since the mount
+ // can succeed merely because overlay supports idmappings of the {lower,upper}dir
+ // mounts themselves. That means we need to check the merged directory to make
+ // sure it completely supports idmapped mounts.
+ st, err := os.Stat(filepath.Join(td, "merged"))
+ if err != nil {
+ return false, fmt.Errorf("failed to stat %s: %w", filepath.Join(td, "merged"), err)
+ }
+ if stat, ok := st.Sys().(*syscall.Stat_t); !ok {
+ return false, fmt.Errorf("incompatible types after stat call: *syscall.Stat_t expected")
+ } else if int(stat.Uid) != uidMap.HostID || int(stat.Gid) != gidMap.HostID {
+ return false, fmt.Errorf("bad mapping: expected {uid: %d, gid: %d}; real {uid: %d, gid: %d}", uidMap.HostID, gidMap.HostID, int(stat.Uid), int(stat.Gid))
+ }
+
+ return true, nil
+}
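+
+// As the doc comment above notes, callers should gate on (true, nil), e.g.:
+//
+//    if ok, err := SupportsIDMappedMounts(); err == nil && ok {
+//        // safe to use idmapped mounts with overlayfs
+//    }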
diff --git a/vendor/github.com/containerd/containerd/v2/plugins/types.go b/vendor/github.com/containerd/containerd/v2/plugins/types.go
new file mode 100644
index 000000000000..f5a0ff11aa52
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/plugins/types.go
@@ -0,0 +1,101 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package plugins stores all the plugin types used by containerd internally.
+//
+// External plugins should copy from these types and avoid importing this
+// package.
+package plugins
+
+import "github.com/containerd/plugin"
+
+const (
+ // InternalPlugin implements an internal plugin to containerd
+ InternalPlugin plugin.Type = "io.containerd.internal.v1"
+ // RuntimePlugin implements a runtime
+ RuntimePlugin plugin.Type = "io.containerd.runtime.v1"
+ // RuntimePluginV2 implements a runtime v2
+ RuntimePluginV2 plugin.Type = "io.containerd.runtime.v2"
+ // ServicePlugin implements an internal service
+ ServicePlugin plugin.Type = "io.containerd.service.v1"
+ // GRPCPlugin implements a grpc service
+ GRPCPlugin plugin.Type = "io.containerd.grpc.v1"
+ // TTRPCPlugin implements a ttrpc shim service
+ TTRPCPlugin plugin.Type = "io.containerd.ttrpc.v1"
+ // SnapshotPlugin implements a snapshotter
+ SnapshotPlugin plugin.Type = "io.containerd.snapshotter.v1"
+ // TaskMonitorPlugin implements a task monitor
+ TaskMonitorPlugin plugin.Type = "io.containerd.monitor.task.v1"
+ // ContainerMonitorPlugin implements a container monitor
+ ContainerMonitorPlugin plugin.Type = "io.containerd.monitor.container.v1"
+ // DiffPlugin implements a differ
+ DiffPlugin plugin.Type = "io.containerd.differ.v1"
+ // MetadataPlugin implements a metadata store
+ MetadataPlugin plugin.Type = "io.containerd.metadata.v1"
+ // ContentPlugin implements a content store
+ ContentPlugin plugin.Type = "io.containerd.content.v1"
+ // GCPlugin implements garbage collection policy
+ GCPlugin plugin.Type = "io.containerd.gc.v1"
+ // EventPlugin implements event handling
+ EventPlugin plugin.Type = "io.containerd.event.v1"
+ // LeasePlugin implements lease manager
+ LeasePlugin plugin.Type = "io.containerd.lease.v1"
+ // StreamingPlugin implements a stream manager
+ StreamingPlugin plugin.Type = "io.containerd.streaming.v1"
+ // TracingProcessorPlugin implements an OpenTelemetry span processor
+ TracingProcessorPlugin plugin.Type = "io.containerd.tracing.processor.v1"
+ // NRIApiPlugin implements the NRI adaptation interface for containerd.
+ NRIApiPlugin plugin.Type = "io.containerd.nri.v1"
+ // TransferPlugin implements a transfer service
+ TransferPlugin plugin.Type = "io.containerd.transfer.v1"
+ // SandboxStorePlugin implements a sandbox store
+ SandboxStorePlugin plugin.Type = "io.containerd.sandbox.store.v1"
+ // SandboxControllerPlugin implements a sandbox controller
+ SandboxControllerPlugin plugin.Type = "io.containerd.sandbox.controller.v1"
+ // ImageVerifierPlugin implements an image verifier service
+ ImageVerifierPlugin plugin.Type = "io.containerd.image-verifier.v1"
+ // WarningPlugin implements a warning service
+ WarningPlugin plugin.Type = "io.containerd.warning.v1"
+ // CRIServicePlugin implements a cri service
+ CRIServicePlugin plugin.Type = "io.containerd.cri.v1"
+ // ShimPlugin implements a shim service
+ ShimPlugin plugin.Type = "io.containerd.shim.v1"
+)
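+
+// Sketch of registering a plugin against one of these types through the
+// containerd plugin registry (the ID and init function are illustrative):
+//
+//    registry.Register(&plugin.Registration{
+//        Type: plugins.SnapshotPlugin,
+//        ID:   "my-snapshotter",
+//        InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+//            return newMySnapshotter(ic.Properties[plugins.PropertyRootDir])
+//        },
+//    })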
+
+const (
+ // RuntimeRuncV2 is the runc runtime that supports multiple containers per shim
+ RuntimeRuncV2 = "io.containerd.runc.v2"
+
+ // RuntimeRunhcsV1 is the runtime type for runhcs.
+ RuntimeRunhcsV1 = "io.containerd.runhcs.v1"
+
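+ // DeprecationsPlugin is the plugin ID used for the deprecation warnings service.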
+ DeprecationsPlugin = "deprecations"
+)
+
+const (
+ // PropertyRootDir sets the root directory property for a plugin
+ PropertyRootDir = "io.containerd.plugin.root"
+ // PropertyStateDir sets the state directory property for a plugin
+ PropertyStateDir = "io.containerd.plugin.state"
+ // PropertyGRPCAddress is the grpc address used for client connections to containerd
+ PropertyGRPCAddress = "io.containerd.plugin.grpc.address"
+ // PropertyTTRPCAddress is the ttrpc address used for client connections to containerd
+ PropertyTTRPCAddress = "io.containerd.plugin.ttrpc.address"
+)
+
+const (
+ SnapshotterRootDir = "root"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/version/version.go b/vendor/github.com/containerd/containerd/v2/version/version.go
new file mode 100644
index 000000000000..cc3e81205071
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/version/version.go
@@ -0,0 +1,40 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package version
+
+import "runtime"
+
+var (
+ // Package is filled at linking time
+ Package = "github.com/containerd/containerd/v2"
+
+ // Version holds the complete version number. Filled in at linking time.
+ Version = "2.0.0-rc.4+unknown"
+
+ // Revision is filled with the VCS (e.g. git) revision being used to build
+ // the program at linking time.
+ Revision = ""
+
+ // GoVersion is the Go tree's version.
+ GoVersion = runtime.Version()
+)
+
+// ConfigVersion is the current highest supported configuration version.
+// This version is used by the main configuration as well as all plugins.
+// Any configuration below this version that has structural changes should be
+// migrated to the configuration structures used by this version.
+const ConfigVersion = 3
diff --git a/vendor/github.com/containerd/continuity/fs/copy.go b/vendor/github.com/containerd/continuity/fs/copy.go
index af3abdd4c49c..a3fef3c05718 100644
--- a/vendor/github.com/containerd/continuity/fs/copy.go
+++ b/vendor/github.com/containerd/continuity/fs/copy.go
@@ -103,11 +103,6 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er
}
}
- entries, err := os.ReadDir(src)
- if err != nil {
- return fmt.Errorf("failed to read %s: %w", src, err)
- }
-
if err := copyFileInfo(stat, src, dst); err != nil {
return fmt.Errorf("failed to copy file info for %s: %w", dst, err)
}
@@ -116,7 +111,15 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er
return fmt.Errorf("failed to copy xattrs: %w", err)
}
- for _, entry := range entries {
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ dr := &dirReader{f: f}
+
+ handleEntry := func(entry os.DirEntry) error {
source := filepath.Join(src, entry.Name())
target := filepath.Join(dst, entry.Name())
@@ -130,7 +133,7 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er
if err := copyDirectory(target, source, inodes, o); err != nil {
return err
}
- continue
+ return nil
case (fileInfo.Mode() & os.ModeType) == 0:
link, err := getLinkSource(target, fileInfo, inodes)
if err != nil {
@@ -159,7 +162,7 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er
}
default:
logrus.Warnf("unsupported mode: %s: %s", source, fileInfo.Mode())
- continue
+ return nil
}
if err := copyFileInfo(fileInfo, source, target); err != nil {
@@ -169,9 +172,20 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er
if err := copyXAttrs(target, source, o.xex, o.xeh); err != nil {
return fmt.Errorf("failed to copy xattrs: %w", err)
}
+ return nil
}
- return nil
+ for {
+ entry := dr.Next()
+ if entry == nil {
+ break
+ }
+
+ if err := handleEntry(entry); err != nil {
+ return err
+ }
+ }
+ return dr.Err()
}
// CopyFile copies the source file to the target.
diff --git a/vendor/github.com/containerd/continuity/fs/dir.go b/vendor/github.com/containerd/continuity/fs/dir.go
new file mode 100644
index 000000000000..6c7e32e95d18
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/dir.go
@@ -0,0 +1,53 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "io"
+ "os"
+)
+
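+// dirReader streams directory entries in fixed-size batches (32 at a time via
+// File.ReadDir) so that copying very large directories does not hold every
+// entry in memory at once, unlike a single os.ReadDir call.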
+type dirReader struct {
+ buf []os.DirEntry
+ f *os.File
+ err error
+}
+
+func (r *dirReader) Next() os.DirEntry {
+ if len(r.buf) == 0 {
+ infos, err := r.f.ReadDir(32)
+ if err != nil {
+ if err != io.EOF {
+ r.err = err
+ }
+ return nil
+ }
+ r.buf = infos
+ }
+
+ if len(r.buf) == 0 {
+ return nil
+ }
+ out := r.buf[0]
+ r.buf[0] = nil
+ r.buf = r.buf[1:]
+ return out
+}
+
+func (r *dirReader) Err() error {
+ return r.err
+}
diff --git a/vendor/github.com/containerd/fuse-overlayfs-snapshotter/.dockerignore b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/.dockerignore
new file mode 100644
index 000000000000..3c975c15712f
--- /dev/null
+++ b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/.dockerignore
@@ -0,0 +1,2 @@
+*.test
+bin
diff --git a/vendor/github.com/containerd/fuse-overlayfs-snapshotter/.gitignore b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/.gitignore
new file mode 100644
index 000000000000..43be5384778a
--- /dev/null
+++ b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/.gitignore
@@ -0,0 +1,3 @@
+*.test
+bin
+/_output
diff --git a/vendor/github.com/containerd/fuse-overlayfs-snapshotter/Dockerfile b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/Dockerfile
new file mode 100644
index 000000000000..cbd173ae191f
--- /dev/null
+++ b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/Dockerfile
@@ -0,0 +1,64 @@
+# Copyright The containerd Authors.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG FUSEOVERLAYFS_COMMIT=main
+ARG ROOTLESSKIT_COMMIT=v1.1.0
+
+ARG GO_VERSION=1.20
+ARG DEBIAN_VERSION=11
+ARG ALPINE_VERSION=3.18
+
+FROM golang:${GO_VERSION}-alpine AS containerd-fuse-overlayfs-test
+COPY . /go/src/github.com/containerd/fuse-overlayfs-snapshotter
+WORKDIR /go/src/github.com/containerd/fuse-overlayfs-snapshotter
+ENV CGO_ENABLED=0
+ENV GO111MODULE=on
+RUN mkdir /out && go test -c -o /out/containerd-fuse-overlayfs.test
+
+# from /~https://github.com/containers/fuse-overlayfs/blob/53c17dab78b43de1cd121bf9260b20b76371bbaf/Dockerfile.static.ubuntu
+FROM debian:${DEBIAN_VERSION} AS fuse-overlayfs
+RUN apt-get update && \
+ apt-get install --no-install-recommends -y \
+ git ca-certificates libc6-dev gcc g++ make automake autoconf clang pkgconf libfuse3-dev
+RUN git clone /~https://github.com/containers/fuse-overlayfs
+WORKDIR fuse-overlayfs
+ARG FUSEOVERLAYFS_COMMIT
+RUN git pull && git checkout ${FUSEOVERLAYFS_COMMIT}
+RUN ./autogen.sh && \
+ LIBS="-ldl" LDFLAGS="-static" ./configure && \
+ make && mkdir /out && cp fuse-overlayfs /out
+
+FROM golang:${GO_VERSION}-alpine AS rootlesskit
+RUN apk add --no-cache git
+RUN git clone /~https://github.com/rootless-containers/rootlesskit.git /go/src/github.com/rootless-containers/rootlesskit
+WORKDIR /go/src/github.com/rootless-containers/rootlesskit
+ARG ROOTLESSKIT_COMMIT
+RUN git pull && git checkout ${ROOTLESSKIT_COMMIT}
+ENV CGO_ENABLED=0
+RUN mkdir /out && go build -o /out/rootlesskit github.com/rootless-containers/rootlesskit/cmd/rootlesskit
+
+FROM alpine:${ALPINE_VERSION}
+COPY --from=containerd-fuse-overlayfs-test /out/containerd-fuse-overlayfs.test /usr/local/bin
+COPY --from=rootlesskit /out/rootlesskit /usr/local/bin
+COPY --from=fuse-overlayfs /out/fuse-overlayfs /usr/local/bin
+RUN apk add --no-cache fuse3 libcap shadow-uidmap && \
+ setcap CAP_SETUID=ep /usr/bin/newuidmap && \
+ setcap CAP_SETGID=ep /usr/bin/newgidmap && \
+ adduser -D -u 1000 testuser && \
+ echo testuser:100000:65536 | tee /etc/subuid | tee /etc/subgid
+USER testuser
+# If /tmp is real overlayfs, some tests fail. Mount a volume to ensure /tmp is a sane filesystem.
+VOLUME /tmp
+# requires --security-opt seccomp=unconfined --security-opt apparmor=unconfined --device /dev/fuse
+CMD ["rootlesskit", "containerd-fuse-overlayfs.test", "-test.root", "-test.v"]
diff --git a/vendor/github.com/containerd/fuse-overlayfs-snapshotter/LICENSE b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/LICENSE
new file mode 100644
index 000000000000..584149b6ee28
--- /dev/null
+++ b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright The containerd Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/fuse-overlayfs-snapshotter/MAINTAINERS b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/MAINTAINERS
new file mode 100644
index 000000000000..a35ce284e4f1
--- /dev/null
+++ b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/MAINTAINERS
@@ -0,0 +1,4 @@
+# fuse-overlayfs-snapshotter maintainers
+#
+# As a containerd sub-project, containerd maintainers are also included from /~https://github.com/containerd/project/blob/master/MAINTAINERS.
+# See /~https://github.com/containerd/project/blob/master/GOVERNANCE.md for description of maintainer role
diff --git a/vendor/github.com/containerd/fuse-overlayfs-snapshotter/Makefile b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/Makefile
new file mode 100644
index 000000000000..d646e3dd4f4a
--- /dev/null
+++ b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/Makefile
@@ -0,0 +1,108 @@
+# Copyright The containerd Authors.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Deliverables path
+DESTDIR ?= /usr/local
+BINDIR ?= $(DESTDIR)/bin
+
+# Tools path
+ECHO ?= echo
+DOCKER ?= docker
+GO ?= go
+MKDIR ?= mkdir
+TAR ?= tar
+INSTALL ?= install
+GIT ?= git
+
+TARGET_BIN=containerd-fuse-overlayfs-grpc
+
+VERSION ?= $(shell $(GIT) describe --match 'v[0-9]*' --dirty='.m' --always --tags)
+VERSION_TRIMMED := $(VERSION:v%=%)
+REVISION ?= $(shell $(GIT) rev-parse HEAD)$(shell if ! $(GIT) diff --no-ext-diff --quiet --exit-code; then $(ECHO) .m; fi)
+
+PKG_MAIN := github.com/containerd/fuse-overlayfs-snapshotter/cmd/$(TARGET_BIN)
+PKG_VERSION := github.com/containerd/fuse-overlayfs-snapshotter/cmd/$(TARGET_BIN)/version
+
+export GO_BUILD=GO111MODULE=on CGO_ENABLED=0 $(GO) build -ldflags "-s -w -X $(PKG_VERSION).Version=$(VERSION) -X $(PKG_VERSION).Revision=$(REVISION)"
+
+all: binaries
+
+help:
+ @$(ECHO) "Usage: make "
+ @$(ECHO)
+ @$(ECHO) " * 'install' - Install binaries to system locations."
+ @$(ECHO) " * 'uninstall' - Uninstall binaries from system."
+ @$(ECHO) " * 'binaries' - Build $(TARGET_BIN)."
+ @$(ECHO) " * 'test' - Run tests."
+ @$(ECHO) " * 'clean' - Clean artifacts."
+ @$(ECHO) " * 'help' - Show this help message."
+
+$(TARGET_BIN):
+ $(GO_BUILD) -o $(CURDIR)/bin/$@ $(PKG_MAIN)
+
+binaries: $(TARGET_BIN)
+
+install:
+ $(INSTALL) -D -m 755 $(CURDIR)/bin/$(TARGET_BIN) $(BINDIR)/$(TARGET_BIN)
+
+uninstall:
+ $(RM) $(BINDIR)/$(TARGET_BIN)
+
+clean:
+ $(RM) -r $(CURDIR)/bin $(CURDIR)/_output
+
+TEST_DOCKER_IMG_TAG=containerd-fuse-overlayfs-test
+
+test:
+ DOCKER_BUILDKIT=1 $(DOCKER) build -t $(TEST_DOCKER_IMG_TAG) --build-arg FUSEOVERLAYFS_COMMIT=${FUSEOVERLAYFS_COMMIT} .
+ $(DOCKER) run --rm $(TEST_DOCKER_IMG_TAG) fuse-overlayfs -V
+ $(DOCKER) run --rm --security-opt seccomp=unconfined --security-opt apparmor=unconfined --device /dev/fuse $(TEST_DOCKER_IMG_TAG)
+ $(DOCKER) rmi $(TEST_DOCKER_IMG_TAG)
+
+_test:
+ $(GO) test -exec rootlesskit -test.v -test.root
+
+TAR_FLAGS=--transform 's/.*\///g' --owner=0 --group=0
+
+ARTIFACT_NAME=containerd-fuse-overlayfs-$(VERSION_TRIMMED)
+
+artifacts: clean
+ $(MKDIR) -p _output
+ GOOS=linux GOARCH=amd64 make -B
+ $(TAR) $(TAR_FLAGS) -czvf _output/$(ARTIFACT_NAME)-linux-amd64.tar.gz $(CURDIR)/bin/*
+ GOOS=linux GOARCH=arm64 make -B
+ $(TAR) $(TAR_FLAGS) -czvf _output/$(ARTIFACT_NAME)-linux-arm64.tar.gz $(CURDIR)/bin/*
+ GOOS=linux GOARCH=arm GOARM=7 make -B
+ $(TAR) $(TAR_FLAGS) -czvf _output/$(ARTIFACT_NAME)-linux-arm-v7.tar.gz $(CURDIR)/bin/*
+ GOOS=linux GOARCH=ppc64le make -B
+ $(TAR) $(TAR_FLAGS) -czvf _output/$(ARTIFACT_NAME)-linux-ppc64le.tar.gz $(CURDIR)/bin/*
+ GOOS=linux GOARCH=s390x make -B
+ $(TAR) $(TAR_FLAGS) -czvf _output/$(ARTIFACT_NAME)-linux-s390x.tar.gz $(CURDIR)/bin/*
+ GOOS=linux GOARCH=riscv64 make -B
+ $(TAR) $(TAR_FLAGS) -czvf _output/$(ARTIFACT_NAME)-linux-riscv64.tar.gz $(CURDIR)/bin/*
+
+.PHONY: \
+ $(TARGET_BIN) \
+ install \
+ uninstall \
+ clean \
+ test \
+ _test \
+ artifacts \
+ help
diff --git a/vendor/github.com/containerd/fuse-overlayfs-snapshotter/README.md b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/README.md
new file mode 100644
index 000000000000..15648af873ca
--- /dev/null
+++ b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/README.md
@@ -0,0 +1,157 @@
+# [`fuse-overlayfs`](/~https://github.com/containers/fuse-overlayfs) snapshotter plugin for [containerd](https://containerd.io)
+
+Unlike `overlayfs`, `fuse-overlayfs` can be used as a non-root user on almost all recent distros.
+
+You do NOT need this `fuse-overlayfs` plugin in the following environments, because they support the real `overlayfs` for non-root users:
+- [kernel >= 5.11](/~https://github.com/torvalds/linux/commit/459c7c565ac36ba09ffbf24231147f408fde4203)
+- [Ubuntu kernel, since circa 2015](https://kernel.ubuntu.com/git/ubuntu/ubuntu-bionic.git/commit/fs/overlayfs?id=3b7da90f28fe1ed4b79ef2d994c81efbc58f1144)
+
+fuse-overlayfs-snapshotter is a **non-core** sub-project of containerd.
+
+## Requirements
+* kernel >= 4.18
+* containerd >= 1.4
+* fuse-overlayfs >= 0.7.0
+
+## Setup
+
+Two installation options are supported:
+1. Embed `fuse-overlayfs` plugin into the containerd binary
+2. Execute `fuse-overlayfs` plugin as a separate binary
+
+Choose option 1 if you don't mind recompiling containerd; otherwise choose option 2.
+
+### Option 1: Embed `fuse-overlayfs` plugin into the containerd binary
+
+Create `builtins_fuseoverlayfs_linux.go` under [`$GOPATH/src/github.com/containerd/containerd/cmd/containerd`](/~https://github.com/containerd/containerd/tree/master/cmd/containerd)
+with the following content, and recompile the containerd binary:
+
+```go
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package main
+
+import _ "github.com/containerd/fuse-overlayfs-snapshotter/plugin"
+```
+
+No extra configuration is needed.
+
+See /~https://github.com/containerd/containerd/blob/master/docs/rootless.md for how to run containerd as a non-root user.
+
+### Option 2: Execute `fuse-overlayfs` plugin as a separate binary
+
+#### "Easy way"
+
+The easiest way is to use `containerd-rootless-setuptool.sh` included in [nerdctl](/~https://github.com/containerd/nerdctl).
+
+```console
+$ containerd-rootless-setuptool.sh install
+$ containerd-rootless-setuptool.sh install-fuse-overlayfs
+[INFO] Creating "/home/exampleuser/.config/systemd/user/containerd-fuse-overlayfs.service"
+...
+[INFO] Installed "containerd-fuse-overlayfs.service" successfully.
+[INFO] To control "containerd-fuse-overlayfs.service", run: `systemctl --user (start|stop|restart) containerd-fuse-overlayfs.service`
+[INFO] Add the following lines to "/home/exampleuser/.config/containerd/config.toml" manually:
+### BEGIN ###
+[proxy_plugins]
+ [proxy_plugins."fuse-overlayfs"]
+ type = "snapshot"
+ address = "/run/user/1000/containerd-fuse-overlayfs.sock"
+### END ###
+[INFO] Set `export CONTAINERD_SNAPSHOTTER="fuse-overlayfs"` to use the fuse-overlayfs snapshotter.
+```
+
+Add the `[proxy_plugins."fuse-overlayfs"]` configuration shown above to `~/.config/containerd/config.toml`.
+"1000" needs to be replaced with your actual UID.
+
+#### "Hard way"
+
+<details>
+<summary>Click here to show the "hard way"</summary>
+
+<p>
+
+* Install `containerd-fuse-overlayfs-grpc` binary. The binary will be installed under `$DESTDIR/bin`.
+```console
+$ make && DESTDIR=$HOME make install
+```
+
+* Create the following configuration in `~/.config/containerd/config.toml`:
+```toml
+version = 2
+# substitute "/home/suda" with your own $HOME
+root = "/home/suda/.local/share/containerd"
+# substitute "/run/user/1001" with your own $XDG_RUNTIME_DIR
+state = "/run/user/1001/containerd"
+
+[grpc]
+ address = "/run/user/1001/containerd/containerd.sock"
+
+[proxy_plugins]
+ [proxy_plugins."fuse-overlayfs"]
+ type = "snapshot"
+ address = "/run/user/1001/containerd/fuse-overlayfs.sock"
+```
+
+* Start [RootlessKit](/~https://github.com/rootless-containers/rootlesskit) with `sleep infinity` (or any kind of "pause" command):
+```console
+$ rootlesskit \
+ --net=slirp4netns --disable-host-loopback \
+ --copy-up=/etc --copy-up=/run \
+ --state-dir=$XDG_RUNTIME_DIR/rootlesskit-containerd \
+ sh -c "rm -rf /run/containerd ; sleep infinity"
+```
+(Note: `rm -rf /run/containerd` is a workaround for [containerd/containerd#2767](/~https://github.com/containerd/containerd/issues/2767))
+
+* Enter the RootlessKit namespaces and run `containerd-fuse-overlayfs-grpc`:
+```console
+$ nsenter -U --preserve-credentials -m -n -t $(cat $XDG_RUNTIME_DIR/rootlesskit-containerd/child_pid) \
+ containerd-fuse-overlayfs-grpc $XDG_RUNTIME_DIR/containerd/fuse-overlayfs.sock $HOME/.local/share/containerd-fuse-overlayfs
+```
+
+* Enter the same namespaces and run `containerd`:
+```console
+$ nsenter -U --preserve-credentials -m -n -t $(cat $XDG_RUNTIME_DIR/rootlesskit-containerd/child_pid) \
+ containerd -c $HOME/.config/containerd/config.toml
+```
+
+</p>
+
+</details>
+
+## Usage
+
+```console
+$ export CONTAINERD_SNAPSHOTTER=fuse-overlayfs
+$ nerdctl run ...
+```
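+
+This snapshotter can also be embedded as a Go library rather than run as a
+proxy plugin. The following is a minimal sketch; the root path is illustrative
+and error handling is reduced to the essentials:
+
+```go
+package main
+
+import (
+    "context"
+    "log"
+
+    fuseoverlayfs "github.com/containerd/fuse-overlayfs-snapshotter"
+)
+
+func main() {
+    root := "/tmp/fuse-overlayfs-snapshots" // illustrative root directory
+
+    // Check that the fuse-overlayfs binary is installed and functional.
+    if err := fuseoverlayfs.Supported(root); err != nil {
+        log.Fatal(err)
+    }
+
+    sn, err := fuseoverlayfs.NewSnapshotter(root)
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer sn.Close()
+
+    // Prepare an active snapshot with no parent; without lower layers this
+    // yields a plain bind mount of the snapshot's upper directory.
+    mounts, err := sn.Prepare(context.Background(), "example-key", "")
+    if err != nil {
+        log.Fatal(err)
+    }
+    log.Printf("mounts: %+v", mounts)
+}
+```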
+
+## How to test
+
+To run the test as a non-root user, [RootlessKit](/~https://github.com/rootless-containers/rootlesskit) needs to be installed.
+
+```console
+$ go test -exec rootlesskit -test.v -test.root
+```
+
+## Project details
+fuse-overlayfs-snapshotter is a containerd **non-core** sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd non-core sub-project, you will find the:
+ * [Project governance](/~https://github.com/containerd/project/blob/master/GOVERNANCE.md),
+ * [Maintainers](./MAINTAINERS),
+ * and [Contributing guidelines](/~https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
+
+information in our [`containerd/project`](/~https://github.com/containerd/project) repository.
diff --git a/vendor/github.com/containerd/fuse-overlayfs-snapshotter/check.go b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/check.go
new file mode 100644
index 000000000000..e41268bc9311
--- /dev/null
+++ b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/check.go
@@ -0,0 +1,82 @@
+//go:build linux
+// +build linux
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fuseoverlayfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+
+ "github.com/containerd/containerd/mount"
+ "github.com/containerd/log"
+)
+
+// supportsReadonlyMultipleLowerDir checks if read-only multiple lowerdirs can be mounted with fuse-overlayfs.
+// /~https://github.com/containers/fuse-overlayfs/pull/133
+func supportsReadonlyMultipleLowerDir(d string) error {
+ td, err := ioutil.TempDir(d, "fuseoverlayfs-check")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := os.RemoveAll(td); err != nil {
+ log.L.WithError(err).Warnf("Failed to remove check directory %v", td)
+ }
+ }()
+
+ for _, dir := range []string{"lower1", "lower2", "merged"} {
+ if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil {
+ return err
+ }
+ }
+
+ opts := []string{fmt.Sprintf("lowerdir=%s:%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"))}
+ m := mount.Mount{
+ Type: "fuse3." + fuseoverlayfsBinary,
+ Source: "overlay",
+ Options: opts,
+ }
+ dest := filepath.Join(td, "merged")
+ if err := m.Mount(dest); err != nil {
+ return fmt.Errorf("failed to mount fuse-overlayfs (%+v) on %s: %w", m, dest, err)
+ }
+ if err := mount.UnmountAll(dest, 0); err != nil {
+ log.L.WithError(err).Warnf("Failed to unmount check directory %v", dest)
+ }
+ return nil
+}
+
+// Supported returns nil when fuse-overlayfs is functional on the system with the given root directory.
+// Supported is not called during plugin initialization, but is exposed for downstream projects which use
+// this snapshotter as a library.
+func Supported(root string) error {
+ if _, err := exec.LookPath(fuseoverlayfsBinary); err != nil {
+ return fmt.Errorf("%s not installed: %w", fuseoverlayfsBinary, err)
+ }
+ if err := os.MkdirAll(root, 0700); err != nil {
+ return err
+ }
+ if err := supportsReadonlyMultipleLowerDir(root); err != nil {
+ return fmt.Errorf("fuse-overlayfs not functional, make sure running with kernel >= 4.18: %w", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/containerd/fuse-overlayfs-snapshotter/fuseoverlayfs.go b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/fuseoverlayfs.go
new file mode 100644
index 000000000000..d5a1b9699076
--- /dev/null
+++ b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/fuseoverlayfs.go
@@ -0,0 +1,511 @@
+//go:build linux
+// +build linux
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fuseoverlayfs
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/containerd/containerd/mount"
+ "github.com/containerd/containerd/snapshots"
+ "github.com/containerd/containerd/snapshots/storage"
+ "github.com/containerd/continuity/fs"
+ "github.com/containerd/log"
+)
+
+const (
+ fuseoverlayfsBinary = "fuse-overlayfs"
+)
+
+// SnapshotterConfig is used to configure the overlay snapshotter instance
+type SnapshotterConfig struct {
+ asyncRemove bool
+}
+
+// Opt is an option to configure the overlay snapshotter
+type Opt func(config *SnapshotterConfig) error
+
+// AsynchronousRemove defers removal of filesystem content until
+// the Cleanup method is called. Removals will make the snapshot
+// referred to by the key unavailable and make the key immediately
+// available for re-use.
+//
+// AsynchronousRemove is untested for fuse-overlayfs
+func AsynchronousRemove(config *SnapshotterConfig) error {
+ config.asyncRemove = true
+ return nil
+}
+
+type snapshotter struct {
+ root string
+ ms *storage.MetaStore
+ asyncRemove bool
+}
+
+// NewSnapshotter returns a Snapshotter which uses fuse-overlayfs. The overlay
+// diffs are stored under the provided root. A metadata file is stored under
+// the root.
+func NewSnapshotter(root string, opts ...Opt) (snapshots.Snapshotter, error) {
+ var config SnapshotterConfig
+ for _, opt := range opts {
+ if err := opt(&config); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := os.MkdirAll(root, 0700); err != nil {
+ return nil, err
+ }
+ ms, err := storage.NewMetaStore(filepath.Join(root, "metadata.db"))
+ if err != nil {
+ return nil, err
+ }
+
+ if err := os.Mkdir(filepath.Join(root, "snapshots"), 0700); err != nil && !os.IsExist(err) {
+ return nil, err
+ }
+
+ return &snapshotter{
+ root: root,
+ ms: ms,
+ asyncRemove: config.asyncRemove,
+ }, nil
+}
+
+// Stat returns the info for an active or committed snapshot by name or
+// key.
+//
+// Should be used for parent resolution, existence checks and to discern
+// the kind of snapshot.
+func (o *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) {
+ ctx, t, err := o.ms.TransactionContext(ctx, false)
+ if err != nil {
+ return snapshots.Info{}, err
+ }
+ defer t.Rollback()
+ _, info, _, err := storage.GetInfo(ctx, key)
+ if err != nil {
+ return snapshots.Info{}, err
+ }
+
+ return info, nil
+}
+
+func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
+ ctx, t, err := o.ms.TransactionContext(ctx, true)
+ if err != nil {
+ return snapshots.Info{}, err
+ }
+
+ info, err = storage.UpdateInfo(ctx, info, fieldpaths...)
+ if err != nil {
+ t.Rollback()
+ return snapshots.Info{}, err
+ }
+
+ if err := t.Commit(); err != nil {
+ return snapshots.Info{}, err
+ }
+
+ return info, nil
+}
+
+// Usage returns the resources taken by the snapshot identified by key.
+//
+// For active snapshots, this will scan the usage of the overlay "diff" (aka
+// "upper") directory and may take some time.
+//
+// For committed snapshots, the value is returned from the metadata database.
+func (o *snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) {
+ ctx, t, err := o.ms.TransactionContext(ctx, false)
+ if err != nil {
+ return snapshots.Usage{}, err
+ }
+ id, info, usage, err := storage.GetInfo(ctx, key)
+ t.Rollback() // transaction no longer needed at this point.
+
+ if err != nil {
+ return snapshots.Usage{}, err
+ }
+
+ upperPath := o.upperPath(id)
+
+ if info.Kind == snapshots.KindActive {
+ du, err := fs.DiskUsage(ctx, upperPath)
+ if err != nil {
+ // TODO(stevvooe): Consider not reporting an error in this case.
+ return snapshots.Usage{}, err
+ }
+
+ usage = snapshots.Usage(du)
+ }
+
+ return usage, nil
+}
+
+func (o *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
+ return o.createSnapshot(ctx, snapshots.KindActive, key, parent, opts)
+}
+
+func (o *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
+ return o.createSnapshot(ctx, snapshots.KindView, key, parent, opts)
+}
+
+// Mounts returns the mounts for the transaction identified by key. Can be
+// called on a read-write or readonly transaction.
+//
+// This can be used to recover mounts after calling View or Prepare.
+func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
+ ctx, t, err := o.ms.TransactionContext(ctx, false)
+ if err != nil {
+ return nil, err
+ }
+ s, err := storage.GetSnapshot(ctx, key)
+ if err != nil {
+ t.Rollback()
+ return nil, err
+ }
+ _, info, _, err := storage.GetInfo(ctx, key)
+ t.Rollback()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get active mount: %w", err)
+ }
+ return o.mounts(s, info), nil
+}
+
+func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
+ ctx, t, err := o.ms.TransactionContext(ctx, true)
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ if err != nil {
+ if rerr := t.Rollback(); rerr != nil {
+ log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
+ }
+ }
+ }()
+
+ // grab the existing id
+ id, _, _, err := storage.GetInfo(ctx, key)
+ if err != nil {
+ return err
+ }
+
+ usage, err := fs.DiskUsage(ctx, o.upperPath(id))
+ if err != nil {
+ return err
+ }
+
+ if _, err = storage.CommitActive(ctx, key, name, snapshots.Usage(usage), opts...); err != nil {
+ return fmt.Errorf("failed to commit snapshot: %w", err)
+ }
+ return t.Commit()
+}
+
+// Remove abandons the snapshot identified by key. The snapshot will
+// immediately become unavailable and unrecoverable. Disk space will
+// be freed up on the next call to `Cleanup`.
+func (o *snapshotter) Remove(ctx context.Context, key string) (err error) {
+ ctx, t, err := o.ms.TransactionContext(ctx, true)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if rerr := t.Rollback(); rerr != nil {
+ log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
+ }
+ }
+ }()
+
+ _, _, err = storage.Remove(ctx, key)
+ if err != nil {
+ return fmt.Errorf("failed to remove: %w", err)
+ }
+
+ if !o.asyncRemove {
+ var removals []string
+ removals, err = o.getCleanupDirectories(ctx, t)
+ if err != nil {
+ return fmt.Errorf("unable to get directories for removal: %w", err)
+ }
+
+ // Remove directories after the transaction is closed; failures must not
+ // return an error since the transaction is committed and the removal
+ // key is no longer available.
+ defer func() {
+ if err == nil {
+ for _, dir := range removals {
+ if err := os.RemoveAll(dir); err != nil {
+ log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory")
+ }
+ }
+ }
+ }()
+
+ }
+
+ return t.Commit()
+}
+
+// Walk the committed snapshots.
+func (o *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error {
+ ctx, t, err := o.ms.TransactionContext(ctx, false)
+ if err != nil {
+ return err
+ }
+ defer t.Rollback()
+ return storage.WalkInfo(ctx, fn, fs...)
+}
+
+// Cleanup cleans up disk resources from removed or abandoned snapshots
+func (o *snapshotter) Cleanup(ctx context.Context) error {
+ cleanup, err := o.cleanupDirectories(ctx)
+ if err != nil {
+ return err
+ }
+
+ for _, dir := range cleanup {
+ if err := os.RemoveAll(dir); err != nil {
+ log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory")
+ }
+ }
+
+ return nil
+}
+
+func (o *snapshotter) cleanupDirectories(ctx context.Context) ([]string, error) {
+ // Get a write transaction to ensure no other write transaction can be entered
+ // while the cleanup is scanning.
+ ctx, t, err := o.ms.TransactionContext(ctx, true)
+ if err != nil {
+ return nil, err
+ }
+
+ defer t.Rollback()
+ return o.getCleanupDirectories(ctx, t)
+}
+
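+// getCleanupDirectories returns the snapshot directories under root/snapshots
+// that no longer have a corresponding entry in the metadata store and can
+// therefore be removed.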
+func (o *snapshotter) getCleanupDirectories(ctx context.Context, t storage.Transactor) ([]string, error) {
+ ids, err := storage.IDMap(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ snapshotDir := filepath.Join(o.root, "snapshots")
+ fd, err := os.Open(snapshotDir)
+ if err != nil {
+ return nil, err
+ }
+ defer fd.Close()
+
+ dirs, err := fd.Readdirnames(0)
+ if err != nil {
+ return nil, err
+ }
+
+ cleanup := []string{}
+ for _, d := range dirs {
+ if _, ok := ids[d]; ok {
+ continue
+ }
+
+ cleanup = append(cleanup, filepath.Join(snapshotDir, d))
+ }
+
+ return cleanup, nil
+}
+
+func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ []mount.Mount, err error) {
+ ctx, t, err := o.ms.TransactionContext(ctx, true)
+ if err != nil {
+ return nil, err
+ }
+
+ var td, path string
+ defer func() {
+ if err != nil {
+ if td != "" {
+ if err1 := os.RemoveAll(td); err1 != nil {
+ log.G(ctx).WithError(err1).Warn("failed to cleanup temp snapshot directory")
+ }
+ }
+ if path != "" {
+ if err1 := os.RemoveAll(path); err1 != nil {
+ log.G(ctx).WithError(err1).WithField("path", path).Error("failed to reclaim snapshot directory, directory may need removal")
+ err = fmt.Errorf("failed to remove path: %v: %w", err1, err)
+ }
+ }
+ }
+ }()
+
+ snapshotDir := filepath.Join(o.root, "snapshots")
+ td, err = o.prepareDirectory(ctx, snapshotDir, kind)
+ if err != nil {
+ if rerr := t.Rollback(); rerr != nil {
+ log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
+ }
+ return nil, fmt.Errorf("failed to create prepare snapshot dir: %w", err)
+ }
+ rollback := true
+ defer func() {
+ if rollback {
+ if rerr := t.Rollback(); rerr != nil {
+ log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
+ }
+ }
+ }()
+
+ s, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create snapshot: %w", err)
+ }
+
+ if len(s.ParentIDs) > 0 {
+ st, err := os.Stat(o.upperPath(s.ParentIDs[0]))
+ if err != nil {
+ return nil, fmt.Errorf("failed to stat parent: %w", err)
+ }
+
+ stat := st.Sys().(*syscall.Stat_t)
+
+ if err := os.Lchown(filepath.Join(td, "fs"), int(stat.Uid), int(stat.Gid)); err != nil {
+ if rerr := t.Rollback(); rerr != nil {
+ log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
+ }
+ return nil, fmt.Errorf("failed to chown: %w", err)
+ }
+ }
+
+ path = filepath.Join(snapshotDir, s.ID)
+ if err = os.Rename(td, path); err != nil {
+ return nil, fmt.Errorf("failed to rename: %w", err)
+ }
+ td = ""
+
+ _, info, _, err := storage.GetInfo(ctx, key)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get snapshot info: %w", err)
+ }
+
+ rollback = false
+ if err = t.Commit(); err != nil {
+ return nil, fmt.Errorf("commit failed: %w", err)
+ }
+
+ return o.mounts(s, info), nil
+}
+
+func (o *snapshotter) prepareDirectory(ctx context.Context, snapshotDir string, kind snapshots.Kind) (string, error) {
+ td, err := ioutil.TempDir(snapshotDir, "new-")
+ if err != nil {
+ return "", fmt.Errorf("failed to create temp dir: %w", err)
+ }
+
+ if err := os.Mkdir(filepath.Join(td, "fs"), 0755); err != nil {
+ return td, err
+ }
+
+ if kind == snapshots.KindActive {
+ if err := os.Mkdir(filepath.Join(td, "work"), 0711); err != nil {
+ return td, err
+ }
+ }
+
+ return td, nil
+}
+
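+// mounts translates a snapshot into containerd mount specifications: a plain
+// bind mount when the snapshot has no parent (or a read-only view of a single
+// parent), and a fuse3.fuse-overlayfs mount with lowerdir/upperdir/workdir
+// options otherwise.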
+func (o *snapshotter) mounts(s storage.Snapshot, info snapshots.Info) []mount.Mount {
+ if len(s.ParentIDs) == 0 {
+ // if we only have one layer/no parents then just return a bind mount as overlay
+ // will not work
+ roFlag := "rw"
+ if s.Kind == snapshots.KindView {
+ roFlag = "ro"
+ }
+
+ return []mount.Mount{
+ {
+ Source: o.upperPath(s.ID),
+ Type: "bind",
+ Options: []string{
+ roFlag,
+ "rbind",
+ },
+ },
+ }
+ }
+ var options []string
+
+ if s.Kind == snapshots.KindActive {
+ options = append(options,
+ fmt.Sprintf("workdir=%s", o.workPath(s.ID)),
+ fmt.Sprintf("upperdir=%s", o.upperPath(s.ID)),
+ )
+ } else if len(s.ParentIDs) == 1 {
+ return []mount.Mount{
+ {
+ Source: o.upperPath(s.ParentIDs[0]),
+ Type: "bind",
+ Options: []string{
+ "ro",
+ "rbind",
+ },
+ },
+ }
+ }
+
+ parentPaths := make([]string, len(s.ParentIDs))
+ for i := range s.ParentIDs {
+ parentPaths[i] = o.upperPath(s.ParentIDs[i])
+ }
+
+ options = append(options, fmt.Sprintf("lowerdir=%s", strings.Join(parentPaths, ":")))
+ if mapping, ok := info.Labels["containerd.io/snapshot/uidmapping"]; ok {
+ options = append(options, fmt.Sprintf("uidmapping=%s", mapping))
+ }
+ if mapping, ok := info.Labels["containerd.io/snapshot/gidmapping"]; ok {
+ options = append(options, fmt.Sprintf("gidmapping=%s", mapping))
+ }
+ return []mount.Mount{
+ {
+ Type: "fuse3." + fuseoverlayfsBinary,
+ Source: "overlay",
+ Options: options,
+ },
+ }
+
+}
+
+func (o *snapshotter) upperPath(id string) string {
+ return filepath.Join(o.root, "snapshots", id, "fs")
+}
+
+func (o *snapshotter) workPath(id string) string {
+ return filepath.Join(o.root, "snapshots", id, "work")
+}
+
+// Close closes the snapshotter
+func (o *snapshotter) Close() error {
+ return o.ms.Close()
+}
diff --git a/vendor/github.com/containerd/fuse-overlayfs-snapshotter/plugin/plugin.go b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/plugin/plugin.go
new file mode 100644
index 000000000000..40256f9fbeac
--- /dev/null
+++ b/vendor/github.com/containerd/fuse-overlayfs-snapshotter/plugin/plugin.go
@@ -0,0 +1,58 @@
+//go:build linux
+// +build linux
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fuseoverlayfs
+
+import (
+ "errors"
+
+ "github.com/containerd/containerd/platforms"
+ "github.com/containerd/containerd/plugin"
+ fuseoverlayfs "github.com/containerd/fuse-overlayfs-snapshotter"
+)
+
+// Config represents configuration for the fuse-overlayfs plugin.
+type Config struct {
+ // Root directory for the plugin
+ RootPath string `toml:"root_path"`
+}
+
+func init() {
+ plugin.Register(&plugin.Registration{
+ Type: plugin.SnapshotPlugin,
+ ID: "fuse-overlayfs",
+ Config: &Config{},
+ InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+ ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec())
+
+ config, ok := ic.Config.(*Config)
+ if !ok {
+ return nil, errors.New("invalid fuse-overlayfs configuration")
+ }
+
+ root := ic.Root
+ if config.RootPath != "" {
+ root = config.RootPath
+ }
+
+ ic.Meta.Exports["root"] = root
+ return fuseoverlayfs.NewSnapshotter(root)
+ },
+ })
+}
diff --git a/vendor/github.com/containerd/go-cni/result.go b/vendor/github.com/containerd/go-cni/result.go
index 02957ddc4ed8..0dd002e8e068 100644
--- a/vendor/github.com/containerd/go-cni/result.go
+++ b/vendor/github.com/containerd/go-cni/result.go
@@ -57,9 +57,11 @@ func (r *Result) Raw() []*types100.Result {
}
type Config struct {
- IPConfigs []*IPConfig
- Mac string
- Sandbox string
+ IPConfigs []*IPConfig
+ Mac string
+ Sandbox string
+ PciID string
+ SocketPath string
}
// createResult creates a Result from the given slice of types100.Result, adding
@@ -84,8 +86,10 @@ func (c *libcni) createResult(results []*types100.Result) (*Result, error) {
// Walk through all the interface in each result
for _, intf := range result.Interfaces {
r.Interfaces[intf.Name] = &Config{
- Mac: intf.Mac,
- Sandbox: intf.Sandbox,
+ Mac: intf.Mac,
+ Sandbox: intf.Sandbox,
+ SocketPath: intf.SocketPath,
+ PciID: intf.PciID,
}
}
// Walk through all the IPs in the result and attach it to corresponding
diff --git a/vendor/github.com/containerd/imgcrypt/CHANGES b/vendor/github.com/containerd/imgcrypt/CHANGES
index 7ed15cd7d1a9..b60bf588cef8 100644
--- a/vendor/github.com/containerd/imgcrypt/CHANGES
+++ b/vendor/github.com/containerd/imgcrypt/CHANGES
@@ -1,5 +1,15 @@
CHANGES
+v1.1.10:
+ - Updated to ocicrypt v1.1.10
+ - Added test cases with a JWK EC key and added 2 more RSA keys
+ - Sync'ed enc-ctr with ctr of containerd v1.6.30
+ - Updated dependencies
+
+v1.1.9:
+ - Updated to ocicrypt v1.1.9
+ - Updated dependencies
+
v1.1.8:
- Updated to containerd v1.6.23
- Sync'ed enc-ctr with ctr of containerd v1.6.23
diff --git a/vendor/github.com/containerd/imgcrypt/Makefile b/vendor/github.com/containerd/imgcrypt/Makefile
index df6d9b523767..5584a5fc7c05 100644
--- a/vendor/github.com/containerd/imgcrypt/Makefile
+++ b/vendor/github.com/containerd/imgcrypt/Makefile
@@ -34,10 +34,10 @@ build: $(BINARIES)
FORCE:
bin/ctd-decoder: cmd/ctd-decoder FORCE
- go build -o $@ -v ./cmd/ctd-decoder/
+ cd cmd && go build -o ../$@ -v ./ctd-decoder/
bin/ctr-enc: cmd/ctr FORCE
- go build -o $@ ${CTR_LDFLAGS} -v ./cmd/ctr/
+ cd cmd && go build -o ../$@ ${CTR_LDFLAGS} -v ./ctr/
check:
@echo "$@"
diff --git a/vendor/github.com/containerd/imgcrypt/README.md b/vendor/github.com/containerd/imgcrypt/README.md
index 0d00fa6a5594..9d394437506c 100644
--- a/vendor/github.com/containerd/imgcrypt/README.md
+++ b/vendor/github.com/containerd/imgcrypt/README.md
@@ -25,7 +25,8 @@ installation we use /tmp for directories. Also, we build containerd 1.3 from the
```
# cat config.toml
-disable_plugins = ["cri"]
+version = 2
+disabled_plugins = ["io.containerd.grpc.v1.cri"]
root = "/tmp/var/lib/containerd"
state = "/tmp/run/containerd"
[grpc]
diff --git a/vendor/github.com/containerd/plugin/.golangci.yml b/vendor/github.com/containerd/plugin/.golangci.yml
new file mode 100644
index 000000000000..a695775df49d
--- /dev/null
+++ b/vendor/github.com/containerd/plugin/.golangci.yml
@@ -0,0 +1,30 @@
+linters:
+ enable:
+ - exportloopref # Checks for pointers to enclosing loop variables
+ - gofmt
+ - goimports
+ - gosec
+ - ineffassign
+ - misspell
+ - nolintlint
+ - revive
+ - staticcheck
+ - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17
+ - unconvert
+ - unused
+ - vet
+ - dupword # Checks for duplicate words in the source code
+ disable:
+ - errcheck
+
+run:
+ timeout: 5m
+ skip-dirs:
+ - api
+ - cluster
+ - design
+ - docs
+ - docs/man
+ - releases
+ - reports
+ - test # e2e scripts
diff --git a/vendor/github.com/containerd/plugin/LICENSE b/vendor/github.com/containerd/plugin/LICENSE
new file mode 100644
index 000000000000..584149b6ee28
--- /dev/null
+++ b/vendor/github.com/containerd/plugin/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright The containerd Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/plugin/README.md b/vendor/github.com/containerd/plugin/README.md
new file mode 100644
index 000000000000..eb1684f68a57
--- /dev/null
+++ b/vendor/github.com/containerd/plugin/README.md
@@ -0,0 +1,19 @@
+# plugin
+
+A Go package providing a common plugin interface across containerd repositories.
+
+This package is intended to be imported by the main containerd repository as well as by plugin implementations.
+By sharing a common implementation, plugins can register themselves without needing to import the main containerd repository.
+This package is intended to provide an interface and common functionality, but is not intended to define the plugin types used by containerd.
+Plugins should copy plugin type strings to avoid creating unintended dependencies.
+
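+A minimal, self-contained sketch of how a host process might drive this
+package: build a registry, order it with `Graph`, initialize each plugin, and
+look an instance back up. The plugin type string below is hypothetical; real
+type strings are defined by the importing project, not by this package.
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/containerd/plugin"
+)
+
+// exampleType is a hypothetical plugin type string.
+const exampleType = plugin.Type("io.containerd.example.v1")
+
+func main() {
+    registry := plugin.Registry{
+        {
+            Type: exampleType,
+            ID:   "demo",
+            InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+                return "demo-instance", nil
+            },
+        },
+    }
+
+    plugins := plugin.NewPluginSet()
+    // Graph orders registrations by their Requires dependencies; the filter
+    // can disable plugins, but nothing is filtered out here.
+    for _, r := range registry.Graph(func(*plugin.Registration) bool { return false }) {
+        ic := plugin.NewContext(context.Background(), plugins, nil)
+        if err := plugins.Add(r.Init(ic)); err != nil {
+            panic(err)
+        }
+    }
+
+    instance, err := plugins.Get(exampleType, "demo").Instance()
+    if err != nil {
+        panic(err)
+    }
+    fmt.Println(instance) // prints "demo-instance"
+}
+```
+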
+## Project details
+
+**plugin** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](/~https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](/~https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](/~https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](/~https://github.com/containerd/project) repository.
+
diff --git a/vendor/github.com/containerd/plugin/context.go b/vendor/github.com/containerd/plugin/context.go
new file mode 100644
index 000000000000..3b985ab75eb8
--- /dev/null
+++ b/vendor/github.com/containerd/plugin/context.go
@@ -0,0 +1,199 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package plugin
+
+import (
+ "context"
+ "fmt"
+
+ imagespec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// InitContext is used for plugin initialization
+type InitContext struct {
+ Context context.Context
+ Properties map[string]string
+ Config interface{}
+ RegisterReadiness func() func()
+
+ // Meta is metadata plugins can fill in at init
+ Meta *Meta
+
+ plugins *Set
+}
+
+// NewContext returns a new plugin InitContext
+func NewContext(ctx context.Context, plugins *Set, properties map[string]string) *InitContext {
+ if properties == nil {
+ properties = map[string]string{}
+ }
+ return &InitContext{
+ Context: ctx,
+ Properties: properties,
+ Meta: &Meta{
+ Exports: map[string]string{},
+ },
+ plugins: plugins,
+ }
+}
+
+// Meta contains information gathered from the registration and initialization
+// process.
+type Meta struct {
+ Platforms []imagespec.Platform // platforms supported by plugin
+ Exports map[string]string // values exported by plugin
+ Capabilities []string // feature switches for plugin
+}
+
+// Plugin represents an initialized plugin, used with an init context.
+type Plugin struct {
+ Registration Registration // registration, as initialized
+ Config interface{} // config, as initialized
+ Meta Meta
+
+ instance interface{}
+ err error // will be set if there was an error initializing the plugin
+}
+
+// Err returns the error encountered during initialization.
+// It returns nil if no error was encountered.
+func (p *Plugin) Err() error {
+ return p.err
+}
+
+// Instance returns the instance and any initialization error of the plugin
+func (p *Plugin) Instance() (interface{}, error) {
+ return p.instance, p.err
+}
+
+// Set defines a plugin collection, used with InitContext.
+//
+// This maintains ordering and unique indexing over the set.
+//
+// After iteratively instantiating plugins, this set should represent the
+// ordered initialization set of plugins for a containerd instance.
+type Set struct {
+ ordered []*Plugin // order of initialization
+ byTypeAndID map[Type]map[string]*Plugin
+}
+
+// NewPluginSet returns an initialized plugin set
+func NewPluginSet() *Set {
+ return &Set{
+ byTypeAndID: make(map[Type]map[string]*Plugin),
+ }
+}
+
+// Add a plugin to the set
+func (ps *Set) Add(p *Plugin) error {
+ if byID, typeok := ps.byTypeAndID[p.Registration.Type]; !typeok {
+ ps.byTypeAndID[p.Registration.Type] = map[string]*Plugin{
+ p.Registration.ID: p,
+ }
+ } else if _, idok := byID[p.Registration.ID]; !idok {
+ byID[p.Registration.ID] = p
+ } else {
+ return fmt.Errorf("plugin add failed for %s: %w", p.Registration.URI(), ErrPluginInitialized)
+ }
+
+ ps.ordered = append(ps.ordered, p)
+ return nil
+}
+
+// Get returns the plugin with the given type and id
+func (ps *Set) Get(t Type, id string) *Plugin {
+ p, ok := ps.byTypeAndID[t]
+ if !ok {
+ return nil
+ }
+ return p[id]
+}
+
+// GetAll returns all initialized plugins
+func (ps *Set) GetAll() []*Plugin {
+ return ps.ordered
+}
+
+// GetSingle returns a plugin instance of the given type when only a single instance
+// of that type is expected. It returns ErrPluginNotFound if no plugin is found and
+// ErrPluginMultipleInstances when multiple instances are found.
+// Since plugins are not ordered, GetByType should be used if multiple instances are
+// supported. If only one is expected, then to switch plugins, disable or remove the
+// unused plugins of the same type.
+func (i *InitContext) GetSingle(t Type) (interface{}, error) {
+ var (
+ found bool
+ instance interface{}
+ )
+ for _, v := range i.plugins.byTypeAndID[t] {
+ i, err := v.Instance()
+ if err != nil {
+ if IsSkipPlugin(err) {
+ continue
+ }
+ return i, err
+ }
+ if found {
+ return nil, fmt.Errorf("multiple plugins registered for %s: %w", t, ErrPluginMultipleInstances)
+ }
+ instance = i
+ found = true
+ }
+ if !found {
+ return nil, fmt.Errorf("no plugins registered for %s: %w", t, ErrPluginNotFound)
+ }
+ return instance, nil
+}
+
+// Plugins returns the plugin set
+func (i *InitContext) Plugins() *Set {
+ return i.plugins
+}
+
+// GetAll plugins in the set
+func (i *InitContext) GetAll() []*Plugin {
+ return i.plugins.GetAll()
+}
+
+// GetByID returns the plugin of the given type and ID
+func (i *InitContext) GetByID(t Type, id string) (interface{}, error) {
+ p := i.plugins.Get(t, id)
+ if p == nil {
+ return nil, fmt.Errorf("no plugins registered for %s.%s: %w", t, id, ErrPluginNotFound)
+ }
+ return p.Instance()
+}
+
+// GetByType returns all plugins with the specific type.
+func (i *InitContext) GetByType(t Type) (map[string]interface{}, error) {
+ pi := map[string]interface{}{}
+ for id, p := range i.plugins.byTypeAndID[t] {
+ i, err := p.Instance()
+ if err != nil {
+ if IsSkipPlugin(err) {
+ continue
+ }
+ return nil, err
+ }
+ pi[id] = i
+ }
+ if len(pi) == 0 {
+ return nil, fmt.Errorf("no plugins registered for %s: %w", t, ErrPluginNotFound)
+ }
+
+ return pi, nil
+}
diff --git a/vendor/github.com/containerd/plugin/plugin.go b/vendor/github.com/containerd/plugin/plugin.go
new file mode 100644
index 000000000000..f7899e184f31
--- /dev/null
+++ b/vendor/github.com/containerd/plugin/plugin.go
@@ -0,0 +1,178 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package plugin
+
+import (
+ "context"
+ "errors"
+ "fmt"
+)
+
+var (
+ // ErrNoType is returned when no type is specified
+ ErrNoType = errors.New("plugin: no type")
+ // ErrNoPluginID is returned when no id is specified
+ ErrNoPluginID = errors.New("plugin: no id")
+ // ErrIDRegistered is returned when a duplicate id is already registered
+ ErrIDRegistered = errors.New("plugin: id already registered")
+ // ErrSkipPlugin is used when a plugin is not initialized and should not be loaded;
+ // this allows the plugin loader to differentiate between a plugin which is configured
+ // not to load and one that fails to load.
+ ErrSkipPlugin = errors.New("skip plugin")
+ // ErrPluginInitialized is used when a plugin is already initialized
+ ErrPluginInitialized = errors.New("plugin: already initialized")
+ // ErrPluginNotFound is used when a plugin is looked up but not found
+ ErrPluginNotFound = errors.New("plugin: not found")
+ // ErrPluginMultipleInstances is used when a plugin is expected to have a single instance but has multiple
+ ErrPluginMultipleInstances = errors.New("plugin: multiple instances")
+
+ // ErrInvalidRequires is returned if the requirements for a plugin are
+ // defined in an invalid manner.
+ ErrInvalidRequires = errors.New("invalid requires")
+)
+
+// IsSkipPlugin returns true if the error indicates that the plugin is being skipped
+func IsSkipPlugin(err error) bool {
+ return errors.Is(err, ErrSkipPlugin)
+}
+
+// Type is the type of the plugin
+type Type string
+
+func (t Type) String() string { return string(t) }
+
+// Registration contains information for registering a plugin
+type Registration struct {
+ // Type of the plugin
+ Type Type
+ // ID of the plugin
+ ID string
+ // Config specific to the plugin
+ Config interface{}
+ // Requires is a list of plugins that the registered plugin requires to be available
+ Requires []Type
+
+ // InitFn is called when initializing a plugin. The registration and
+ // context are passed in. The init function may modify the registration to
+ // add exports, capabilities and platform support declarations.
+ InitFn func(*InitContext) (interface{}, error)
+
+ // ConfigMigration allows a plugin to migrate configurations from an older
+ // version to handle plugin renames or moving of features from one plugin
+ // to another in a later version.
+ // The configuration map is keyed by the plugin name and the value
+ // is the configuration for that object, with the structure defined
+ // for the plugin. No validation is done on the value before performing
+ // the migration.
+ ConfigMigration func(context.Context, int, map[string]interface{}) error
+}
+
+// Init initializes the registered plugin
+func (r Registration) Init(ic *InitContext) *Plugin {
+ p, err := r.InitFn(ic)
+ return &Plugin{
+ Registration: r,
+ Config: ic.Config,
+ Meta: *ic.Meta,
+ instance: p,
+ err: err,
+ }
+}
+
+// URI returns the full plugin URI
+func (r *Registration) URI() string {
+ return r.Type.String() + "." + r.ID
+}
+
+// DisableFilter filters out disabled plugins
+type DisableFilter func(r *Registration) bool
+
+// Registry is a list of registrations which can be registered to and
+// which produces a filtered and ordered output.
+// The Registry itself is immutable and the list will be copied
+// and appended into a new registry when new items are registered.
+type Registry []*Registration
+
+// Graph computes the ordered list of registrations based on their dependencies,
+// filtering out any plugins which match the provided filter.
+func (registry Registry) Graph(filter DisableFilter) []Registration {
+ disabled := map[*Registration]bool{}
+ for _, r := range registry {
+ if filter(r) {
+ disabled[r] = true
+ }
+ }
+
+ ordered := make([]Registration, 0, len(registry)-len(disabled))
+ added := map[*Registration]bool{}
+ for _, r := range registry {
+ if disabled[r] {
+ continue
+ }
+ children(r, registry, added, disabled, &ordered)
+ if !added[r] {
+ ordered = append(ordered, *r)
+ added[r] = true
+ }
+ }
+ return ordered
+}
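+
+// For illustration only: if registration A has Requires: []Type{"b"} and
+// registration B has Type "b", Graph emits B before A so that A's InitFn can
+// resolve B through its InitContext. With a filter that disables nothing:
+//
+//	ordered := reg.Graph(func(*Registration) bool { return false })
+//	for _, r := range ordered {
+//		_ = r.URI() // initialization order respects Requires
+//	}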
+
+func children(reg *Registration, registry []*Registration, added, disabled map[*Registration]bool, ordered *[]Registration) {
+ for _, t := range reg.Requires {
+ for _, r := range registry {
+ if !disabled[r] && r.URI() != reg.URI() && (t == "*" || r.Type == t) {
+ children(r, registry, added, disabled, ordered)
+ if !added[r] {
+ *ordered = append(*ordered, *r)
+ added[r] = true
+ }
+ }
+ }
+ }
+}
+
+// Register adds the registration to a Registry and returns the
+// updated Registry, panicking if registration could not succeed.
+func (registry Registry) Register(r *Registration) Registry {
+ if r.Type == "" {
+ panic(ErrNoType)
+ }
+ if r.ID == "" {
+ panic(ErrNoPluginID)
+ }
+ if err := checkUnique(registry, r); err != nil {
+ panic(err)
+ }
+
+ for _, requires := range r.Requires {
+ if requires == "*" && len(r.Requires) != 1 {
+ panic(ErrInvalidRequires)
+ }
+ }
+
+ return append(registry, r)
+}
+
+func checkUnique(registry Registry, r *Registration) error {
+ for _, registered := range registry {
+ if r.URI() == registered.URI() {
+ return fmt.Errorf("%s: %w", r.URI(), ErrIDRegistered)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containerd/plugin/registry/register.go b/vendor/github.com/containerd/plugin/registry/register.go
new file mode 100644
index 000000000000..57bb201bfc35
--- /dev/null
+++ b/vendor/github.com/containerd/plugin/registry/register.go
@@ -0,0 +1,50 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package registry
+
+import (
+ "sync"
+
+ "github.com/containerd/plugin"
+)
+
+var register = struct {
+ sync.RWMutex
+ r plugin.Registry
+}{}
+
+// Register adds the registration to the global plugin registry
+func Register(r *plugin.Registration) {
+ register.Lock()
+ defer register.Unlock()
+ register.r = register.r.Register(r)
+}
+
+// Reset removes all global registrations
+func Reset() {
+ register.Lock()
+ defer register.Unlock()
+ register.r = nil
+}
+
+// Graph returns an ordered list of registered plugins for initialization.
+// Plugins matched by the provided DisableFilter are excluded from the result.
+func Graph(filter plugin.DisableFilter) []plugin.Registration {
+ register.RLock()
+ defer register.RUnlock()
+ return register.r.Graph(filter)
+}
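+
+// For illustration only: plugin packages conventionally register themselves
+// from an init function and are activated by a blank import of the package.
+// The type, ID and constructor below are hypothetical placeholders:
+//
+//	func init() {
+//		registry.Register(&plugin.Registration{
+//			Type: "io.containerd.example.v1",
+//			ID:   "example",
+//			InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+//				return newExample(), nil
+//			},
+//		})
+//	}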
diff --git a/vendor/github.com/go-jose/go-jose/v3/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/LICENSE
similarity index 100%
rename from vendor/github.com/go-jose/go-jose/v3/LICENSE
rename to vendor/github.com/containerd/stargz-snapshotter/LICENSE
diff --git a/vendor/github.com/containerd/stargz-snapshotter/NOTICE.md b/vendor/github.com/containerd/stargz-snapshotter/NOTICE.md
new file mode 100644
index 000000000000..c907e4216c9c
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/NOTICE.md
@@ -0,0 +1,67 @@
+The source code developed under the Stargz Snapshotter Project is licensed under Apache License 2.0.
+
+However, the Stargz Snapshotter project contains modified subcomponents from the Container Registry Filesystem Project with separate copyright notices and license terms. Your use of the source code for these subcomponents is subject to the terms and conditions as defined by the source project. Files in these subcomponents contain the following file header.
+
+```
+Copyright 2019 The Go Authors. All rights reserved.
+Use of this source code is governed by a BSD-style
+license that can be found in the NOTICE.md file.
+```
+
+This source code is governed by a 3-Clause BSD license. The copyright notice, list of conditions and disclaimer are as follows.
+
+```
+Copyright (c) 2019 Google LLC. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+```
+
+The Stargz Snapshotter project also contains modified benchmarking code from the HelloBench Project with separate copyright notices and license terms. Your use of the source code for the benchmarking code is subject to the terms and conditions as defined by the source project. This source code is governed by an MIT license. The copyright notice, conditions and disclaimer are as follows; the file in the benchmarking code carries them as its file header.
+
+```
+The MIT License (MIT)
+
+Copyright (c) 2015 Tintri
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+```
diff --git a/vendor/github.com/containerd/stargz-snapshotter/cache/cache.go b/vendor/github.com/containerd/stargz-snapshotter/cache/cache.go
new file mode 100644
index 000000000000..417ae4b2b822
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/cache/cache.go
@@ -0,0 +1,440 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package cache
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/containerd/stargz-snapshotter/util/cacheutil"
+ "github.com/containerd/stargz-snapshotter/util/namedmutex"
+ "github.com/hashicorp/go-multierror"
+)
+
+const (
+ defaultMaxLRUCacheEntry = 10
+ defaultMaxCacheFds = 10
+)
+
+type DirectoryCacheConfig struct {
+ // Number of entries of LRU cache (default: 10).
+ // This won't be used when DataCache is specified.
+ MaxLRUCacheEntry int
+
+ // Number of file descriptors to cache (default: 10).
+ // This won't be used when FdCache is specified.
+ MaxCacheFds int
+
+ // On Add, wait until the data is fully written to the cache directory.
+ SyncAdd bool
+
+ // DataCache is an on-memory cache of the data.
+ // OnEvicted will be overridden and replaced for internal use.
+ DataCache *cacheutil.LRUCache
+
+ // FdCache is a cache for opened file descriptors.
+ // OnEvicted will be overridden and replaced for internal use.
+ FdCache *cacheutil.LRUCache
+
+ // BufPool will be used for pooling bytes.Buffer.
+ BufPool *sync.Pool
+
+ // Direct forcefully enables direct mode for all operations in the cache.
+ // Thus operations won't use the on-memory caches.
+ Direct bool
+}
+
+// TODO: contents validation.
+
+// BlobCache represents a cache for byte data
+type BlobCache interface {
+ // Add returns a writer to add contents to cache
+ Add(key string, opts ...Option) (Writer, error)
+
+ // Get returns a reader to read the specified contents
+ // from cache
+ Get(key string, opts ...Option) (Reader, error)
+
+ // Close closes the cache
+ Close() error
+}
+
+// Reader provides access to the cached data.
+type Reader interface {
+ io.ReaderAt
+ Close() error
+}
+
+// Writer enables the client to cache byte data. Commit() must be
+// called after all data has been written via Write(). To abort the written
+// data, Abort() must be called.
+type Writer interface {
+ io.WriteCloser
+ Commit() error
+ Abort() error
+}
+
+type cacheOpt struct {
+ direct bool
+}
+
+type Option func(o *cacheOpt) *cacheOpt
+
+// Direct option makes the FetchAt and Add methods bypass the on-memory caches. When
+// you know that the target value won't be used immediately, this prevents
+// the limited space of the on-memory caches from being polluted by unimportant
+// values.
+func Direct() Option {
+ return func(o *cacheOpt) *cacheOpt {
+ o.direct = true
+ return o
+ }
+}
+
+func NewDirectoryCache(directory string, config DirectoryCacheConfig) (BlobCache, error) {
+ if !filepath.IsAbs(directory) {
+ return nil, fmt.Errorf("dir cache path must be an absolute path; got %q", directory)
+ }
+ bufPool := config.BufPool
+ if bufPool == nil {
+ bufPool = &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ }
+ }
+ dataCache := config.DataCache
+ if dataCache == nil {
+ maxEntry := config.MaxLRUCacheEntry
+ if maxEntry == 0 {
+ maxEntry = defaultMaxLRUCacheEntry
+ }
+ dataCache = cacheutil.NewLRUCache(maxEntry)
+ dataCache.OnEvicted = func(key string, value interface{}) {
+ value.(*bytes.Buffer).Reset()
+ bufPool.Put(value)
+ }
+ }
+ fdCache := config.FdCache
+ if fdCache == nil {
+ maxEntry := config.MaxCacheFds
+ if maxEntry == 0 {
+ maxEntry = defaultMaxCacheFds
+ }
+ fdCache = cacheutil.NewLRUCache(maxEntry)
+ fdCache.OnEvicted = func(key string, value interface{}) {
+ value.(*os.File).Close()
+ }
+ }
+ if err := os.MkdirAll(directory, 0700); err != nil {
+ return nil, err
+ }
+ wipdir := filepath.Join(directory, "wip")
+ if err := os.MkdirAll(wipdir, 0700); err != nil {
+ return nil, err
+ }
+ dc := &directoryCache{
+ cache: dataCache,
+ fileCache: fdCache,
+ wipLock: new(namedmutex.NamedMutex),
+ directory: directory,
+ wipDirectory: wipdir,
+ bufPool: bufPool,
+ direct: config.Direct,
+ }
+ dc.syncAdd = config.SyncAdd
+ return dc, nil
+}
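+
+// For illustration only, the Add/Commit/Get protocol over a directory cache
+// might look like this sketch (the path, key and data are placeholders):
+//
+//	bc, err := NewDirectoryCache("/var/cache/example", DirectoryCacheConfig{})
+//	if err != nil {
+//		return err
+//	}
+//	w, err := bc.Add("0123abcd")
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := w.Write(data); err != nil {
+//		w.Abort() // discard the partially written contents
+//	} else {
+//		w.Commit() // publish the contents under the key
+//	}
+//	w.Close()
+//	r, err := bc.Get("0123abcd") // r is an io.ReaderAt over the cached bytes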
+
+// directoryCache is a cache implementation whose backend is a directory.
+type directoryCache struct {
+ cache *cacheutil.LRUCache
+ fileCache *cacheutil.LRUCache
+ wipDirectory string
+ directory string
+ wipLock *namedmutex.NamedMutex
+
+ bufPool *sync.Pool
+
+ syncAdd bool
+ direct bool
+
+ closed bool
+ closedMu sync.Mutex
+}
+
+func (dc *directoryCache) Get(key string, opts ...Option) (Reader, error) {
+ if dc.isClosed() {
+ return nil, fmt.Errorf("cache is already closed")
+ }
+
+ opt := &cacheOpt{}
+ for _, o := range opts {
+ opt = o(opt)
+ }
+
+ if !dc.direct && !opt.direct {
+ // Get data from memory
+ if b, done, ok := dc.cache.Get(key); ok {
+ return &reader{
+ ReaderAt: bytes.NewReader(b.(*bytes.Buffer).Bytes()),
+ closeFunc: func() error {
+ done()
+ return nil
+ },
+ }, nil
+ }
+
+ // Get data from disk. If the file is already opened, use it.
+ if f, done, ok := dc.fileCache.Get(key); ok {
+ return &reader{
+ ReaderAt: f.(*os.File),
+ closeFunc: func() error {
+ done() // file will be closed when it's evicted from the cache
+ return nil
+ },
+ }, nil
+ }
+ }
+
+ // Open the cache file and read the target region
+ // TODO: If the target cache is write-in-progress, should we wait for the completion
+ // or simply report the cache miss?
+ file, err := os.Open(dc.cachePath(key))
+ if err != nil {
+ return nil, fmt.Errorf("failed to open blob file for %q: %w", key, err)
+ }
+
+ // If "direct" option is specified, do not cache the file on memory.
+ // This option is useful for preventing memory cache from being polluted by data
+ // that won't be accessed immediately.
+ if dc.direct || opt.direct {
+ return &reader{
+ ReaderAt: file,
+ closeFunc: func() error { return file.Close() },
+ }, nil
+ }
+
+ // TODO: should we cache the entire file data on memory?
+ // but making (possibly huge) I/O on every fetch
+ // might be costly.
+ return &reader{
+ ReaderAt: file,
+ closeFunc: func() error {
+ _, done, added := dc.fileCache.Add(key, file)
+ defer done() // Release it immediately. Cleaned up on eviction.
+ if !added {
+ return file.Close() // file already exists in the cache. close it.
+ }
+ return nil
+ },
+ }, nil
+}
+
+func (dc *directoryCache) Add(key string, opts ...Option) (Writer, error) {
+ if dc.isClosed() {
+ return nil, fmt.Errorf("cache is already closed")
+ }
+
+ opt := &cacheOpt{}
+ for _, o := range opts {
+ opt = o(opt)
+ }
+
+ wip, err := dc.wipFile(key)
+ if err != nil {
+ return nil, err
+ }
+ w := &writer{
+ WriteCloser: wip,
+ commitFunc: func() error {
+ if dc.isClosed() {
+ return fmt.Errorf("cache is already closed")
+ }
+ // Commit the cache contents
+ c := dc.cachePath(key)
+ if err := os.MkdirAll(filepath.Dir(c), os.ModePerm); err != nil {
+ var allErr error
+ if err := os.Remove(wip.Name()); err != nil {
+ allErr = multierror.Append(allErr, err)
+ }
+ return multierror.Append(allErr,
+ fmt.Errorf("failed to create cache directory %q: %w", c, err))
+ }
+ return os.Rename(wip.Name(), c)
+ },
+ abortFunc: func() error {
+ return os.Remove(wip.Name())
+ },
+ }
+
+ // If "direct" option is specified, do not cache the passed data on memory.
+ // This option is useful for preventing memory cache from being polluted by data
+ // that won't be accessed immediately.
+ if dc.direct || opt.direct {
+ return w, nil
+ }
+
+ b := dc.bufPool.Get().(*bytes.Buffer)
+ memW := &writer{
+ WriteCloser: nopWriteCloser(io.Writer(b)),
+ commitFunc: func() error {
+ if dc.isClosed() {
+ w.Close()
+ return fmt.Errorf("cache is already closed")
+ }
+ cached, done, added := dc.cache.Add(key, b)
+ if !added {
+ dc.putBuffer(b) // already exists in the cache. abort it.
+ }
+ commit := func() error {
+ defer done()
+ defer w.Close()
+ n, err := w.Write(cached.(*bytes.Buffer).Bytes())
+ if err != nil || n != cached.(*bytes.Buffer).Len() {
+ w.Abort()
+ return err
+ }
+ return w.Commit()
+ }
+ if dc.syncAdd {
+ return commit()
+ }
+ go func() {
+ if err := commit(); err != nil {
+ fmt.Println("failed to commit to file:", err)
+ }
+ }()
+ return nil
+ },
+ abortFunc: func() error {
+ defer w.Close()
+ defer w.Abort()
+ dc.putBuffer(b) // abort it.
+ return nil
+ },
+ }
+
+ return memW, nil
+}
+
+func (dc *directoryCache) putBuffer(b *bytes.Buffer) {
+ b.Reset()
+ dc.bufPool.Put(b)
+}
+
+func (dc *directoryCache) Close() error {
+ dc.closedMu.Lock()
+ defer dc.closedMu.Unlock()
+ if dc.closed {
+ return nil
+ }
+ dc.closed = true
+ return os.RemoveAll(dc.directory)
+}
+
+func (dc *directoryCache) isClosed() bool {
+ dc.closedMu.Lock()
+ closed := dc.closed
+ dc.closedMu.Unlock()
+ return closed
+}
+
+func (dc *directoryCache) cachePath(key string) string {
+ return filepath.Join(dc.directory, key[:2], key)
+}
+
+func (dc *directoryCache) wipFile(key string) (*os.File, error) {
+ return os.CreateTemp(dc.wipDirectory, key+"-*")
+}
+
+func NewMemoryCache() BlobCache {
+ return &MemoryCache{
+ Membuf: map[string]*bytes.Buffer{},
+ }
+}
+
+// MemoryCache is a cache implementation whose backend is in-memory.
+type MemoryCache struct {
+ Membuf map[string]*bytes.Buffer
+ mu sync.Mutex
+}
+
+func (mc *MemoryCache) Get(key string, opts ...Option) (Reader, error) {
+ mc.mu.Lock()
+ defer mc.mu.Unlock()
+ b, ok := mc.Membuf[key]
+ if !ok {
+ return nil, fmt.Errorf("Missed cache: %q", key)
+ }
+ return &reader{bytes.NewReader(b.Bytes()), func() error { return nil }}, nil
+}
+
+func (mc *MemoryCache) Add(key string, opts ...Option) (Writer, error) {
+ b := new(bytes.Buffer)
+ return &writer{
+ WriteCloser: nopWriteCloser(io.Writer(b)),
+ commitFunc: func() error {
+ mc.mu.Lock()
+ defer mc.mu.Unlock()
+ mc.Membuf[key] = b
+ return nil
+ },
+ abortFunc: func() error { return nil },
+ }, nil
+}
+
+func (mc *MemoryCache) Close() error {
+ return nil
+}
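+
+// For illustration only, MemoryCache follows the same Writer/Reader protocol
+// as directoryCache but stores committed buffers in a map, which makes it
+// convenient for tests:
+//
+//	mc := NewMemoryCache()
+//	w, _ := mc.Add("key")
+//	w.Write([]byte("payload"))
+//	w.Commit() // stores the buffer under "key"
+//	w.Close()
+//	r, _ := mc.Get("key")
+//	buf := make([]byte, 7)
+//	r.ReadAt(buf, 0) // buf now holds "payload"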
+
+type reader struct {
+ io.ReaderAt
+ closeFunc func() error
+}
+
+func (r *reader) Close() error { return r.closeFunc() }
+
+type writer struct {
+ io.WriteCloser
+ commitFunc func() error
+ abortFunc func() error
+}
+
+func (w *writer) Commit() error {
+ return w.commitFunc()
+}
+
+func (w *writer) Abort() error {
+ return w.abortFunc()
+}
+
+type writeCloser struct {
+ io.Writer
+ closeFunc func() error
+}
+
+func (w *writeCloser) Close() error { return w.closeFunc() }
+
+func nopWriteCloser(w io.Writer) io.WriteCloser {
+ return &writeCloser{w, func() error { return nil }}
+}
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
similarity index 100%
rename from vendor/google.golang.org/appengine/LICENSE
rename to vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
new file mode 100644
index 000000000000..6aba0ef1f692
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
@@ -0,0 +1,689 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ "github.com/klauspost/compress/zstd"
+ digest "github.com/opencontainers/go-digest"
+ "golang.org/x/sync/errgroup"
+)
+
+type options struct {
+ chunkSize int
+ compressionLevel int
+ prioritizedFiles []string
+ missedPrioritizedFiles *[]string
+ compression Compression
+ ctx context.Context
+ minChunkSize int
+}
+
+type Option func(o *options) error
+
+// WithChunkSize option specifies the chunk size of the eStargz blob to build.
+func WithChunkSize(chunkSize int) Option {
+ return func(o *options) error {
+ o.chunkSize = chunkSize
+ return nil
+ }
+}
+
+// WithCompressionLevel option specifies the gzip compression level.
+// The default is gzip.BestCompression.
+// This option will be ignored if WithCompression option is used.
+// See also: https://godoc.org/compress/gzip#pkg-constants
+func WithCompressionLevel(level int) Option {
+ return func(o *options) error {
+ o.compressionLevel = level
+ return nil
+ }
+}
+
+// WithPrioritizedFiles option specifies the list of prioritized files.
+// These files must be complete paths that are absolute or relative to "/"
+// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
+// are treated as "/foo/bar".
+func WithPrioritizedFiles(files []string) Option {
+ return func(o *options) error {
+ o.prioritizedFiles = files
+ return nil
+ }
+}
+
+// WithAllowPrioritizeNotFound makes Build continue the execution even if some
+// of the prioritized files specified by the WithPrioritizedFiles option aren't found
+// in the input tar. Instead, it records all missed file names to the passed
+// slice.
+func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
+ return func(o *options) error {
+ if missedFiles == nil {
+ return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
+ }
+ o.missedPrioritizedFiles = missedFiles
+ return nil
+ }
+}
+
+// WithCompression specifies compression algorithm to be used.
+// Default is gzip.
+func WithCompression(compression Compression) Option {
+ return func(o *options) error {
+ o.compression = compression
+ return nil
+ }
+}
+
+// WithContext specifies a context that can be used for clean cancellation.
+func WithContext(ctx context.Context) Option {
+ return func(o *options) error {
+ o.ctx = ctx
+ return nil
+ }
+}
+
+// WithMinChunkSize option specifies the minimal number of bytes of data that
+// must be written in one gzip stream.
+// By increasing this number, one gzip stream can contain multiple files
+// and it hopefully leads to a smaller resulting blob.
+// NOTE: This adds a TOC property that old readers don't understand.
+func WithMinChunkSize(minChunkSize int) Option {
+ return func(o *options) error {
+ o.minChunkSize = minChunkSize
+ return nil
+ }
+}
+
+// Blob is an eStargz blob.
+type Blob struct {
+ io.ReadCloser
+ diffID digest.Digester
+ tocDigest digest.Digest
+}
+
+// DiffID returns the digest of uncompressed blob.
+// It is only valid to call DiffID after Close.
+func (b *Blob) DiffID() digest.Digest {
+ return b.diffID.Digest()
+}
+
+// TOCDigest returns the digest of uncompressed TOC JSON.
+func (b *Blob) TOCDigest() digest.Digest {
+ return b.tocDigest
+}
+
+// Build builds an eStargz blob, which is an extended version of stargz, from a blob (gzip, zstd
+// or plain tar) passed through the argument. If prioritized files are listed in
+// the option, these files are grouped as "prioritized" and can be used for runtime optimization
+// (e.g. prefetch). This function builds the blob in parallel, dividing it into several
+// (at least runtime.GOMAXPROCS(0)) sub-blobs.
+func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
+ var opts options
+ opts.compressionLevel = gzip.BestCompression // BestCompression by default
+ for _, o := range opt {
+ if err := o(&opts); err != nil {
+ return nil, err
+ }
+ }
+ if opts.compression == nil {
+ opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
+ }
+ layerFiles := newTempFiles()
+ ctx := opts.ctx
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ done := make(chan struct{})
+ defer close(done)
+ go func() {
+ select {
+ case <-done:
+ // nop
+ case <-ctx.Done():
+ layerFiles.CleanupAll()
+ }
+ }()
+ defer func() {
+ if rErr != nil {
+ if err := layerFiles.CleanupAll(); err != nil {
+ rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
+ }
+ }
+ if cErr := ctx.Err(); cErr != nil {
+ rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
+ }
+ }()
+ tarBlob, err := decompressBlob(tarBlob, layerFiles)
+ if err != nil {
+ return nil, err
+ }
+ entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles)
+ if err != nil {
+ return nil, err
+ }
+ var tarParts [][]*entry
+ if opts.minChunkSize > 0 {
+ // Each entry needs to know the size of the current gzip stream so they
+ // cannot be processed in parallel.
+ tarParts = [][]*entry{entries}
+ } else {
+ tarParts = divideEntries(entries, runtime.GOMAXPROCS(0))
+ }
+ writers := make([]*Writer, len(tarParts))
+ payloads := make([]*os.File, len(tarParts))
+ var mu sync.Mutex
+ var eg errgroup.Group
+ for i, parts := range tarParts {
+ i, parts := i, parts
+ // builds verifiable stargz sub-blobs
+ eg.Go(func() error {
+ esgzFile, err := layerFiles.TempFile("", "esgzdata")
+ if err != nil {
+ return err
+ }
+ sw := NewWriterWithCompressor(esgzFile, opts.compression)
+ sw.ChunkSize = opts.chunkSize
+ sw.MinChunkSize = opts.minChunkSize
+ if sw.needsOpenGzEntries == nil {
+ sw.needsOpenGzEntries = make(map[string]struct{})
+ }
+ for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} {
+ sw.needsOpenGzEntries[f] = struct{}{}
+ }
+ if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
+ return err
+ }
+ mu.Lock()
+ writers[i] = sw
+ payloads[i] = esgzFile
+ mu.Unlock()
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ rErr = err
+ return nil, err
+ }
+ tocAndFooter, tocDgst, err := closeWithCombine(writers...)
+ if err != nil {
+ rErr = err
+ return nil, err
+ }
+ var rs []io.Reader
+ for _, p := range payloads {
+ fs, err := fileSectionReader(p)
+ if err != nil {
+ return nil, err
+ }
+ rs = append(rs, fs)
+ }
+ diffID := digest.Canonical.Digester()
+ pr, pw := io.Pipe()
+ go func() {
+ r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ defer r.Close()
+ if _, err := io.Copy(diffID.Hash(), r); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ pw.Close()
+ }()
+ return &Blob{
+ ReadCloser: readCloser{
+ Reader: pr,
+ closeFunc: layerFiles.CleanupAll,
+ },
+ tocDigest: tocDgst,
+ diffID: diffID,
+ }, nil
+}
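+
+// For illustration only, a caller converting a tar layer into an eStargz blob
+// might look like this sketch (the file name and prioritized path are
+// placeholders):
+//
+//	f, _ := os.Open("layer.tar")
+//	info, _ := f.Stat()
+//	blob, err := Build(io.NewSectionReader(f, 0, info.Size()),
+//		WithChunkSize(4<<20),
+//		WithPrioritizedFiles([]string{"bin/sh"}),
+//	)
+//	if err != nil {
+//		return err
+//	}
+//	// Stream blob (an io.ReadCloser) to its destination, then Close it;
+//	// DiffID() and TOCDigest() are read afterwards for the image metadata.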
+
+// closeWithCombine takes unclosed Writers and closes them. It also returns the
+// TOC that combines all the Writers.
+// The Writers don't write the TOC and footer to the underlying writers, so they can be
+// combined into a single eStargz, and the tocAndFooter returned by this function can
+// be appended at the tail of that combined blob.
+func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
+ if len(ws) == 0 {
+ return nil, "", fmt.Errorf("at least one writer must be passed")
+ }
+ for _, w := range ws {
+ if w.closed {
+ return nil, "", fmt.Errorf("writer must be unclosed")
+ }
+ defer func(w *Writer) { w.closed = true }(w)
+ if err := w.closeGz(); err != nil {
+ return nil, "", err
+ }
+ if err := w.bw.Flush(); err != nil {
+ return nil, "", err
+ }
+ }
+ var (
+ mtoc = new(JTOC)
+ currentOffset int64
+ )
+ mtoc.Version = ws[0].toc.Version
+ for _, w := range ws {
+ for _, e := range w.toc.Entries {
+ // Recalculate Offset of non-empty files/chunks
+ if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
+ e.Offset += currentOffset
+ }
+ mtoc.Entries = append(mtoc.Entries, e)
+ }
+ if w.toc.Version > mtoc.Version {
+ mtoc.Version = w.toc.Version
+ }
+ currentOffset += w.cw.n
+ }
+
+ return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
+}
+
+func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
+ buf := new(bytes.Buffer)
+ tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ return buf, tocDigest, nil
+}
+
+// divideEntries divides the passed entries into at least the number of parts
+// specified by the argument.
+func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
+ var estimatedSize int64
+ for _, e := range entries {
+ estimatedSize += e.header.Size
+ }
+ unitSize := estimatedSize / int64(minPartsNum)
+ var (
+ nextEnd = unitSize
+ offset int64
+ )
+ set = append(set, []*entry{})
+ for _, e := range entries {
+ set[len(set)-1] = append(set[len(set)-1], e)
+ offset += e.header.Size
+ if offset > nextEnd {
+ set = append(set, []*entry{})
+ nextEnd += unitSize
+ }
+ }
+ return
+}
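+
+// For illustration only: with entry sizes 60, 50 and 70 and minPartsNum=2,
+// estimatedSize is 180 and unitSize is 90; the first part accumulates entries
+// until the running offset exceeds 90 (60+50=110), so the result is the two
+// parts [60 50] and [70].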
+
+var errNotFound = errors.New("not found")
+
+// sortEntries reads the specified tar blob and returns a list of tar entries.
+// If prioritized files are specified, the list starts with these
+// files, keeping the order given by the argument.
+func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
+
+ // Import tar file.
+ intar, err := importTar(in)
+ if err != nil {
+ return nil, fmt.Errorf("failed to sort: %w", err)
+ }
+
+ // Sort the tar file, respecting the prioritized files list.
+ sorted := &tarFile{}
+ for _, l := range prioritized {
+ if err := moveRec(l, intar, sorted); err != nil {
+ if errors.Is(err, errNotFound) && missedPrioritized != nil {
+ *missedPrioritized = append(*missedPrioritized, l)
+ continue // allow not found
+ }
+ return nil, fmt.Errorf("failed to sort tar entries: %w", err)
+ }
+ }
+ if len(prioritized) == 0 {
+ sorted.add(&entry{
+ header: &tar.Header{
+ Name: NoPrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ },
+ payload: bytes.NewReader([]byte{landmarkContents}),
+ })
+ } else {
+ sorted.add(&entry{
+ header: &tar.Header{
+ Name: PrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ },
+ payload: bytes.NewReader([]byte{landmarkContents}),
+ })
+ }
+
+ // Dump all entries and concatenate them.
+ return append(sorted.dump(), intar.dump()...), nil
+}
+
+// readerFromEntries returns a reader of tar archive that contains entries passed
+// through the arguments.
+func readerFromEntries(entries ...*entry) io.Reader {
+ pr, pw := io.Pipe()
+ go func() {
+ tw := tar.NewWriter(pw)
+ defer tw.Close()
+ for _, entry := range entries {
+ if err := tw.WriteHeader(entry.header); err != nil {
+ pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err))
+ return
+ }
+ if _, err := io.Copy(tw, entry.payload); err != nil {
+ pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err))
+ return
+ }
+ }
+ pw.Close()
+ }()
+ return pr
+}
+
+func importTar(in io.ReaderAt) (*tarFile, error) {
+ tf := &tarFile{}
+ pw, err := newCountReadSeeker(in)
+ if err != nil {
+ return nil, fmt.Errorf("failed to make position watcher: %w", err)
+ }
+ tr := tar.NewReader(pw)
+
+ // Walk through all nodes.
+ for {
+ // Fetch and parse next header.
+ h, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, fmt.Errorf("failed to parse tar file, %w", err)
+ }
+ switch cleanEntryName(h.Name) {
+ case PrefetchLandmark, NoPrefetchLandmark:
+ // Ignore existing landmark
+ continue
+ }
+
+ // Add entry. If it already exists, replace it.
+ if _, ok := tf.get(h.Name); ok {
+ tf.remove(h.Name)
+ }
+ tf.add(&entry{
+ header: h,
+ payload: io.NewSectionReader(in, pw.currentPos(), h.Size),
+ })
+ }
+
+ return tf, nil
+}
+
+func moveRec(name string, in *tarFile, out *tarFile) error {
+ name = cleanEntryName(name)
+ if name == "" { // root directory. stop recursion.
+ if e, ok := in.get(name); ok {
+ // entry of the root directory exists. we should move it as well.
+ // this case will occur if tar entries are prefixed with "./", "/", etc.
+ out.add(e)
+ in.remove(name)
+ }
+ return nil
+ }
+
+ _, okIn := in.get(name)
+ _, okOut := out.get(name)
+ if !okIn && !okOut {
+ return fmt.Errorf("file: %q: %w", name, errNotFound)
+ }
+
+ parent, _ := path.Split(strings.TrimSuffix(name, "/"))
+ if err := moveRec(parent, in, out); err != nil {
+ return err
+ }
+ if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink {
+ if err := moveRec(e.header.Linkname, in, out); err != nil {
+ return err
+ }
+ }
+ if e, ok := in.get(name); ok {
+ out.add(e)
+ in.remove(name)
+ }
+ return nil
+}
+
+type entry struct {
+ header *tar.Header
+ payload io.ReadSeeker
+}
+
+type tarFile struct {
+ index map[string]*entry
+ stream []*entry
+}
+
+func (f *tarFile) add(e *entry) {
+ if f.index == nil {
+ f.index = make(map[string]*entry)
+ }
+ f.index[cleanEntryName(e.header.Name)] = e
+ f.stream = append(f.stream, e)
+}
+
+func (f *tarFile) remove(name string) {
+ name = cleanEntryName(name)
+ if f.index != nil {
+ delete(f.index, name)
+ }
+ var filtered []*entry
+ for _, e := range f.stream {
+ if cleanEntryName(e.header.Name) == name {
+ continue
+ }
+ filtered = append(filtered, e)
+ }
+ f.stream = filtered
+}
+
+func (f *tarFile) get(name string) (e *entry, ok bool) {
+ if f.index == nil {
+ return nil, false
+ }
+ e, ok = f.index[cleanEntryName(name)]
+ return
+}
+
+func (f *tarFile) dump() []*entry {
+ return f.stream
+}
+
+type readCloser struct {
+ io.Reader
+ closeFunc func() error
+}
+
+func (rc readCloser) Close() error {
+ return rc.closeFunc()
+}
+
+func fileSectionReader(file *os.File) (*io.SectionReader, error) {
+ info, err := file.Stat()
+ if err != nil {
+ return nil, err
+ }
+ return io.NewSectionReader(file, 0, info.Size()), nil
+}
+
+func newTempFiles() *tempFiles {
+ return &tempFiles{}
+}
+
+type tempFiles struct {
+ files []*os.File
+ filesMu sync.Mutex
+ cleanupOnce sync.Once
+}
+
+func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
+ f, err := os.CreateTemp(dir, pattern)
+ if err != nil {
+ return nil, err
+ }
+ tf.filesMu.Lock()
+ tf.files = append(tf.files, f)
+ tf.filesMu.Unlock()
+ return f, nil
+}
+
+func (tf *tempFiles) CleanupAll() (err error) {
+ tf.cleanupOnce.Do(func() {
+ err = tf.cleanupAll()
+ })
+ return
+}
+
+func (tf *tempFiles) cleanupAll() error {
+ tf.filesMu.Lock()
+ defer tf.filesMu.Unlock()
+ var allErr []error
+ for _, f := range tf.files {
+ if err := f.Close(); err != nil {
+ allErr = append(allErr, err)
+ }
+ if err := os.Remove(f.Name()); err != nil {
+ allErr = append(allErr, err)
+ }
+ }
+ tf.files = nil
+ return errorutil.Aggregate(allErr)
+}
+
+func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) {
+ pos := int64(0)
+ return &countReadSeeker{r: r, cPos: &pos}, nil
+}
+
+type countReadSeeker struct {
+ r io.ReaderAt
+ cPos *int64
+
+ mu sync.Mutex
+}
+
+func (cr *countReadSeeker) Read(p []byte) (int, error) {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+
+ n, err := cr.r.ReadAt(p, *cr.cPos)
+ if err == nil {
+ *cr.cPos += int64(n)
+ }
+ return n, err
+}
+
+func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+
+ switch whence {
+ default:
+ return 0, fmt.Errorf("Unknown whence: %v", whence)
+ case io.SeekStart:
+ case io.SeekCurrent:
+ offset += *cr.cPos
+ case io.SeekEnd:
+ return 0, fmt.Errorf("Unsupported whence: %v", whence)
+ }
+
+ if offset < 0 {
+ return 0, fmt.Errorf("invalid offset")
+ }
+ *cr.cPos = offset
+ return offset, nil
+}
+
+func (cr *countReadSeeker) currentPos() int64 {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+
+ return *cr.cPos
+}
+
+func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) {
+ if org.Size() < 4 {
+ return org, nil
+ }
+ src := make([]byte, 4)
+ if _, err := org.Read(src); err != nil && err != io.EOF {
+ return nil, err
+ }
+ var dR io.Reader
+ if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) {
+ // gzip
+ dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
+ if err != nil {
+ return nil, err
+ }
+ defer dgR.Close()
+ dR = io.Reader(dgR)
+ } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) {
+ // zstd
+ dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size()))
+ if err != nil {
+ return nil, err
+ }
+ defer dzR.Close()
+ dR = io.Reader(dzR)
+ } else {
+ // uncompressed
+ return io.NewSectionReader(org, 0, org.Size()), nil
+ }
+ b, err := tmp.TempFile("", "uncompresseddata")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(b, dR); err != nil {
+ return nil, err
+ }
+ return fileSectionReader(b)
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go
new file mode 100644
index 000000000000..6de78b02dcdd
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go
@@ -0,0 +1,40 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package errorutil
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Aggregate combines a list of errors into a single new error.
+func Aggregate(errs []error) error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ points := make([]string, len(errs)+1)
+ points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs))
+ for i, err := range errs {
+ points[i+1] = fmt.Sprintf("* %s", err)
+ }
+ return errors.New(strings.Join(points, "\n\t"))
+ }
+}
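+
+// For illustration only:
+//
+//	err := Aggregate([]error{errA, errB})
+//	// err.Error() renders as:
+//	//	2 error(s) occurred:
+//	//	* errA
+//	//	* errB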
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
new file mode 100644
index 000000000000..f4d55465584e
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
@@ -0,0 +1,1223 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/vbatts/tar-split/archive/tar"
+)
+
+// A Reader permits random access reads from a stargz file.
+type Reader struct {
+ sr *io.SectionReader
+ toc *JTOC
+ tocDigest digest.Digest
+
+ // m stores all non-chunk entries, keyed by name.
+ m map[string]*TOCEntry
+
+ // chunks stores all TOCEntry values for regular files that
+ // are split up. For a file with a single chunk, it's only
+ // stored in m.
+ chunks map[string][]*TOCEntry
+
+ decompressor Decompressor
+}
+
+type openOpts struct {
+ tocOffset int64
+ decompressors []Decompressor
+ telemetry *Telemetry
+}
+
+// OpenOption is an option used during opening the layer
+type OpenOption func(o *openOpts) error
+
+// WithTOCOffset option specifies the offset of the TOC
+func WithTOCOffset(tocOffset int64) OpenOption {
+ return func(o *openOpts) error {
+ o.tocOffset = tocOffset
+ return nil
+ }
+}
+
+// WithDecompressors option specifies decompressors to use.
+// The default is the gzip-based decompressor.
+func WithDecompressors(decompressors ...Decompressor) OpenOption {
+ return func(o *openOpts) error {
+ o.decompressors = decompressors
+ return nil
+ }
+}
+
+// WithTelemetry option specifies the telemetry hooks
+func WithTelemetry(telemetry *Telemetry) OpenOption {
+ return func(o *openOpts) error {
+ o.telemetry = telemetry
+ return nil
+ }
+}
+
+// MeasureLatencyHook is a func which takes a start time and records the elapsed time
+type MeasureLatencyHook func(time.Time)
+
+// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you should be able to record
+// the latency metrics of the respective steps of the estargz open operation. To be used with the WithTelemetry option.
+type Telemetry struct {
+ GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds)
+ GetTocLatency MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds)
+ DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds)
+}
+
+// Open opens a stargz file for reading.
+// The behavior is configurable using options.
+//
+// Note that each entry name is normalized as the path that is relative to root.
+func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
+ var opts openOpts
+ for _, o := range opt {
+ if err := o(&opts); err != nil {
+ return nil, err
+ }
+ }
+
+ gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)}
+ decompressors := append(gzipCompressors, opts.decompressors...)
+
+ // Determine the size to fetch. Try to fetch as many bytes as possible.
+ fetchSize := maxFooterSize(sr.Size(), decompressors...)
+ if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize {
+ if maybeTocOffset > sr.Size() {
+ return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size())
+ }
+ fetchSize = sr.Size() - maybeTocOffset
+ }
+
+ start := time.Now() // before getting layer footer
+ footer := make([]byte, fetchSize)
+ if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil {
+ return nil, fmt.Errorf("error reading footer: %v", err)
+ }
+ if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil {
+ opts.telemetry.GetFooterLatency(start)
+ }
+
+ var allErr []error
+ var found bool
+ var r *Reader
+ for _, d := range decompressors {
+ fSize := d.FooterSize()
+ fOffset := positive(int64(len(footer)) - fSize)
+ maybeTocBytes := footer[:fOffset]
+ _, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
+ if err != nil {
+ allErr = append(allErr, err)
+ continue
+ }
+ if tocOffset >= 0 && tocSize <= 0 {
+ tocSize = sr.Size() - tocOffset - fSize
+ }
+ if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) {
+ maybeTocBytes = maybeTocBytes[:tocSize]
+ }
+ r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts)
+ if err == nil {
+ found = true
+ break
+ }
+ allErr = append(allErr, err)
+ }
+ if !found {
+ return nil, errorutil.Aggregate(allErr)
+ }
+ if err := r.initFields(); err != nil {
+ return nil, fmt.Errorf("failed to initialize fields of entries: %v", err)
+ }
+ return r, nil
+}
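+
+// For illustration only, opening a layer and reading a file from it might
+// look like this sketch (blob is assumed to be an io.ReaderAt over the
+// compressed layer with a known size):
+//
+//	r, err := Open(io.NewSectionReader(blob, 0, size))
+//	if err != nil {
+//		return err
+//	}
+//	sr, err := r.OpenFile("etc/passwd")
+//	// sr is an io.SectionReader over the file's uncompressed contents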
+
+// OpenFooter extracts and parses the footer from the given blob.
+// It only supports gzip-based eStargz.
+func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) {
+ if sr.Size() < FooterSize && sr.Size() < legacyFooterSize {
+ return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size())
+ }
+ var footer [FooterSize]byte
+ if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil {
+ return 0, 0, fmt.Errorf("error reading footer: %v", err)
+ }
+ var allErr []error
+ for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} {
+ fSize := d.FooterSize()
+ fOffset := positive(int64(len(footer)) - fSize)
+ _, tocOffset, _, err := d.ParseFooter(footer[fOffset:])
+ if err == nil {
+ return tocOffset, fSize, err
+ }
+ allErr = append(allErr, err)
+ }
+ return 0, 0, errorutil.Aggregate(allErr)
+}
+
+// initFields populates the Reader from r.toc after decoding it from
+// JSON.
+//
+// Unexported fields are populated, as are TOCEntry fields that were
+// implicit in the JSON.
+func (r *Reader) initFields() error {
+ r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
+ r.chunks = make(map[string][]*TOCEntry)
+ var lastPath string
+ uname := map[int]string{}
+ gname := map[int]string{}
+ var lastRegEnt *TOCEntry
+ var chunkTopIndex int
+ for i, ent := range r.toc.Entries {
+ ent.Name = cleanEntryName(ent.Name)
+ switch ent.Type {
+ case "reg", "chunk":
+ if ent.Offset != r.toc.Entries[chunkTopIndex].Offset {
+ chunkTopIndex = i
+ }
+ ent.chunkTopIndex = chunkTopIndex
+ }
+ if ent.Type == "reg" {
+ lastRegEnt = ent
+ }
+ if ent.Type == "chunk" {
+ ent.Name = lastPath
+ r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+ if ent.ChunkSize == 0 && lastRegEnt != nil {
+ ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
+ }
+ } else {
+ lastPath = ent.Name
+
+ if ent.Uname != "" {
+ uname[ent.UID] = ent.Uname
+ } else {
+ ent.Uname = uname[ent.UID]
+ }
+ if ent.Gname != "" {
+ gname[ent.GID] = ent.Gname
+ } else {
+ ent.Gname = gname[ent.GID]
+ }
+
+ ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
+
+ if ent.Type == "dir" {
+ ent.NumLink++ // Parent dir links to this directory
+ }
+ r.m[ent.Name] = ent
+ }
+ if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
+ r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
+ r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+ }
+ if ent.ChunkSize == 0 && ent.Size != 0 {
+ ent.ChunkSize = ent.Size
+ }
+ }
+
+ // Populate children, add implicit directories:
+ for _, ent := range r.toc.Entries {
+ if ent.Type == "chunk" {
+ continue
+ }
+ // add "foo/":
+ // add "foo" child to "" (creating "" if necessary)
+ //
+ // add "foo/bar/":
+ // add "bar" child to "foo" (creating "foo" if necessary)
+ //
+ // add "foo/bar.txt":
+ // add "bar.txt" child to "foo" (creating "foo" if necessary)
+ //
+ // add "a/b/c/d/e/f.txt":
+ // create "a/b/c/d/e" node
+ // add "f.txt" child to "e"
+
+ name := ent.Name
+ pdirName := parentDir(name)
+ if name == pdirName {
+ // This entry and its parent are the same.
+ // Ignore it to avoid an infinite reference loop.
+ // The example case where this can occur is when tar contains the root
+ // directory itself (e.g. "./", "/").
+ continue
+ }
+ pdir := r.getOrCreateDir(pdirName)
+ ent.NumLink++ // at least one name(ent.Name) references this entry.
+ if ent.Type == "hardlink" {
+ org, err := r.getSource(ent)
+ if err != nil {
+ return err
+ }
+ org.NumLink++ // original entry is referenced by this ent.Name.
+ ent = org
+ }
+ pdir.addChild(path.Base(name), ent)
+ }
+
+ lastOffset := r.sr.Size()
+ for i := len(r.toc.Entries) - 1; i >= 0; i-- {
+ e := r.toc.Entries[i]
+ if e.isDataType() {
+ e.nextOffset = lastOffset
+ }
+ if e.Offset != 0 && e.InnerOffset == 0 {
+ lastOffset = e.Offset
+ }
+ }
+
+ return nil
+}
+
+func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) {
+ if ent.Type == "hardlink" {
+ org, ok := r.m[cleanEntryName(ent.LinkName)]
+ if !ok {
+ return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+ }
+ ent, err = r.getSource(org)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ent, nil
+}
+
+func parentDir(p string) string {
+ dir, _ := path.Split(p)
+ return strings.TrimSuffix(dir, "/")
+}
+
+func (r *Reader) getOrCreateDir(d string) *TOCEntry {
+ e, ok := r.m[d]
+ if !ok {
+ e = &TOCEntry{
+ Name: d,
+ Type: "dir",
+ Mode: 0755,
+ NumLink: 2, // The directory itself (.) and the parent's link to this directory.
+ }
+ r.m[d] = e
+ if d != "" {
+ pdir := r.getOrCreateDir(parentDir(d))
+ pdir.addChild(path.Base(d), e)
+ }
+ }
+ return e
+}
+
+func (r *Reader) TOCDigest() digest.Digest {
+ return r.tocDigest
+}
+
+// VerifyTOC checks that the TOC JSON in the passed blob matches the
+// passed digest and that the TOC JSON contains digests for all chunks
+// contained in the blob. If the verification succeeds, this function
+// returns a TOCEntryVerifier which holds all chunk digests in the stargz blob.
+func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
+ // Verify the digest of TOC JSON
+ if r.tocDigest != tocDigest {
+ return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
+ }
+ return r.Verifiers()
+}
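+
+// For illustration only: the expected flow is to obtain the TOC digest out of
+// band (e.g. from an image annotation; the variables here are placeholders),
+// verify the TOC, then verify each chunk as it is read:
+//
+//	v, err := r.VerifyTOC(tocDigestFromManifest)
+//	if err != nil {
+//		return err // the layer's TOC cannot be trusted
+//	}
+//	dv, _ := v.Verifier(chunkEntry) // per-chunk digest.Verifier
+//	io.Copy(dv, chunkPayload)
+//	ok := dv.Verified()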
+
+// Verifiers returns the TOCEntryVerifier of this blob. Use VerifyTOC instead in most cases
+// because this doesn't verify the TOC.
+func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
+ chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
+ regDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the reg file digest
+ var chunkDigestMapIncomplete bool
+ var regDigestMapIncomplete bool
+ var containsChunk bool
+ for _, e := range r.toc.Entries {
+ if e.Type != "reg" && e.Type != "chunk" {
+ continue
+ }
+
+ // offset must be unique in stargz blob
+ _, dOK := chunkDigestMap[e.Offset]
+ _, rOK := regDigestMap[e.Offset]
+ if dOK || rOK {
+ return nil, fmt.Errorf("offset %d found twice", e.Offset)
+ }
+
+ if e.Type == "reg" {
+ if e.Size == 0 {
+ continue // ignores empty file
+ }
+
+ // record the digest of regular file payload
+ if e.Digest != "" {
+ d, err := digest.Parse(e.Digest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err)
+ }
+ regDigestMap[e.Offset] = d
+ } else {
+ regDigestMapIncomplete = true
+ }
+ } else {
+ containsChunk = true // this layer contains "chunk" entries.
+ }
+
+ // "reg" also can contain ChunkDigest (e.g. when "reg" is the first entry of
+ // chunked file)
+ if e.ChunkDigest != "" {
+ d, err := digest.Parse(e.ChunkDigest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err)
+ }
+ chunkDigestMap[e.Offset] = d
+ } else {
+ chunkDigestMapIncomplete = true
+ }
+ }
+
+ if chunkDigestMapIncomplete {
+ // Though some chunk digests are not found, if this layer doesn't contain
+ // "chunk"s and all digest of "reg" files are recorded, we can use them instead.
+ if !containsChunk && !regDigestMapIncomplete {
+ return &verifier{digestMap: regDigestMap}, nil
+ }
+ return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON")
+ }
+
+ return &verifier{digestMap: chunkDigestMap}, nil
+}
+
+// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by
+// offset of the chunk.
+type verifier struct {
+ digestMap map[int64]digest.Digest
+ digestMapMu sync.Mutex
+}
+
+// Verifier returns a content verifier specified by TOCEntry.
+func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) {
+ v.digestMapMu.Lock()
+ defer v.digestMapMu.Unlock()
+ d, ok := v.digestMap[ce.Offset]
+ if !ok {
+ return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered",
+ ce.Offset, ce.ChunkSize)
+ }
+ return d.Verifier(), nil
+}
+
+// ChunkEntryForOffset returns the TOCEntry containing the byte of the
+// named file at the given offset within the file.
+// Name must be an absolute path or one that is relative to root.
+func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) {
+ name = cleanEntryName(name)
+ e, ok = r.Lookup(name)
+ if !ok || !e.isDataType() {
+ return nil, false
+ }
+ ents := r.chunks[name]
+ if len(ents) < 2 {
+ if offset >= e.ChunkSize {
+ return nil, false
+ }
+ return e, true
+ }
+ i := sort.Search(len(ents), func(i int) bool {
+ e := ents[i]
+ return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize)
+ })
+ if i == len(ents) {
+ return nil, false
+ }
+ return ents[i], true
+}
+
+// Lookup returns the Table of Contents entry for the given path.
+//
+// To get the root directory, use the empty string.
+// Path must be an absolute path or one that is relative to root.
+func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
+ path = cleanEntryName(path)
+ if r == nil {
+ return
+ }
+ e, ok = r.m[path]
+ if ok && e.Type == "hardlink" {
+ var err error
+ e, err = r.getSource(e)
+ if err != nil {
+ return nil, false
+ }
+ }
+ return
+}
+
+// OpenFile returns the reader of the specified file payload.
+//
+// Name must be an absolute path or one that is relative to root.
+func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
+ fr, err := r.newFileReader(name)
+ if err != nil {
+ return nil, err
+ }
+ return io.NewSectionReader(fr, 0, fr.size), nil
+}
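+
+// For illustration only, random access into a large file inside the layer
+// (the path and offsets are placeholders):
+//
+//	sr, err := r.OpenFile("var/lib/app/data.bin")
+//	if err != nil {
+//		return err
+//	}
+//	buf := make([]byte, 4096)
+//	n, err := sr.ReadAt(buf, 1<<20) // read 4 KiB starting at offset 1 MiB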
+
+func (r *Reader) newFileReader(name string) (*fileReader, error) {
+ name = cleanEntryName(name)
+ ent, ok := r.Lookup(name)
+ if !ok {
+ // TODO: come up with some error plan. This is lazy:
+ return nil, &os.PathError{
+ Path: name,
+ Op: "OpenFile",
+ Err: os.ErrNotExist,
+ }
+ }
+ if ent.Type != "reg" {
+ return nil, &os.PathError{
+ Path: name,
+ Op: "OpenFile",
+ Err: errors.New("not a regular file"),
+ }
+ }
+ return &fileReader{
+ r: r,
+ size: ent.Size,
+ ents: r.getChunks(ent),
+ }, nil
+}
+
+func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) {
+ fr, err := r.newFileReader(name)
+ if err != nil {
+ return nil, err
+ }
+ fr.preRead = preRead
+ return io.NewSectionReader(fr, 0, fr.size), nil
+}
+
+func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry {
+ if ents, ok := r.chunks[ent.Name]; ok {
+ return ents
+ }
+ return []*TOCEntry{ent}
+}
+
+type fileReader struct {
+ r *Reader
+ size int64
+ ents []*TOCEntry // 1 or more reg/chunk entries
+ preRead func(*TOCEntry, io.Reader) error
+}
+
+func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
+ if off >= fr.size {
+ return 0, io.EOF
+ }
+ if off < 0 {
+ return 0, errors.New("invalid offset")
+ }
+ var i int
+ if len(fr.ents) > 1 {
+ i = sort.Search(len(fr.ents), func(i int) bool {
+ return fr.ents[i].ChunkOffset >= off
+ })
+ if i == len(fr.ents) {
+ i = len(fr.ents) - 1
+ }
+ }
+ ent := fr.ents[i]
+ if ent.ChunkOffset > off {
+ if i == 0 {
+ return 0, errors.New("internal error; first chunk offset is non-zero")
+ }
+ ent = fr.ents[i-1]
+ }
+
+ // If ent is a chunk of a large file, adjust the ReadAt
+ // offset by the chunk's offset.
+ off -= ent.ChunkOffset
+
+ finalEnt := fr.ents[len(fr.ents)-1]
+ compressedOff := ent.Offset
+ // compressedBytesRemain is the number of compressed bytes in this
+ // file remaining, over 1+ chunks.
+ compressedBytesRemain := finalEnt.NextOffset() - compressedOff
+
+ sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain)
+
+ const maxRead = 2 << 20
+ var bufSize = maxRead
+ if compressedBytesRemain < maxRead {
+ bufSize = int(compressedBytesRemain)
+ }
+
+ br := bufio.NewReaderSize(sr, bufSize)
+ if _, err := br.Peek(bufSize); err != nil {
+ return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err)
+ }
+
+ dr, err := fr.r.decompressor.Reader(br)
+ if err != nil {
+ return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err)
+ }
+ defer dr.Close()
+
+ if fr.preRead == nil {
+ if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil {
+ return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err)
+ }
+ return io.ReadFull(dr, p)
+ }
+
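+ // With preRead, all chunks that share this compressed stream (same Offset,
+ // ordered by InnerOffset) are decoded in a single pass: the target chunk
+ // fills p and every other chunk is handed to preRead.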
+ var retN int
+ var retErr error
+ var found bool
+ var nr int64
+ for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] {
+ if !e.isDataType() {
+ continue
+ }
+ if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset {
+ break
+ }
+ if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr {
+ return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err)
+ }
+ nr = e.InnerOffset
+ if e == ent {
+ found = true
+ if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
+ return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err)
+ }
+ retN, retErr = io.ReadFull(dr, p)
+ nr += off + int64(retN)
+ continue
+ }
+ cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)}
+ if err := fr.preRead(e, cr); err != nil {
+ return 0, fmt.Errorf("failed to pre read: %w", err)
+ }
+ nr += cr.n
+ }
+ if !found {
+ return 0, fmt.Errorf("fileReader.ReadAt: target entry not found")
+ }
+ return retN, retErr
+}
+
+// A Writer writes stargz files.
+//
+// Use NewWriter to create a new Writer.
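+//
+// A minimal write flow looks like this (a sketch; dst and tarStream are
+// hypothetical):
+//
+//	w := NewWriter(dst)
+//	if err := w.AppendTar(tarStream); err != nil {
+//		// handle error
+//	}
+//	tocDigest, err := w.Close() // appends the TOC and footer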
+type Writer struct {
+ bw *bufio.Writer
+ cw *countWriter
+ toc *JTOC
+ diffHash hash.Hash // SHA-256 of uncompressed tar
+
+ closed bool
+ gz io.WriteCloser
+ lastUsername map[int]string
+ lastGroupname map[int]string
+ compressor Compressor
+
+ uncompressedCounter *countWriteFlusher
+
+ // ChunkSize optionally controls the maximum number of bytes
+ // of data of a regular file that can be written in one gzip
+ // stream before a new gzip stream is started.
+ // Zero means to use a default, currently 4 MiB.
+ ChunkSize int
+
+ // MinChunkSize optionally controls the minimum number of bytes of data
+ // that must be written in one gzip stream before a new gzip stream is
+ // started.
+ // NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand.
+
+ needsOpenGzEntries map[string]struct{}
+}
+
+// currentCompressionWriter writes to the current w.gz field, which can
+// change throughout writing a tar entry.
+//
+// Additionally, it updates w's SHA-256 of the uncompressed bytes
+// of the tar file.
+type currentCompressionWriter struct{ w *Writer }
+
+func (ccw currentCompressionWriter) Write(p []byte) (int, error) {
+ ccw.w.diffHash.Write(p)
+ if ccw.w.gz == nil {
+ if err := ccw.w.condOpenGz(); err != nil {
+ return 0, err
+ }
+ }
+ return ccw.w.gz.Write(p)
+}
+
+func (w *Writer) chunkSize() int {
+ if w.ChunkSize <= 0 {
+ return 4 << 20
+ }
+ return w.ChunkSize
+}
+
+// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob.
+// TOC JSON and footer are removed.
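+//
+// For example (a sketch; sr holds a gzip-based eStargz blob):
+//
+//	rc, err := Unpack(sr, &GzipDecompressor{})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer rc.Close()
+//	tr := tar.NewReader(rc) // plain tar stream without the TOC and footer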
+func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
+ footerSize := c.FooterSize()
+ if sr.Size() < footerSize {
+ return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize)
+ }
+ footerOffset := sr.Size() - footerSize
+ footer := make([]byte, footerSize)
+ if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+ return nil, err
+ }
+ blobPayloadSize, _, _, err := c.ParseFooter(footer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse footer: %w", err)
+ }
+ if blobPayloadSize < 0 {
+ blobPayloadSize = sr.Size()
+ }
+ return c.Reader(io.LimitReader(sr, blobPayloadSize))
+}
+
+// NewWriter returns a new stargz writer (gzip-based) writing to w.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriter(w io.Writer) *Writer {
+ return NewWriterLevel(w, gzip.BestCompression)
+}
+
+// NewWriterLevel returns a new stargz writer (gzip-based) writing to w.
+// The compression level is configurable.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
+ return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel))
+}
+
+// NewWriterWithCompressor returns a new stargz writer writing to w.
+// The compression method is configurable.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
+ bw := bufio.NewWriter(w)
+ cw := &countWriter{w: bw}
+ return &Writer{
+ bw: bw,
+ cw: cw,
+ toc: &JTOC{Version: 1},
+ diffHash: sha256.New(),
+ compressor: c,
+ uncompressedCounter: &countWriteFlusher{},
+ }
+}
+
+// Close writes the stargz's table of contents and flushes all the
+// buffers, returning the digest of the TOC JSON and any error.
+func (w *Writer) Close() (digest.Digest, error) {
+ if w.closed {
+ return "", nil
+ }
+ defer func() { w.closed = true }()
+
+ if err := w.closeGz(); err != nil {
+ return "", err
+ }
+
+ // Write the TOC index and footer.
+ tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash)
+ if err != nil {
+ return "", err
+ }
+ if err := w.bw.Flush(); err != nil {
+ return "", err
+ }
+
+ return tocDigest, nil
+}
+
+func (w *Writer) closeGz() error {
+ if w.closed {
+ return errors.New("write on closed Writer")
+ }
+ if w.gz != nil {
+ if err := w.gz.Close(); err != nil {
+ return err
+ }
+ w.gz = nil
+ }
+ return nil
+}
+
+func (w *Writer) flushGz() error {
+ if w.closed {
+ return errors.New("flush on closed Writer")
+ }
+ if w.gz != nil {
+ if f, ok := w.gz.(interface {
+ Flush() error
+ }); ok {
+ return f.Flush()
+ }
+ }
+ return nil
+}
+
+// nameIfChanged returns name, unless it was already the value of (*mp)[id],
+// in which case it returns the empty string.
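+//
+// This lets the writer record Uname/Gname only when they change between
+// entries sharing a uid/gid, keeping repeated names out of the TOC.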
+func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
+ if name == "" {
+ return ""
+ }
+ if *mp == nil {
+ *mp = make(map[int]string)
+ }
+ if (*mp)[id] == name {
+ return ""
+ }
+ (*mp)[id] = name
+ return name
+}
+
+func (w *Writer) condOpenGz() (err error) {
+ if w.gz == nil {
+ w.gz, err = w.compressor.Writer(w.cw)
+ if w.gz != nil {
+ w.gz = w.uncompressedCounter.register(w.gz)
+ }
+ }
+ return
+}
+
+// AppendTar reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+func (w *Writer) AppendTar(r io.Reader) error {
+ return w.appendTar(r, false)
+}
+
+// AppendTarLossLess reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+//
+// The difference from AppendTar is that this writes
+// the input tar stream into w without any modification (e.g. to header bytes).
+//
+// Note that if the input tar stream already contains TOC JSON, this returns an
+// error because w cannot replace the existing TOC JSON with the one generated by
+// w without lossy modification. To avoid this error, if the input stream is known
+// to be stargz/estargz, you should decompress it and remove the TOC JSON in advance.
+func (w *Writer) AppendTarLossLess(r io.Reader) error {
+ return w.appendTar(r, true)
+}
+
+func (w *Writer) appendTar(r io.Reader, lossless bool) error {
+ var src io.Reader
+ br := bufio.NewReader(r)
+ if isGzip(br) {
+ zr, _ := gzip.NewReader(br)
+ src = zr
+ } else {
+ src = io.Reader(br)
+ }
+ dst := currentCompressionWriter{w}
+ var tw *tar.Writer
+ if !lossless {
+ tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
+ }
+ tr := tar.NewReader(src)
+ if lossless {
+ tr.RawAccounting = true
+ }
+ prevOffset := w.cw.n
+ var prevOffsetUncompressed int64
+ for {
+ h, err := tr.Next()
+ if err == io.EOF {
+ if lossless {
+ if remain := tr.RawBytes(); len(remain) > 0 {
+ // Collect the remaining null bytes.
+ // /~https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53
+ if _, err := dst.Write(remain); err != nil {
+ return err
+ }
+ }
+ }
+ break
+ }
+ if err != nil {
+ return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
+ }
+ if cleanEntryName(h.Name) == TOCTarName {
+ // It is possible for a layer to be "stargzified" twice during the
+ // distribution lifecycle. So we reserve "TOCTarName" here to avoid
+ // duplicated entries in the resulting layer.
+ if lossless {
+ // We cannot handle this in lossless way.
+ return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append")
+ }
+ continue
+ }
+
+ xattrs := make(map[string][]byte)
+ const xattrPAXRecordsPrefix = "SCHILY.xattr."
+ if h.PAXRecords != nil {
+ for k, v := range h.PAXRecords {
+ if strings.HasPrefix(k, xattrPAXRecordsPrefix) {
+ xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v)
+ }
+ }
+ }
+ ent := &TOCEntry{
+ Name: h.Name,
+ Mode: h.Mode,
+ UID: h.Uid,
+ GID: h.Gid,
+ Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname),
+ Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname),
+ ModTime3339: formatModtime(h.ModTime),
+ Xattrs: xattrs,
+ }
+ if err := w.condOpenGz(); err != nil {
+ return err
+ }
+ if tw != nil {
+ if err := tw.WriteHeader(h); err != nil {
+ return err
+ }
+ } else {
+ if _, err := dst.Write(tr.RawBytes()); err != nil {
+ return err
+ }
+ }
+ switch h.Typeflag {
+ case tar.TypeLink:
+ ent.Type = "hardlink"
+ ent.LinkName = h.Linkname
+ case tar.TypeSymlink:
+ ent.Type = "symlink"
+ ent.LinkName = h.Linkname
+ case tar.TypeDir:
+ ent.Type = "dir"
+ case tar.TypeReg:
+ ent.Type = "reg"
+ ent.Size = h.Size
+ case tar.TypeChar:
+ ent.Type = "char"
+ ent.DevMajor = int(h.Devmajor)
+ ent.DevMinor = int(h.Devminor)
+ case tar.TypeBlock:
+ ent.Type = "block"
+ ent.DevMajor = int(h.Devmajor)
+ ent.DevMinor = int(h.Devminor)
+ case tar.TypeFifo:
+ ent.Type = "fifo"
+ default:
+ return fmt.Errorf("unsupported input tar entry %q", h.Typeflag)
+ }
+
+ // We need to keep a reference to the TOC entry for regular files, so that we
+ // can fill the digest later.
+ var regFileEntry *TOCEntry
+ var payloadDigest digest.Digester
+ if h.Typeflag == tar.TypeReg {
+ regFileEntry = ent
+ payloadDigest = digest.Canonical.Digester()
+ }
+
+ if h.Typeflag == tar.TypeReg && ent.Size > 0 {
+ var written int64
+ totalSize := ent.Size // save it before we destroy ent
+ tee := io.TeeReader(tr, payloadDigest.Hash())
+ for written < totalSize {
+ chunkSize := int64(w.chunkSize())
+ remain := totalSize - written
+ if remain < chunkSize {
+ chunkSize = remain
+ } else {
+ ent.ChunkSize = chunkSize
+ }
+
+ // We flush the underlying compression writer here to correctly calculate "w.cw.n".
+ if err := w.flushGz(); err != nil {
+ return err
+ }
+ if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) {
+ if err := w.closeGz(); err != nil {
+ return err
+ }
+ ent.Offset = w.cw.n
+ prevOffset = ent.Offset
+ prevOffsetUncompressed = w.uncompressedCounter.n
+ } else {
+ ent.Offset = prevOffset
+ ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed
+ }
+
+ ent.ChunkOffset = written
+ chunkDigest := digest.Canonical.Digester()
+
+ if err := w.condOpenGz(); err != nil {
+ return err
+ }
+
+ teeChunk := io.TeeReader(tee, chunkDigest.Hash())
+ var out io.Writer
+ if tw != nil {
+ out = tw
+ } else {
+ out = dst
+ }
+ if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil {
+ return fmt.Errorf("error copying %q: %v", h.Name, err)
+ }
+ ent.ChunkDigest = chunkDigest.Digest().String()
+ w.toc.Entries = append(w.toc.Entries, ent)
+ written += chunkSize
+ ent = &TOCEntry{
+ Name: h.Name,
+ Type: "chunk",
+ }
+ }
+ } else {
+ w.toc.Entries = append(w.toc.Entries, ent)
+ }
+ if payloadDigest != nil {
+ regFileEntry.Digest = payloadDigest.Digest().String()
+ }
+ if tw != nil {
+ if err := tw.Flush(); err != nil {
+ return err
+ }
+ }
+ }
+ remainDest := io.Discard
+ if lossless {
+ remainDest = dst // Preserve the remaining bytes in lossless mode
+ }
+ _, err := io.Copy(remainDest, src)
+ return err
+}
+
+func (w *Writer) needsOpenGz(ent *TOCEntry) bool {
+ if ent.Type != "reg" {
+ return false
+ }
+ if w.needsOpenGzEntries == nil {
+ return false
+ }
+ _, ok := w.needsOpenGzEntries[ent.Name]
+ return ok
+}
+
+// DiffID returns the SHA-256 of the uncompressed tar bytes.
+// It is only valid to call DiffID after Close.
+func (w *Writer) DiffID() string {
+ return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil))
+}
+
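+// maxFooterSize returns the largest footer size among the given decompressors
+// that still fits within the blob.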
+func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) {
+ for _, d := range decompressors {
+ if s := d.FooterSize(); res < s && s <= blobSize {
+ res = s
+ }
+ }
+ return
+}
+
+func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) {
+ if tocOff < 0 {
+ // This means that the TOC isn't contained in the blob.
+ // We pass a nil reader to ParseTOC and expect ParseTOC to acquire the TOC
+ // from an external location.
+ start := time.Now()
+ toc, tocDgst, err := d.ParseTOC(nil)
+ if err != nil {
+ return nil, err
+ }
+ if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil {
+ opts.telemetry.GetTocLatency(start)
+ }
+ if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+ opts.telemetry.DeserializeTocLatency(start)
+ }
+ return &Reader{
+ sr: sr,
+ toc: toc,
+ tocDigest: tocDgst,
+ decompressor: d,
+ }, nil
+ }
+ if len(tocBytes) > 0 {
+ start := time.Now()
+ toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
+ if err == nil {
+ if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+ opts.telemetry.DeserializeTocLatency(start)
+ }
+ return &Reader{
+ sr: sr,
+ toc: toc,
+ tocDigest: tocDgst,
+ decompressor: d,
+ }, nil
+ }
+ }
+
+ start := time.Now()
+ tocBytes = make([]byte, tocSize)
+ if _, err := sr.ReadAt(tocBytes, tocOff); err != nil {
+ return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err)
+ }
+ if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil {
+ opts.telemetry.GetTocLatency(start)
+ }
+ start = time.Now()
+ toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
+ if err != nil {
+ return nil, err
+ }
+ if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+ opts.telemetry.DeserializeTocLatency(start)
+ }
+ return &Reader{
+ sr: sr,
+ toc: toc,
+ tocDigest: tocDgst,
+ decompressor: d,
+ }, nil
+}
+
+func formatModtime(t time.Time) string {
+ if t.IsZero() || t.Unix() == 0 {
+ return ""
+ }
+ return t.UTC().Round(time.Second).Format(time.RFC3339)
+}
+
+func cleanEntryName(name string) string {
+ // Use path.Clean to consistently deal with path separators across platforms.
+ return strings.TrimPrefix(path.Clean("/"+name), "/")
+}
+
+// countWriter counts how many bytes have been written to its wrapped
+// io.Writer.
+type countWriter struct {
+ w io.Writer
+ n int64
+}
+
+func (cw *countWriter) Write(p []byte) (n int, err error) {
+ n, err = cw.w.Write(p)
+ cw.n += int64(n)
+ return
+}
+
+type countWriteFlusher struct {
+ io.WriteCloser
+ n int64
+}
+
+func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser {
+ wc.WriteCloser = w
+ return wc
+}
+
+func (wc *countWriteFlusher) Write(p []byte) (n int, err error) {
+ n, err = wc.WriteCloser.Write(p)
+ wc.n += int64(n)
+ return
+}
+
+func (wc *countWriteFlusher) Flush() error {
+ if f, ok := wc.WriteCloser.(interface {
+ Flush() error
+ }); ok {
+ return f.Flush()
+ }
+ return nil
+}
+
+func (wc *countWriteFlusher) Close() error {
+ err := wc.WriteCloser.Close()
+ wc.WriteCloser = nil
+ return err
+}
+
+// isGzip reports whether br is positioned right before an upcoming gzip stream.
+// It does not consume any bytes from br.
+func isGzip(br *bufio.Reader) bool {
+ const (
+ gzipID1 = 0x1f
+ gzipID2 = 0x8b
+ gzipDeflate = 8
+ )
+ peek, _ := br.Peek(3)
+ return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate
+}
+
+func positive(n int64) int64 {
+ if n < 0 {
+ return 0
+ }
+ return n
+}
+
+type countReader struct {
+ r io.Reader
+ n int64
+}
+
+func (cr *countReader) Read(p []byte) (n int, err error) {
+ n, err = cr.r.Read(p)
+ cr.n += int64(n)
+ return
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/externaltoc/externaltoc.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/externaltoc/externaltoc.go
new file mode 100644
index 000000000000..5f466177cbb9
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/externaltoc/externaltoc.go
@@ -0,0 +1,278 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
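+// Package externaltoc provides a gzip-based Compression whose TOC JSON is kept
+// outside of the eStargz blob. The blob ends with a fixed-size footer that only
+// marks the TOC as external; the TOC bytes themselves are supplied at read time
+// through the provideTOC callback.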
+package externaltoc
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "io"
+ "sync"
+
+ "github.com/containerd/stargz-snapshotter/estargz"
+ digest "github.com/opencontainers/go-digest"
+)
+
+type GzipCompression struct {
+ *GzipCompressor
+ *GzipDecompressor
+}
+
+func NewGzipCompressionWithLevel(provideTOC func() ([]byte, error), level int) estargz.Compression {
+ return &GzipCompression{
+ NewGzipCompressorWithLevel(level),
+ NewGzipDecompressor(provideTOC),
+ }
+}
+
+func NewGzipCompressor() *GzipCompressor {
+ return &GzipCompressor{compressionLevel: gzip.BestCompression}
+}
+
+func NewGzipCompressorWithLevel(level int) *GzipCompressor {
+ return &GzipCompressor{compressionLevel: level}
+}
+
+type GzipCompressor struct {
+ compressionLevel int
+ buf *bytes.Buffer
+}
+
+func (gc *GzipCompressor) WriteTOCTo(w io.Writer) (int, error) {
+ // gc.buf is nil until WriteTOCAndFooter registers the TOC.
+ if gc.buf == nil || gc.buf.Len() == 0 {
+ return 0, fmt.Errorf("TOC hasn't been registered")
+ }
+ return w.Write(gc.buf.Bytes())
+}
+
+func (gc *GzipCompressor) Writer(w io.Writer) (estargz.WriteFlushCloser, error) {
+ return gzip.NewWriterLevel(w, gc.compressionLevel)
+}
+
+func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *estargz.JTOC, diffHash hash.Hash) (digest.Digest, error) {
+ tocJSON, err := json.MarshalIndent(toc, "", "\t")
+ if err != nil {
+ return "", err
+ }
+ buf := new(bytes.Buffer)
+ gz, _ := gzip.NewWriterLevel(buf, gc.compressionLevel)
+ // The TOC isn't written to the layer, so it has no effect on the diff ID.
+ tw := tar.NewWriter(gz)
+ if err := tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: estargz.TOCTarName,
+ Size: int64(len(tocJSON)),
+ }); err != nil {
+ return "", err
+ }
+ if _, err := tw.Write(tocJSON); err != nil {
+ return "", err
+ }
+
+ if err := tw.Close(); err != nil {
+ return "", err
+ }
+ if err := gz.Close(); err != nil {
+ return "", err
+ }
+ gc.buf = buf
+ footerBytes, err := gzipFooterBytes()
+ if err != nil {
+ return "", err
+ }
+ if _, err := w.Write(footerBytes); err != nil {
+ return "", err
+ }
+ return digest.FromBytes(tocJSON), nil
+}
+
+// The footer is an empty gzip stream with no compression and an Extra header.
+//
+// 46 comes from:
+//
+// 10 bytes gzip header
+// 2 bytes XLEN (length of Extra field) = 21 (4 bytes header + len("STARGZEXTERNALTOC"))
+// 2 bytes Extra: SI1 = 'S', SI2 = 'G'
+// 2 bytes Extra: LEN = 17 (len("STARGZEXTERNALTOC"))
+// 17 bytes Extra: subfield = "STARGZEXTERNALTOC"
+// 5 bytes flate header
+// 8 bytes gzip footer
+// (End of the eStargz blob)
+const FooterSize = 46
+
+// gzipFooterBytes returns the FooterSize (46) bytes footer.
+func gzipFooterBytes() ([]byte, error) {
+ buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
+ gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep the footer at 46 bytes
+
+ // Extra header indicating the offset of TOCJSON
+ // https://tools.ietf.org/html/rfc1952#section-2.3.1.1
+ header := make([]byte, 4)
+ header[0], header[1] = 'S', 'G'
+ subfield := "STARGZEXTERNALTOC" // len("STARGZEXTERNALTOC") = 17
+ binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
+ gz.Header.Extra = append(header, []byte(subfield)...)
+ if err := gz.Close(); err != nil {
+ return nil, err
+ }
+ if buf.Len() != FooterSize {
+ panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+ }
+ return buf.Bytes(), nil
+}
+
+func NewGzipDecompressor(provideTOCFunc func() ([]byte, error)) *GzipDecompressor {
+ return &GzipDecompressor{provideTOCFunc: provideTOCFunc}
+}
+
+type GzipDecompressor struct {
+ provideTOCFunc func() ([]byte, error)
+ rawTOC []byte // Do not access this field directly. Get it through the getTOC() method.
+ getTOCOnce sync.Once
+}
+
+func (gz *GzipDecompressor) getTOC() ([]byte, error) {
+ if len(gz.rawTOC) == 0 {
+ var retErr error
+ gz.getTOCOnce.Do(func() {
+ if gz.provideTOCFunc == nil {
+ retErr = fmt.Errorf("TOC hasn't been provided")
+ return
+ }
+ rawTOC, err := gz.provideTOCFunc()
+ if err != nil {
+ retErr = err
+ return
+ }
+ gz.rawTOC = rawTOC
+ })
+ if retErr != nil {
+ return nil, retErr
+ }
+ if len(gz.rawTOC) == 0 {
+ return nil, fmt.Errorf("no TOC is provided")
+ }
+ }
+ return gz.rawTOC, nil
+}
+
+func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+ return gzip.NewReader(r)
+}
+
+func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *estargz.JTOC, tocDgst digest.Digest, err error) {
+ if r != nil {
+ return nil, "", fmt.Errorf("TOC must be provided externally but got internal one")
+ }
+ rawTOC, err := gz.getTOC()
+ if err != nil {
+ return nil, "", fmt.Errorf("failed to get TOC: %v", err)
+ }
+ return parseTOCEStargz(bytes.NewReader(rawTOC))
+}
+
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+ if len(p) != FooterSize {
+ return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(p))
+ if err != nil {
+ return 0, 0, 0, err
+ }
+ defer zr.Close()
+ extra := zr.Header.Extra
+ si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+ if si1 != 'S' || si2 != 'G' {
+ return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
+ }
+ if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(len("STARGZEXTERNALTOC")) {
+ return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+ }
+ if string(subfield) != "STARGZEXTERNALTOC" {
+ return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+ }
+ // tocOffset < 0 indicates external TOC.
+ // blobPayloadSize < 0 indicates the entire blob size.
+ return -1, -1, 0, nil
+}
+
+func (gz *GzipDecompressor) FooterSize() int64 {
+ return FooterSize
+}
+
+func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ if r != nil {
+ return nil, fmt.Errorf("TOC must be provided externally but got internal one")
+ }
+ rawTOC, err := gz.getTOC()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get TOC: %v", err)
+ }
+ return decompressTOCEStargz(bytes.NewReader(rawTOC))
+}
+
+func parseTOCEStargz(r io.Reader) (toc *estargz.JTOC, tocDgst digest.Digest, err error) {
+ tr, err := decompressTOCEStargz(r)
+ if err != nil {
+ return nil, "", err
+ }
+ dgstr := digest.Canonical.Digester()
+ toc = new(estargz.JTOC)
+ if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
+ return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
+ }
+ if err := tr.Close(); err != nil {
+ return nil, "", err
+ }
+ return toc, dgstr.Digest(), nil
+}
+
+func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ zr, err := gzip.NewReader(r)
+ if err != nil {
+ return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
+ }
+ zr.Multistream(false)
+ tr := tar.NewReader(zr)
+ h, err := tr.Next()
+ if err != nil {
+ return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
+ }
+ if h.Name != estargz.TOCTarName {
+ return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, estargz.TOCTarName)
+ }
+ return readCloser{tr, zr.Close}, nil
+}
+
+type readCloser struct {
+ io.Reader
+ closeFunc func() error
+}
+
+func (rc readCloser) Close() error {
+ return rc.closeFunc()
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
new file mode 100644
index 000000000000..f24afe32f450
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
@@ -0,0 +1,237 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "io"
+ "strconv"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+type gzipCompression struct {
+ *GzipCompressor
+ *GzipDecompressor
+}
+
+func newGzipCompressionWithLevel(level int) Compression {
+ return &gzipCompression{
+ &GzipCompressor{level},
+ &GzipDecompressor{},
+ }
+}
+
+func NewGzipCompressor() *GzipCompressor {
+ return &GzipCompressor{gzip.BestCompression}
+}
+
+func NewGzipCompressorWithLevel(level int) *GzipCompressor {
+ return &GzipCompressor{level}
+}
+
+type GzipCompressor struct {
+ compressionLevel int
+}
+
+func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) {
+ return gzip.NewWriterLevel(w, gc.compressionLevel)
+}
+
+func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) {
+ tocJSON, err := json.MarshalIndent(toc, "", "\t")
+ if err != nil {
+ return "", err
+ }
+ gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel)
+ gw := io.Writer(gz)
+ if diffHash != nil {
+ gw = io.MultiWriter(gz, diffHash)
+ }
+ tw := tar.NewWriter(gw)
+ if err := tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: TOCTarName,
+ Size: int64(len(tocJSON)),
+ }); err != nil {
+ return "", err
+ }
+ if _, err := tw.Write(tocJSON); err != nil {
+ return "", err
+ }
+
+ if err := tw.Close(); err != nil {
+ return "", err
+ }
+ if err := gz.Close(); err != nil {
+ return "", err
+ }
+ if _, err := w.Write(gzipFooterBytes(off)); err != nil {
+ return "", err
+ }
+ return digest.FromBytes(tocJSON), nil
+}
+
+// gzipFooterBytes returns the 51-byte footer.
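+// Its Extra subfield encodes the TOC offset as 16 hex digits followed by
+// "STARGZ"; e.g. tocOff 0x1234 yields "0000000000001234STARGZ".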
+func gzipFooterBytes(tocOff int64) []byte {
+ buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
+ gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
+
+ // Extra header indicating the offset of TOCJSON
+ // https://tools.ietf.org/html/rfc1952#section-2.3.1.1
+ header := make([]byte, 4)
+ header[0], header[1] = 'S', 'G'
+ subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
+ binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
+ gz.Header.Extra = append(header, []byte(subfield)...)
+ gz.Close()
+ if buf.Len() != FooterSize {
+ panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+ }
+ return buf.Bytes()
+}
+
+type GzipDecompressor struct{}
+
+func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+ return gzip.NewReader(r)
+}
+
+func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ return parseTOCEStargz(r)
+}
+
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+ if len(p) != FooterSize {
+ return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(p))
+ if err != nil {
+ return 0, 0, 0, err
+ }
+ defer zr.Close()
+ extra := zr.Header.Extra
+ si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+ if si1 != 'S' || si2 != 'G' {
+ return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
+ }
+ if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
+ return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+ }
+ if string(subfield[16:]) != "STARGZ" {
+ return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+ }
+ tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
+ }
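+ // The compressed payload ends where the TOC starts, so blobPayloadSize
+ // equals tocOffset. tocSize 0 means the footer doesn't record the size;
+ // the caller derives it (the TOC spans up to the footer).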
+ return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *GzipDecompressor) FooterSize() int64 {
+ return FooterSize
+}
+
+func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ return decompressTOCEStargz(r)
+}
+
+type LegacyGzipDecompressor struct{}
+
+func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+ return gzip.NewReader(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ return parseTOCEStargz(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+ if len(p) != legacyFooterSize {
+ return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(p))
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
+ }
+ defer zr.Close()
+ extra := zr.Header.Extra
+ if len(extra) != 16+len("STARGZ") {
+ return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
+ }
+ if string(extra[16:]) != "STARGZ" {
+ return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+ }
+ tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
+ }
+ return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *LegacyGzipDecompressor) FooterSize() int64 {
+ return legacyFooterSize
+}
+
+func (gz *LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ return decompressTOCEStargz(r)
+}
+
+func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ tr, err := decompressTOCEStargz(r)
+ if err != nil {
+ return nil, "", err
+ }
+ dgstr := digest.Canonical.Digester()
+ toc = new(JTOC)
+ if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
+ return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
+ }
+ if err := tr.Close(); err != nil {
+ return nil, "", err
+ }
+ return toc, dgstr.Digest(), nil
+}
+
+func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ zr, err := gzip.NewReader(r)
+ if err != nil {
+ return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
+ }
+ zr.Multistream(false)
+ tr := tar.NewReader(zr)
+ h, err := tr.Next()
+ if err != nil {
+ return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
+ }
+ if h.Name != TOCTarName {
+ return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
+ }
+ return readCloser{tr, zr.Close}, nil
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
new file mode 100644
index 000000000000..0ca6fd75f2ed
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -0,0 +1,2366 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ "github.com/klauspost/compress/zstd"
+ digest "github.com/opencontainers/go-digest"
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+// TestingController is Compression with some helper methods necessary for testing.
+type TestingController interface {
+ Compression
+ TestStreams(t *testing.T, b []byte, streams []int64)
+ DiffIDOf(*testing.T, []byte) string
+ String() string
+}
+
+// CompressionTestSuite tests that this pkg, using the given controllers, can build valid eStargz blobs and parse them.
+func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) {
+ t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
+ t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
+ t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) })
+}
+
+type TestingControllerFactory func() TestingController
+
+const (
+ uncompressedType int = iota
+ gzipType
+ zstdType
+)
+
+var srcCompressions = []int{
+ uncompressedType,
+ gzipType,
+ zstdType,
+}
+
+var allowedPrefix = [4]string{"", "./", "/", "../"}
+
+// testBuild tests that the resulting stargz blob built by this pkg has the same
+// contents as the normal stargz blob.
+func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
+ tests := []struct {
+ name string
+ chunkSize int
+ minChunkSize []int
+ in []tarEntry
+ }{
+ {
+ name: "regfiles and directories",
+ chunkSize: 4,
+ in: tarOf(
+ file("foo", "test1"),
+ dir("foo2/"),
+ file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})),
+ ),
+ },
+ {
+ name: "empty files",
+ chunkSize: 4,
+ in: tarOf(
+ file("foo", "tttttt"),
+ file("foo_empty", ""),
+ file("foo2", "tttttt"),
+ file("foo_empty2", ""),
+ file("foo3", "tttttt"),
+ file("foo_empty3", ""),
+ file("foo4", "tttttt"),
+ file("foo_empty4", ""),
+ file("foo5", "tttttt"),
+ file("foo_empty5", ""),
+ file("foo6", "tttttt"),
+ ),
+ },
+ {
+ name: "various files",
+ chunkSize: 4,
+ minChunkSize: []int{0, 64000},
+ in: tarOf(
+ file("baz.txt", "bazbazbazbazbazbazbaz"),
+ file("foo1.txt", "a"),
+ file("bar/foo2.txt", "b"),
+ file("foo3.txt", "c"),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ dir("dev/"),
+ blockdev("dev/testblock", 3, 4),
+ fifo("dev/testfifo"),
+ chardev("dev/testchar1", 5, 6),
+ file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})),
+ dir("test2/"),
+ link("test2/bazlink", "baz.txt"),
+ chardev("dev/testchar2", 1, 2),
+ ),
+ },
+ {
+ name: "no contents",
+ chunkSize: 4,
+ in: tarOf(
+ file("baz.txt", ""),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ dir("dev/"),
+ blockdev("dev/testblock", 3, 4),
+ fifo("dev/testfifo"),
+ chardev("dev/testchar1", 5, 6),
+ file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})),
+ dir("test2/"),
+ link("test2/bazlink", "baz.txt"),
+ chardev("dev/testchar2", 1, 2),
+ ),
+ },
+ }
+ for _, tt := range tests {
+ if len(tt.minChunkSize) == 0 {
+ tt.minChunkSize = []int{0}
+ }
+ for _, srcCompression := range srcCompressions {
+ srcCompression := srcCompression
+ for _, newCL := range controllers {
+ newCL := newCL
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, minChunkSize := range tt.minChunkSize {
+ minChunkSize := minChunkSize
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) {
+ tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
+ // Test divideEntries()
+ entries, err := sortEntries(tarBlob, nil, nil) // identical order
+ if err != nil {
+ t.Fatalf("failed to parse tar: %v", err)
+ }
+ var merged []*entry
+ for _, part := range divideEntries(entries, 4) {
+ merged = append(merged, part...)
+ }
+ if !reflect.DeepEqual(entries, merged) {
+ for _, e := range entries {
+ t.Logf("Original: %v", e.header)
+ }
+ for _, e := range merged {
+ t.Logf("Merged: %v", e.header)
+ }
+ t.Errorf("divided entries couldn't be merged")
+ return
+ }
+
+ // Prepare sample data
+ cl1 := newCL()
+ wantBuf := new(bytes.Buffer)
+ sw := NewWriterWithCompressor(wantBuf, cl1)
+ sw.MinChunkSize = minChunkSize
+ sw.ChunkSize = tt.chunkSize
+ if err := sw.AppendTar(tarBlob); err != nil {
+ t.Fatalf("failed to append tar to want stargz: %v", err)
+ }
+ if _, err := sw.Close(); err != nil {
+ t.Fatalf("failed to prepare want stargz: %v", err)
+ }
+ wantData := wantBuf.Bytes()
+ want, err := Open(io.NewSectionReader(
+ bytes.NewReader(wantData), 0, int64(len(wantData))),
+ WithDecompressors(cl1),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse the want stargz: %v", err)
+ }
+
+ // Prepare testing data
+ var opts []Option
+ if minChunkSize > 0 {
+ opts = append(opts, WithMinChunkSize(minChunkSize))
+ }
+ cl2 := newCL()
+ rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+ append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...)
+ if err != nil {
+ t.Fatalf("failed to build stargz: %v", err)
+ }
+ defer rc.Close()
+ gotBuf := new(bytes.Buffer)
+ if _, err := io.Copy(gotBuf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ gotData := gotBuf.Bytes()
+ got, err := Open(io.NewSectionReader(
+ bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
+ WithDecompressors(cl2),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse the got stargz: %v", err)
+ }
+
+ // Check DiffID is properly calculated
+ rc.Close()
+ diffID := rc.DiffID()
+ wantDiffID := cl2.DiffIDOf(t, gotData)
+ if diffID.String() != wantDiffID {
+ t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+ }
+
+ // Compare as stargz
+ if !isSameVersion(t, cl1, wantData, cl2, gotData) {
+ t.Errorf("built stargz hasn't same json")
+ return
+ }
+ if !isSameEntries(t, want, got) {
+ t.Errorf("built stargz isn't same as the original")
+ return
+ }
+
+ // Compare as tar.gz
+ if !isSameTarGz(t, cl1, wantData, cl2, gotData) {
+ t.Errorf("built stargz isn't same tar.gz")
+ return
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+ aGz, err := cla.Reader(bytes.NewReader(a))
+ if err != nil {
+ t.Fatalf("failed to read A")
+ }
+ defer aGz.Close()
+ bGz, err := clb.Reader(bytes.NewReader(b))
+ if err != nil {
+ t.Fatalf("failed to read B")
+ }
+ defer bGz.Close()
+
+ // Same as tar's Next() method but ignores landmarks and TOCJSON file
+ next := func(r *tar.Reader) (h *tar.Header, err error) {
+ for {
+ if h, err = r.Next(); err != nil {
+ return
+ }
+ if h.Name != PrefetchLandmark &&
+ h.Name != NoPrefetchLandmark &&
+ h.Name != TOCTarName {
+ return
+ }
+ }
+ }
+
+ aTar := tar.NewReader(aGz)
+ bTar := tar.NewReader(bGz)
+ for {
+ // Fetch and parse next header.
+ aH, aErr := next(aTar)
+ bH, bErr := next(bTar)
+ if aErr != nil || bErr != nil {
+ if aErr == io.EOF && bErr == io.EOF {
+ break
+ }
+ t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr)
+ }
+ if !reflect.DeepEqual(aH, bH) {
+ t.Logf("different header (A = %v; B = %v)", aH, bH)
+ return false
+ }
+ aFile, err := io.ReadAll(aTar)
+ if err != nil {
+ t.Fatal("failed to read tar payload of A")
+ }
+ bFile, err := io.ReadAll(bTar)
+ if err != nil {
+ t.Fatal("failed to read tar payload of B")
+ }
+ if !bytes.Equal(aFile, bFile) {
+ t.Logf("different tar payload (A = %q; B = %q)", string(a), string(b))
+ return false
+ }
+ }
+
+ return true
+}
+
+func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+ aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
+ if err != nil {
+ t.Fatalf("failed to parse A: %v", err)
+ }
+ bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb)
+ if err != nil {
+ t.Fatalf("failed to parse B: %v", err)
+ }
+ t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC))
+ t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC))
+ return aJTOC.Version == bJTOC.Version
+}
+
+func isSameEntries(t *testing.T, a, b *Reader) bool {
+ aroot, ok := a.Lookup("")
+ if !ok {
+ t.Fatalf("failed to get root of A")
+ }
+ broot, ok := b.Lookup("")
+ if !ok {
+ t.Fatalf("failed to get root of B")
+ }
+ aEntry := stargzEntry{aroot, a}
+ bEntry := stargzEntry{broot, b}
+ return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry)
+}
+
+func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader {
+ buf := new(bytes.Buffer)
+ var w io.WriteCloser
+ var err error
+ if srcCompression == gzipType {
+ w = gzip.NewWriter(buf)
+ } else if srcCompression == zstdType {
+ w, err = zstd.NewWriter(buf)
+ if err != nil {
+ t.Fatalf("failed to init zstd writer: %v", err)
+ }
+ } else {
+ return src
+ }
+ src.Seek(0, io.SeekStart)
+ if _, err := io.Copy(w, src); err != nil {
+ t.Fatalf("failed to compress source")
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("failed to finalize compress source")
+ }
+ data := buf.Bytes()
+ return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+type stargzEntry struct {
+ e *TOCEntry
+ r *Reader
+}
+
+// contains checks that all child entries in "b" are also contained in "a".
+// This function also checks that the files/chunks have the same contents in "a" and "b".
+func contains(t *testing.T, a, b stargzEntry) bool {
+ ae, ar := a.e, a.r
+ be, br := b.e, b.r
+ t.Logf("Comparing: %q vs %q", ae.Name, be.Name)
+ if !equalEntry(ae, be) {
+ t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be)
+ return false
+ }
+ if ae.Type == "dir" {
+ t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name,
+ allChildrenName(ae), allChildrenName(be))
+ iscontain := true
+ ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool {
+ // Walk through all files on this stargz file.
+
+ if aChild.Name == PrefetchLandmark ||
+ aChild.Name == NoPrefetchLandmark {
+ return true // Ignore landmarks
+ }
+
+ // Ignore a TOCEntry of "./" (formatted as "" by the stargz lib) on the root directory
+ // because this points to the root directory itself.
+ if aChild.Name == "" && ae.Name == "" {
+ return true
+ }
+
+ bChild, ok := be.LookupChild(aBaseName)
+ if !ok {
+ t.Logf("%q (base: %q): not found in b: %v",
+ ae.Name, aBaseName, allChildrenName(be))
+ iscontain = false
+ return false
+ }
+
+ childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r})
+ if !childcontain {
+ t.Logf("%q != %q: non-equal dir", ae.Name, be.Name)
+ iscontain = false
+ return false
+ }
+ return true
+ })
+ return iscontain
+ } else if ae.Type == "reg" {
+ af, err := ar.OpenFile(ae.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on A: %v", ae.Name, err)
+ }
+ bf, err := br.OpenFile(be.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on B: %v", be.Name, err)
+ }
+
+ var nr int64
+ for nr < ae.Size {
+ abytes, anext, aok := readOffset(t, af, nr, a)
+ bbytes, bnext, bok := readOffset(t, bf, nr, b)
+ if !aok && !bok {
+ break
+ } else if !(aok && bok) || anext != bnext {
+ t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
+ ae.Name, be.Name, nr, aok, bok, anext, bnext)
+ return false
+ }
+ nr = anext
+ if !bytes.Equal(abytes, bbytes) {
+ t.Logf("%q != %q: different contents %v vs %v",
+ ae.Name, be.Name, string(abytes), string(bbytes))
+ return false
+ }
+ }
+ return true
+ }
+
+ return true
+}
+
+func allChildrenName(e *TOCEntry) (children []string) {
+ e.ForeachChild(func(baseName string, _ *TOCEntry) bool {
+ children = append(children, baseName)
+ return true
+ })
+ return
+}
+
+func equalEntry(a, b *TOCEntry) bool {
+ // Here, we selectively compare the fields that we are interested in.
+ return a.Name == b.Name &&
+ a.Type == b.Type &&
+ a.Size == b.Size &&
+ a.ModTime3339 == b.ModTime3339 &&
+ a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time
+ a.LinkName == b.LinkName &&
+ a.Mode == b.Mode &&
+ a.UID == b.UID &&
+ a.GID == b.GID &&
+ a.Uname == b.Uname &&
+ a.Gname == b.Gname &&
+ (a.Offset >= 0) == (b.Offset >= 0) &&
+ (a.NextOffset() > 0) == (b.NextOffset() > 0) &&
+ a.DevMajor == b.DevMajor &&
+ a.DevMinor == b.DevMinor &&
+ a.NumLink == b.NumLink &&
+ reflect.DeepEqual(a.Xattrs, b.Xattrs) &&
+ // chunk-related information isn't compared in this function.
+ // ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ // ChunkSize int64 `json:"chunkSize,omitempty"`
+ // children map[string]*TOCEntry
+ a.Digest == b.Digest
+}
+
+func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) {
+ ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset)
+ if !ok {
+ return nil, 0, false
+ }
+ data := make([]byte, ce.ChunkSize)
+ t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset())
+ n, err := r.ReadAt(data, ce.ChunkOffset)
+ if err != nil {
+ t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v",
+ e.e.Name, ce.ChunkOffset, ce.ChunkSize, err)
+ }
+ if int64(n) != ce.ChunkSize {
+ t.Fatalf("unexpected copied data size %d; want %d",
+ n, ce.ChunkSize)
+ }
+ return data[:n], offset + ce.ChunkSize, true
+}
+
+func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {
+ jtocData, err := json.Marshal(*tocJSON)
+ if err != nil {
+ t.Fatalf("failed to marshal TOC JSON: %v", err)
+ }
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil {
+ t.Fatalf("failed to read toc json blob: %v", err)
+ }
+ return buf.String()
+}
+
+const chunkSize = 3
+
+type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)
+
+// testDigestAndVerify runs specified checks against sample stargz blobs.
+func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) {
+ tests := []struct {
+ name string
+ tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry)
+ checks []check
+ minChunkSize []int
+ }{
+ {
+ name: "no-regfile",
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ dir("test/"),
+ )
+ },
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ dir("test2/"), // modified
+ ), allowedPrefix[0])),
+ },
+ },
+ {
+ name: "small-files",
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ regDigest(t, "baz.txt", "", dgstMap),
+ regDigest(t, "foo.txt", "a", dgstMap),
+ dir("test/"),
+ regDigest(t, "test/bar.txt", "bbb", dgstMap),
+ )
+ },
+ minChunkSize: []int{0, 64000},
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ file("baz.txt", ""),
+ file("foo.txt", "M"), // modified
+ dir("test/"),
+ file("test/bar.txt", "bbb"),
+ ), allowedPrefix[0])),
+ // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO
+ checkVerifyBrokenContentFail("foo.txt"),
+ },
+ },
+ {
+ name: "big-files",
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
+ regDigest(t, "foo.txt", "a", dgstMap),
+ dir("test/"),
+ regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
+ )
+ },
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified
+ file("foo.txt", "a"),
+ dir("test/"),
+ file("test/bar.txt", "testbartestbar"),
+ ), allowedPrefix[0])),
+ checkVerifyInvalidTOCEntryFail("test/bar.txt"),
+ checkVerifyBrokenContentFail("test/bar.txt"),
+ },
+ },
+ {
+ name: "with-non-regfiles",
+ minChunkSize: []int{0, 64000},
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
+ regDigest(t, "foo.txt", "a", dgstMap),
+ regDigest(t, "bar/foo2.txt", "b", dgstMap),
+ regDigest(t, "foo3.txt", "c", dgstMap),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
+ dir("test2/"),
+ link("test2/bazlink", "baz.txt"),
+ )
+ },
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ file("baz.txt", "bazbazbazbazbazbazbaz"),
+ file("foo.txt", "a"),
+ file("bar/foo2.txt", "b"),
+ file("foo3.txt", "c"),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ file("test/bar.txt", "testbartestbar"),
+ dir("test2/"),
+ link("test2/bazlink", "foo.txt"), // modified
+ ), allowedPrefix[0])),
+ checkVerifyInvalidTOCEntryFail("test/bar.txt"),
+ checkVerifyBrokenContentFail("test/bar.txt"),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ if len(tt.minChunkSize) == 0 {
+ tt.minChunkSize = []int{0}
+ }
+ for _, srcCompression := range srcCompressions {
+ srcCompression := srcCompression
+ for _, newCL := range controllers {
+ newCL := newCL
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, minChunkSize := range tt.minChunkSize {
+ minChunkSize := minChunkSize
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
+ // Get original tar file and chunk digests
+ dgstMap := make(map[string]digest.Digest)
+ tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
+
+ cl := newCL()
+ rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+ WithChunkSize(chunkSize), WithCompression(cl))
+ if err != nil {
+ t.Fatalf("failed to convert stargz: %v", err)
+ }
+ tocDigest := rc.TOCDigest()
+ defer rc.Close()
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ newStargz := buf.Bytes()
+ // NoPrefetchLandmark is added during `Build`, which is expected behaviour.
+ dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
+
+ for _, check := range tt.checks {
+ check(t, newStargz, tocDigest, dgstMap, cl, newCL)
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// checkStargzTOC checks that the TOC JSON of the passed stargz has the expected
+// digest and contains valid chunks. It walks all entries in the stargz and
+// checks that all chunk digests stored in the TOC JSON match the actual contents.
+func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Errorf("failed to parse converted stargz: %v", err)
+ return
+ }
+ digestMapTOC, err := listDigests(io.NewSectionReader(
+ bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ controller,
+ )
+ if err != nil {
+ t.Fatalf("failed to list digest: %v", err)
+ }
+ found := make(map[string]bool)
+ for id := range dgstMap {
+ found[id] = false
+ }
+ zr, err := controller.Reader(bytes.NewReader(sgzData))
+ if err != nil {
+ t.Fatalf("failed to decompress converted stargz: %v", err)
+ }
+ defer zr.Close()
+ tr := tar.NewReader(zr)
+ for {
+ h, err := tr.Next()
+ if err != nil {
+ if err != io.EOF {
+ t.Errorf("failed to read tar entry: %v", err)
+ return
+ }
+ break
+ }
+ if h.Name == TOCTarName {
+ // Check the digest of TOC JSON based on the actual contents
+ // It's sure that TOC JSON exists in this archive because
+ // Open succeeded.
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.Copy(dgstr.Hash(), tr); err != nil {
+ t.Fatalf("failed to calculate digest of TOC JSON: %v",
+ err)
+ }
+ if dgstr.Digest() != tocDigest {
+ t.Errorf("invalid TOC JSON %q; want %q", tocDigest, dgstr.Digest())
+ }
+ continue
+ }
+ if _, ok := sgz.Lookup(h.Name); !ok {
+ t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+ return
+ }
+ var n int64
+ for n < h.Size {
+ ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+ if !ok {
+ t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+ h.Name, n)
+ return
+ }
+
+ // Get the original digest to make sure the file contents are kept unchanged
+ // from the original tar, during the whole conversion steps.
+ id := chunkID(h.Name, n, ce.ChunkSize)
+ want, ok := dgstMap[id]
+ if !ok {
+ t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v",
+ h.Name, n, ce.ChunkSize, dgstMap)
+ return
+ }
+ found[id] = true
+
+ // Check the file contents
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil {
+ t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)",
+ h.Name, n, ce.ChunkSize)
+ }
+ if want != dgstr.Digest() {
+ t.Errorf("Invalid contents in converted stargz %q: %q; want %q",
+ h.Name, dgstr.Digest(), want)
+ return
+ }
+
+ // Check the digest stored in TOC JSON
+ dgstTOC, ok := digestMapTOC[ce.Offset]
+ if !ok {
+ t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered",
+ h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset)
+ }
+ if want != dgstTOC {
+ t.Errorf("Invalid digest in TOCEntry %q: %q; want %q",
+ h.Name, dgstTOC, want)
+ return
+ }
+
+ n += ce.ChunkSize
+ }
+ }
+
+ for id, ok := range found {
+ if !ok {
+ t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+ }
+ }
+}
+
+// checkVerifyTOC checks that verification works for the TOC JSON of the passed
+// stargz. It walks all entries in the stargz and checks that verification
+// succeeds for all chunks.
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Errorf("failed to parse converted stargz: %v", err)
+ return
+ }
+ ev, err := sgz.VerifyTOC(tocDigest)
+ if err != nil {
+ t.Errorf("failed to verify stargz: %v", err)
+ return
+ }
+
+ found := make(map[string]bool)
+ for id := range dgstMap {
+ found[id] = false
+ }
+ zr, err := controller.Reader(bytes.NewReader(sgzData))
+ if err != nil {
+ t.Fatalf("failed to decompress converted stargz: %v", err)
+ }
+ defer zr.Close()
+ tr := tar.NewReader(zr)
+ for {
+ h, err := tr.Next()
+ if err != nil {
+ if err != io.EOF {
+ t.Errorf("failed to read tar entry: %v", err)
+ return
+ }
+ break
+ }
+ if h.Name == TOCTarName {
+ continue
+ }
+ if _, ok := sgz.Lookup(h.Name); !ok {
+ t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+ return
+ }
+ var n int64
+ for n < h.Size {
+ ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+ if !ok {
+ t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+ h.Name, n)
+ return
+ }
+
+ v, err := ev.Verifier(ce)
+ if err != nil {
+ t.Errorf("failed to get verifier for %q(offset=%d)", h.Name, n)
+ }
+
+ found[chunkID(h.Name, n, ce.ChunkSize)] = true
+
+ // Check the file contents
+ if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil {
+ t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+ h.Name, n, ce.ChunkSize)
+ }
+ if !v.Verified() {
+ t.Errorf("Invalid contents in converted stargz %q (should be succeeded)",
+ h.Name)
+ return
+ }
+ n += ce.ChunkSize
+ }
+ }
+
+ for id, ok := range found {
+ if !ok {
+ t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+ }
+ }
+}
+
+// checkVerifyInvalidTOCEntryFail checks that an invalid TOC JSON entry is
+// detected during verification and that the verification returns an error.
+func checkVerifyInvalidTOCEntryFail(filename string) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ funcs := map[string]rewriteFunc{
+ "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ var found bool
+ for _, e := range toc.Entries {
+ if cleanEntryName(e.Name) == filename {
+ if e.Type != "reg" && e.Type != "chunk" {
+ t.Fatalf("entry %q to break must be regfile or chunk", filename)
+ }
+ if e.ChunkDigest == "" {
+ t.Fatalf("entry %q is already invalid", filename)
+ }
+ e.ChunkDigest = ""
+ found = true
+ }
+ }
+ if !found {
+ t.Fatalf("rewrite target not found")
+ }
+ },
+ "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ var (
+ sampleEntry *TOCEntry
+ targetEntry *TOCEntry
+ )
+ for _, e := range toc.Entries {
+ if e.Type == "reg" || e.Type == "chunk" {
+ if cleanEntryName(e.Name) == filename {
+ targetEntry = e
+ } else {
+ sampleEntry = e
+ }
+ }
+ }
+ if sampleEntry == nil {
+ t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
+ }
+ if targetEntry == nil {
+ t.Fatalf("rewrite target not found")
+ }
+ targetEntry.Offset = sampleEntry.Offset
+ },
+ }
+
+ for name, rFunc := range funcs {
+ t.Run(name, func(t *testing.T) {
+ newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, newSgz); err != nil {
+ t.Fatalf("failed to get converted stargz")
+ }
+ isgz := buf.Bytes()
+
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ _, err = sgz.VerifyTOC(newTocDigest)
+ if err == nil {
+ t.Errorf("must fail for invalid TOC")
+ return
+ }
+ })
+ }
+ }
+}
+
+// checkVerifyInvalidStargzFail checks if the verification detects that the
+// given stargz file doesn't match the expected digest and returns an error.
+func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ cl := newController()
+ rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
+ if err != nil {
+ t.Fatalf("failed to convert stargz: %v", err)
+ }
+ defer rc.Close()
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ mStargz := buf.Bytes()
+
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
+ WithDecompressors(cl),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ _, err = sgz.VerifyTOC(tocDigest)
+ if err == nil {
+ t.Errorf("must fail for invalid TOC")
+ return
+ }
+ }
+}
+
+// checkVerifyBrokenContentFail checks if the verifier detects broken contents
+// that don't match the expected digest and returns an error.
+func checkVerifyBrokenContentFail(filename string) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ // Parse stargz file
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ ev, err := sgz.VerifyTOC(tocDigest)
+ if err != nil {
+ t.Fatalf("failed to verify stargz: %v", err)
+ return
+ }
+
+ // Open the target file
+ sr, err := sgz.OpenFile(filename)
+ if err != nil {
+ t.Fatalf("failed to open file %q", filename)
+ }
+ ce, ok := sgz.ChunkEntryForOffset(filename, 0)
+ if !ok {
+ t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0)
+ return
+ }
+ if ce.ChunkSize == 0 {
+ t.Fatalf("file mustn't be empty")
+ return
+ }
+ data := make([]byte, ce.ChunkSize)
+ if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil {
+ t.Errorf("failed to get data of a chunk of %q(offset=%q)",
+ filename, ce.ChunkOffset)
+ }
+
+ // Check the broken chunk (must fail)
+ v, err := ev.Verifier(ce)
+ if err != nil {
+ t.Fatalf("failed to get verifier for %q", filename)
+ }
+ broken := append([]byte{^data[0]}, data[1:]...)
+ if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil {
+ t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+ filename, ce.ChunkOffset, ce.ChunkSize)
+ }
+ if v.Verified() {
+ t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)",
+ filename, data, broken)
+ }
+ }
+}
+
+func chunkID(name string, offset, size int64) string {
+ return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
+}
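+
+// For illustration, chunkID("./foo/bar.txt", 0, 4) yields "foo/bar.txt-0-4"
+// (cleanEntryName strips the leading "./"); these IDs are the keys of the
+// digest maps used throughout these tests.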
+
+type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
+
+func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
+ decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
+ if err != nil {
+ t.Fatalf("failed to extract TOC JSON: %v", err)
+ }
+
+ rewrite(t, decodedJTOC, sgz)
+
+ tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset)
+ if err != nil {
+ t.Fatalf("failed to create toc and footer: %v", err)
+ }
+
+ // Reconstruct stargz file with the modified TOC JSON
+ if _, err := sgz.Seek(0, io.SeekStart); err != nil {
+ t.Fatalf("failed to reset the seek position of stargz: %v", err)
+ }
+ return io.MultiReader(
+ io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON)
+ tocFooter, // Rewritten TOC and footer
+ ), tocDigest
+}
+
+func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) {
+ decodedJTOC, _, err := parseStargz(sgz, controller)
+ if err != nil {
+ return nil, err
+ }
+ digestMap := make(map[int64]digest.Digest)
+ for _, e := range decodedJTOC.Entries {
+ if e.Type == "reg" || e.Type == "chunk" {
+ if e.Type == "reg" && e.Size == 0 {
+ continue // ignores empty file
+ }
+ if e.ChunkDigest == "" {
+ return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
+ e.Name, e.Offset)
+ }
+ d, err := digest.Parse(e.ChunkDigest)
+ if err != nil {
+ return nil, err
+ }
+ digestMap[e.Offset] = d
+ }
+ }
+ return digestMap, nil
+}
+
+func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) {
+ fSize := controller.FooterSize()
+ footer := make([]byte, fSize)
+ if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil {
+ return nil, 0, fmt.Errorf("error reading footer: %w", err)
+ }
+ _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
+ if err != nil {
+ return nil, 0, fmt.Errorf("failed to parse footer: %w", err)
+ }
+
+ // Decode the TOC JSON
+ var tocReader io.Reader
+ if tocOffset >= 0 {
+ tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
+ }
+ decodedJTOC, _, err = controller.ParseTOC(tocReader)
+ if err != nil {
+ return nil, 0, fmt.Errorf("failed to parse TOC: %w", err)
+ }
+ return decodedJTOC, tocOffset, nil
+}
+
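+// testWriteAndOpen tests the writer and the reader in combination: it builds
+// eStargz blobs from various tar inputs and verifies the resulting TOC
+// entries, the number of compression streams, and the file contents.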
+func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
+ const content = "Some contents"
+ invalidUtf8 := "\xff\xfe\xfd"
+
+ xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8}
+ sampleOwner := owner{uid: 50, gid: 100}
+
+ data64KB := randomContents(64000)
+
+ tests := []struct {
+ name string
+ chunkSize int
+ minChunkSize int
+ in []tarEntry
+ want []stargzCheck
+ wantNumGz int // expected number of streams
+
+ wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
+ wantFailOnLossLess bool
+ wantTOCVersion int // default = 1
+ }{
+ {
+ name: "empty",
+ in: tarOf(),
+ wantNumGz: 2, // (empty tar) + TOC + footer
+ want: checks(
+ numTOCEntries(0),
+ ),
+ },
+ {
+ name: "1dir_1empty_file",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/bar.txt", ""),
+ ),
+ wantNumGz: 3, // dir, TOC, footer
+ want: checks(
+ numTOCEntries(2),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", 0),
+ entryHasChildren("foo", "bar.txt"),
+ hasFileDigest("foo/bar.txt", digestFor("")),
+ ),
+ },
+ {
+ name: "1dir_1file",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/bar.txt", content, xAttrFile),
+ ),
+ wantNumGz: 4, // dir, bar.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(2),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ hasFileDigest("foo/bar.txt", digestFor(content)),
+ hasFileContentsRange("foo/bar.txt", 0, content),
+ hasFileContentsRange("foo/bar.txt", 1, content[1:]),
+ entryHasChildren("", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasFileXattrs("foo/bar.txt", "foo", "bar"),
+ hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8),
+ ),
+ },
+ {
+ name: "2meta_2file",
+ in: tarOf(
+ dir("bar/", sampleOwner),
+ dir("foo/", sampleOwner),
+ file("foo/bar.txt", content, sampleOwner),
+ ),
+ wantNumGz: 4, // both dirs, bar.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(3),
+ hasDir("bar/"),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ entryHasChildren("", "bar", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasChunkEntries("foo/bar.txt", 1),
+ hasEntryOwner("bar/", sampleOwner),
+ hasEntryOwner("foo/", sampleOwner),
+ hasEntryOwner("foo/bar.txt", sampleOwner),
+ ),
+ },
+ {
+ name: "3dir",
+ in: tarOf(
+ dir("bar/"),
+ dir("foo/"),
+ dir("foo/bar/"),
+ ),
+ wantNumGz: 3, // 3 dirs, TOC, footer
+ want: checks(
+ hasDirLinkCount("bar/", 2),
+ hasDirLinkCount("foo/", 3),
+ hasDirLinkCount("foo/bar/", 2),
+ ),
+ },
+ {
+ name: "symlink",
+ in: tarOf(
+ dir("foo/"),
+ symlink("foo/bar", "../../x"),
+ ),
+ wantNumGz: 3, // metas + TOC + footer
+ want: checks(
+ numTOCEntries(2),
+ hasSymlink("foo/bar", "../../x"),
+ entryHasChildren("", "foo"),
+ entryHasChildren("foo", "bar"),
+ ),
+ },
+ {
+ name: "chunked_file",
+ chunkSize: 4,
+ in: tarOf(
+ dir("foo/"),
+ file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"),
+ ),
+ wantNumGz: 9, // dir + big.txt(6 chunks) + TOC + footer
+ want: checks(
+ numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file
+ hasDir("foo/"),
+ hasFileLen("foo/big.txt", len("This is such a big file")),
+ hasFileDigest("foo/big.txt", digestFor("This is such a big file")),
+ hasFileContentsRange("foo/big.txt", 0, "This is such a big file"),
+ hasFileContentsRange("foo/big.txt", 1, "his is such a big file"),
+ hasFileContentsRange("foo/big.txt", 2, "is is such a big file"),
+ hasFileContentsRange("foo/big.txt", 3, "s is such a big file"),
+ hasFileContentsRange("foo/big.txt", 4, " is such a big file"),
+ hasFileContentsRange("foo/big.txt", 5, "is such a big file"),
+ hasFileContentsRange("foo/big.txt", 6, "s such a big file"),
+ hasFileContentsRange("foo/big.txt", 7, " such a big file"),
+ hasFileContentsRange("foo/big.txt", 8, "such a big file"),
+ hasFileContentsRange("foo/big.txt", 9, "uch a big file"),
+ hasFileContentsRange("foo/big.txt", 10, "ch a big file"),
+ hasFileContentsRange("foo/big.txt", 11, "h a big file"),
+ hasFileContentsRange("foo/big.txt", 12, " a big file"),
+ hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""),
+ hasChunkEntries("foo/big.txt", 6),
+ ),
+ },
+ {
+ name: "recursive",
+ in: tarOf(
+ dir("/", sampleOwner),
+ dir("bar/", sampleOwner),
+ dir("foo/", sampleOwner),
+ file("foo/bar.txt", content, sampleOwner),
+ ),
+ wantNumGz: 4, // dirs, bar.txt alone, TOC, footer
+ want: checks(
+ maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt"
+ ),
+ },
+ {
+ name: "block_char_fifo",
+ in: tarOf(
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Name: prefix + "b",
+ Typeflag: tar.TypeBlock,
+ Devmajor: 123,
+ Devminor: 456,
+ Format: format,
+ })
+ }),
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Name: prefix + "c",
+ Typeflag: tar.TypeChar,
+ Devmajor: 111,
+ Devminor: 222,
+ Format: format,
+ })
+ }),
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Name: prefix + "f",
+ Typeflag: tar.TypeFifo,
+ Format: format,
+ })
+ }),
+ ),
+ wantNumGz: 3,
+ want: checks(
+ lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}),
+ lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}),
+ lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}),
+ ),
+ },
+ {
+ name: "modes",
+ in: tarOf(
+ dir("foo1/", 0755|os.ModeDir|os.ModeSetgid),
+ file("foo1/bar1", content, 0700|os.ModeSetuid),
+ file("foo1/bar2", content, 0755|os.ModeSetgid),
+ dir("foo2/", 0755|os.ModeDir|os.ModeSticky),
+ file("foo2/bar3", content, 0755|os.ModeSticky),
+ dir("foo3/", 0755|os.ModeDir),
+ file("foo3/bar4", content, os.FileMode(0700)),
+ file("foo3/bar5", content, os.FileMode(0755)),
+ ),
+ wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer
+ want: checks(
+ hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid),
+ hasMode("foo1/bar1", 0700|os.ModeSetuid),
+ hasMode("foo1/bar2", 0755|os.ModeSetgid),
+ hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky),
+ hasMode("foo2/bar3", 0755|os.ModeSticky),
+ hasMode("foo3/", 0755|os.ModeDir),
+ hasMode("foo3/bar4", os.FileMode(0700)),
+ hasMode("foo3/bar5", os.FileMode(0755)),
+ ),
+ },
+ {
+ name: "lossy",
+ in: tarOf(
+ dir("bar/", sampleOwner),
+ dir("foo/", sampleOwner),
+ file("foo/bar.txt", content, sampleOwner),
+ file(TOCTarName, "dummy"), // ignored by the writer. (lossless write returns error)
+ ),
+ wantNumGz: 4, // both dirs, bar.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(3),
+ hasDir("bar/"),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ entryHasChildren("", "bar", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasChunkEntries("foo/bar.txt", 1),
+ hasEntryOwner("bar/", sampleOwner),
+ hasEntryOwner("foo/", sampleOwner),
+ hasEntryOwner("foo/bar.txt", sampleOwner),
+ ),
+ wantFailOnLossLess: true,
+ },
+ {
+ name: "hardlink should be replaced to the destination entry",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", "test"),
+ link("foolink", "foo/foo1"),
+ ),
+ wantNumGz: 4, // dir, foo1 + link, TOC, footer
+ want: checks(
+ mustSameEntry("foo/foo1", "foolink"),
+ ),
+ },
+ {
+ name: "several_files_in_chunk",
+ minChunkSize: 8000,
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", data64KB),
+ file("foo2", "bb"),
+ file("foo22", "ccc"),
+ dir("bar/"),
+ file("bar/bar.txt", "aaa"),
+ file("foo3", data64KB),
+ ),
+ // NOTE: we assume that the compressed "data64KB" is still larger than 8KB
+ wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer
+ want: checks(
+ numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3
+ hasDir("foo/"),
+ hasDir("bar/"),
+ hasFileLen("foo/foo1", len(data64KB)),
+ hasFileLen("foo2", len("bb")),
+ hasFileLen("foo22", len("ccc")),
+ hasFileLen("bar/bar.txt", len("aaa")),
+ hasFileLen("foo3", len(data64KB)),
+ hasFileDigest("foo/foo1", digestFor(data64KB)),
+ hasFileDigest("foo2", digestFor("bb")),
+ hasFileDigest("foo22", digestFor("ccc")),
+ hasFileDigest("bar/bar.txt", digestFor("aaa")),
+ hasFileDigest("foo3", digestFor(data64KB)),
+ hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}),
+ hasFileContentsRange("foo/foo1", 0, data64KB),
+ hasFileContentsRange("foo2", 0, "bb"),
+ hasFileContentsRange("foo2", 1, "b"),
+ hasFileContentsRange("foo22", 0, "ccc"),
+ hasFileContentsRange("foo22", 1, "cc"),
+ hasFileContentsRange("foo22", 2, "c"),
+ hasFileContentsRange("bar/bar.txt", 0, "aaa"),
+ hasFileContentsRange("bar/bar.txt", 1, "aa"),
+ hasFileContentsRange("bar/bar.txt", 2, "a"),
+ hasFileContentsRange("foo3", 0, data64KB),
+ hasFileContentsRange("foo3", 1, data64KB[1:]),
+ hasFileContentsRange("foo3", 2, data64KB[2:]),
+ hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+ hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+ ),
+ },
+ {
+ name: "several_files_in_chunk_chunked",
+ minChunkSize: 8000,
+ chunkSize: 32000,
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", data64KB),
+ file("foo2", "bb"),
+ dir("bar/"),
+ file("foo3", data64KB),
+ ),
+ // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB
+ wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer
+ want: checks(
+ numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks)
+ hasDir("foo/"),
+ hasDir("bar/"),
+ hasFileLen("foo/foo1", len(data64KB)),
+ hasFileLen("foo2", len("bb")),
+ hasFileLen("foo3", len(data64KB)),
+ hasFileDigest("foo/foo1", digestFor(data64KB)),
+ hasFileDigest("foo2", digestFor("bb")),
+ hasFileDigest("foo3", digestFor(data64KB)),
+ hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}),
+ hasFileContentsRange("foo/foo1", 0, data64KB),
+ hasFileContentsRange("foo/foo1", 1, data64KB[1:]),
+ hasFileContentsRange("foo/foo1", 2, data64KB[2:]),
+ hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+ hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+ hasFileContentsRange("foo2", 0, "bb"),
+ hasFileContentsRange("foo2", 1, "b"),
+ hasFileContentsRange("foo3", 0, data64KB),
+ hasFileContentsRange("foo3", 1, data64KB[1:]),
+ hasFileContentsRange("foo3", 2, data64KB[2:]),
+ hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+ hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+ ),
+ },
+ }
+
+ for _, tt := range tests {
+ for _, newCL := range controllers {
+ newCL := newCL
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, lossless := range []bool{true, false} {
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) {
+ var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
+ origTarDgstr := digest.Canonical.Digester()
+ tr = io.TeeReader(tr, origTarDgstr.Hash())
+ var stargzBuf bytes.Buffer
+ cl1 := newCL()
+ w := NewWriterWithCompressor(&stargzBuf, cl1)
+ w.ChunkSize = tt.chunkSize
+ w.MinChunkSize = tt.minChunkSize
+ if lossless {
+ err := w.AppendTarLossLess(tr)
+ if tt.wantFailOnLossLess {
+ if err != nil {
+ return // expected to fail
+ }
+ t.Fatalf("Append wanted to fail on lossless")
+ }
+ if err != nil {
+ t.Fatalf("Append(lossless): %v", err)
+ }
+ } else {
+ if err := w.AppendTar(tr); err != nil {
+ t.Fatalf("Append: %v", err)
+ }
+ }
+ if _, err := w.Close(); err != nil {
+ t.Fatalf("Writer.Close: %v", err)
+ }
+ b := stargzBuf.Bytes()
+
+ if lossless {
+ // Check that the result blob preserves the original tar metadata
+ rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1)
+ if err != nil {
+ t.Errorf("failed to decompress blob: %v", err)
+ return
+ }
+ defer rc.Close()
+ resultDgstr := digest.Canonical.Digester()
+ if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil {
+ t.Errorf("failed to read result decompressed blob: %v", err)
+ return
+ }
+ if resultDgstr.Digest() != origTarDgstr.Digest() {
+ t.Errorf("lossy compression occurred: digest=%v; want %v",
+ resultDgstr.Digest(), origTarDgstr.Digest())
+ return
+ }
+ }
+
+ diffID := w.DiffID()
+ wantDiffID := cl1.DiffIDOf(t, b)
+ if diffID != wantDiffID {
+ t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+ }
+
+ telemetry, checkCalled := newCalledTelemetry()
+ sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
+ r, err := Open(
+ sr,
+ WithDecompressors(cl1),
+ WithTelemetry(telemetry),
+ )
+ if err != nil {
+ t.Fatalf("stargz.Open: %v", err)
+ }
+ wantTOCVersion := 1
+ if tt.wantTOCVersion > 0 {
+ wantTOCVersion = tt.wantTOCVersion
+ }
+ if r.toc.Version != wantTOCVersion {
+ t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion)
+ }
+
+ footerSize := cl1.FooterSize()
+ footerOffset := sr.Size() - footerSize
+ footer := make([]byte, footerSize)
+ if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+ t.Errorf("failed to read footer: %v", err)
+ }
+ _, tocOffset, _, err := cl1.ParseFooter(footer)
+ if err != nil {
+ t.Errorf("failed to parse footer: %v", err)
+ }
+ if err := checkCalled(tocOffset >= 0); err != nil {
+ t.Errorf("telemetry failure: %v", err)
+ }
+
+ wantNumGz := tt.wantNumGz
+ if lossless && tt.wantNumGzLossLess > 0 {
+ wantNumGz = tt.wantNumGzLossLess
+ }
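+ // Count the streams we expect in this blob: each increase of the entry
+ // Offset starts a new stream, and the TOC and the footer each add one more.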
+ streamOffsets := []int64{0}
+ prevOffset := int64(-1)
+ streams := 0
+ for _, e := range r.toc.Entries {
+ if e.Offset > prevOffset {
+ streamOffsets = append(streamOffsets, e.Offset)
+ prevOffset = e.Offset
+ streams++
+ }
+ }
+ streams++ // TOC
+ if tocOffset >= 0 {
+ // toc is in the blob
+ streamOffsets = append(streamOffsets, tocOffset)
+ }
+ streams++ // footer
+ streamOffsets = append(streamOffsets, footerOffset)
+ if streams != wantNumGz {
+ t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz)
+ }
+
+ t.Logf("testing streams: %+v", streamOffsets)
+ cl1.TestStreams(t, b, streamOffsets)
+
+ for _, want := range tt.want {
+ want.check(t, r)
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+}
+
+type chunkInfo struct {
+ name string
+ data string
+}
+
+func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) {
+ var getFooterLatencyCalled bool
+ var getTocLatencyCalled bool
+ var deserializeTocLatencyCalled bool
+ return &Telemetry{
+ func(time.Time) { getFooterLatencyCalled = true },
+ func(time.Time) { getTocLatencyCalled = true },
+ func(time.Time) { deserializeTocLatencyCalled = true },
+ }, func(needsGetTOC bool) error {
+ var allErr []error
+ if !getFooterLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called"))
+ }
+ if needsGetTOC {
+ if !getTocLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called"))
+ }
+ }
+ if !deserializeTocLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called"))
+ }
+ return errorutil.Aggregate(allErr)
+ }
+}
+
+func digestFor(content string) string {
+ sum := sha256.Sum256([]byte(content))
+ return fmt.Sprintf("sha256:%x", sum)
+}
+
+type numTOCEntries int
+
+func (n numTOCEntries) check(t *testing.T, r *Reader) {
+ if r.toc == nil {
+ t.Fatal("nil TOC")
+ }
+ if got, want := len(r.toc.Entries), int(n); got != want {
+ t.Errorf("got %d TOC entries; want %d", got, want)
+ }
+ t.Logf("got TOC entries:")
+ for i, ent := range r.toc.Entries {
+ entj, _ := json.Marshal(ent)
+ t.Logf(" [%d]: %s\n", i, entj)
+ }
+ if t.Failed() {
+ t.FailNow()
+ }
+}
+
+func checks(s ...stargzCheck) []stargzCheck { return s }
+
+type stargzCheck interface {
+ check(t *testing.T, r *Reader)
+}
+
+type stargzCheckFn func(*testing.T, *Reader)
+
+func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) }
+
+func maxDepth(max int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ e, ok := r.Lookup("")
+ if !ok {
+ t.Fatal("root directory not found")
+ }
+ d, err := getMaxDepth(t, e, 0, 10*max)
+ if err != nil {
+ t.Errorf("failed to get max depth (wanted %d): %v", max, err)
+ return
+ }
+ if d != max {
+ t.Errorf("invalid depth %d; want %d", d, max)
+ return
+ }
+ })
+}
+
+func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) {
+ if current > limit {
+ return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d",
+ current, limit)
+ }
+ max = current
+ e.ForeachChild(func(baseName string, ent *TOCEntry) bool {
+ t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name)
+ d, err := getMaxDepth(t, ent, current+1, limit)
+ if err != nil {
+ rErr = err
+ return false
+ }
+ if d > max {
+ max = d
+ }
+ return true
+ })
+ return
+}
+
+func hasFileLen(file string, wantLen int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == file {
+ if ent.Type != "reg" {
+ t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type)
+ } else if ent.Size != int64(wantLen) {
+ t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen)
+ }
+ return
+ }
+ }
+ t.Errorf("file %q not found", file)
+ })
+}
+
+func hasFileXattrs(file, name, value string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == file {
+ if ent.Type != "reg" {
+ t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type)
+ }
+ if ent.Xattrs == nil {
+ t.Errorf("file %q has no xattrs", file)
+ return
+ }
+ valueFound, found := ent.Xattrs[name]
+ if !found {
+ t.Errorf("file %q has no xattr %q", file, name)
+ return
+ }
+ if string(valueFound) != value {
+ t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value)
+ }
+
+ return
+ }
+ }
+ t.Errorf("file %q not found", file)
+ })
+}
+
+func hasFileDigest(file string, digest string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ ent, ok := r.Lookup(file)
+ if !ok {
+ t.Fatalf("didn't find TOCEntry for file %q", file)
+ }
+ if ent.Digest != digest {
+ t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest)
+ }
+ })
+}
+
+func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ extraMap := make(map[string]chunkInfo)
+ for _, e := range extra {
+ extraMap[e.name] = e
+ }
+ var extraNames []string
+ for n := range extraMap {
+ extraNames = append(extraNames, n)
+ }
+ f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error {
+ t.Logf("On %q: got preread of %q", file, e.Name)
+ ex, ok := extraMap[e.Name]
+ if !ok {
+ t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames)
+ }
+ got, err := io.ReadAll(cr)
+ if err != nil {
+ t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err)
+ }
+ if ex.data != string(got) {
+ t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data))
+ }
+ delete(extraMap, e.Name)
+ return nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := make([]byte, len(want))
+ n, err := f.ReadAt(got, int64(offset))
+ if err != nil {
+ t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err)
+ }
+ if string(got) != want {
+ t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
+ }
+ if len(extraMap) != 0 {
+ var exNames []string
+ for _, ex := range extraMap {
+ exNames = append(exNames, ex.name)
+ }
+ t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames)
+ }
+ })
+}
+
+func hasFileContentsRange(file string, offset int, want string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ f, err := r.OpenFile(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := make([]byte, len(want))
+ n, err := f.ReadAt(got, int64(offset))
+ if err != nil {
+ t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err)
+ }
+ if string(got) != want {
+ t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
+ }
+ })
+}
+
+func hasChunkEntries(file string, wantChunks int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ ent, ok := r.Lookup(file)
+ if !ok {
+ t.Fatalf("no file for %q", file)
+ }
+ if ent.Type != "reg" {
+ t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type)
+ }
+ chunks := r.getChunks(ent)
+ if len(chunks) != wantChunks {
+ t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks)
+ return
+ }
+ f := chunks[0]
+
+ var gotChunks []*TOCEntry
+ var last *TOCEntry
+ for off := int64(0); off < f.Size; off++ {
+ e, ok := r.ChunkEntryForOffset(file, off)
+ if !ok {
+ t.Errorf("no ChunkEntryForOffset at %d", off)
+ return
+ }
+ if last != e {
+ gotChunks = append(gotChunks, e)
+ last = e
+ }
+ }
+ if !reflect.DeepEqual(chunks, gotChunks) {
+ t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks)
+ }
+
+ // And verify the NextOffset
+ for i := 0; i < len(gotChunks)-1; i++ {
+ ci := gotChunks[i]
+ cnext := gotChunks[i+1]
+ if ci.NextOffset() != cnext.Offset {
+ t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset)
+ }
+ }
+ })
+}
+
+func entryHasChildren(dir string, want ...string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ want := append([]string(nil), want...)
+ var got []string
+ ent, ok := r.Lookup(dir)
+ if !ok {
+ t.Fatalf("didn't find TOCEntry for dir node %q", dir)
+ }
+ for baseName := range ent.children {
+ got = append(got, baseName)
+ }
+ sort.Strings(got)
+ sort.Strings(want)
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("children of %q = %q; want %q", dir, got, want)
+ }
+ })
+}
+
+func hasDir(file string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == cleanEntryName(file) {
+ if ent.Type != "dir" {
+ t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type)
+ }
+ return
+ }
+ }
+ t.Errorf("directory %q not found", file)
+ })
+}
+
+func hasDirLinkCount(file string, count int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == cleanEntryName(file) {
+ if ent.Type != "dir" {
+ t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type)
+ return
+ }
+ if ent.NumLink != count {
+ t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count)
+ }
+ return
+ }
+ }
+ t.Errorf("directory %q not found", file)
+ })
+}
+
+func hasMode(file string, mode os.FileMode) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == cleanEntryName(file) {
+ if ent.Stat().Mode() != mode {
+ t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode)
+ return
+ }
+ return
+ }
+ }
+ t.Errorf("file %q not found", file)
+ })
+}
+
+func hasSymlink(file, target string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == file {
+ if ent.Type != "symlink" {
+ t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type)
+ } else if ent.LinkName != target {
+ t.Errorf("link target of symlink %q is %q; want %q", file, ent.LinkName, target)
+ }
+ return
+ }
+ }
+ t.Errorf("symlink %q not found", file)
+ })
+}
+
+func lookupMatch(name string, want *TOCEntry) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ e, ok := r.Lookup(name)
+ if !ok {
+ t.Fatalf("failed to Lookup entry %q", name)
+ }
+ if !reflect.DeepEqual(e, want) {
+ t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want)
+ }
+ })
+}
+
+func hasEntryOwner(entry string, owner owner) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ ent, ok := r.Lookup(strings.TrimSuffix(entry, "/"))
+ if !ok {
+ t.Errorf("entry %q not found", entry)
+ return
+ }
+ if ent.UID != owner.uid || ent.GID != owner.gid {
+ t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid)
+ return
+ }
+ })
+}
+
+func mustSameEntry(files ...string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ var first *TOCEntry
+ for _, f := range files {
+ if first == nil {
+ var ok bool
+ first, ok = r.Lookup(f)
+ if !ok {
+ t.Errorf("unknown first file on Lookup: %q", f)
+ return
+ }
+ }
+
+ // Test Lookup
+ e, ok := r.Lookup(f)
+ if !ok {
+ t.Errorf("unknown file on Lookup: %q", f)
+ return
+ }
+ if e != first {
+ t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first)
+ return
+ }
+
+ // Test LookupChild
+ pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f)))
+ if !ok {
+ t.Errorf("failed to get parent of %q", f)
+ return
+ }
+ e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f)))
+ if !ok {
+ t.Errorf("failed to get %q as the child of %+v", f, pe)
+ return
+ }
+ if e != first {
+ t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first)
+ return
+ }
+
+ // Test ForeachChild
+ pe.ForeachChild(func(baseName string, e *TOCEntry) bool {
+ if baseName == filepath.Base(filepath.Clean(f)) {
+ if e != first {
+ t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first)
+ return false
+ }
+ }
+ return true
+ })
+ }
+ })
+}
+
+func viewContent(c []byte) string {
+ if len(c) < 100 {
+ return string(c)
+ }
+ return string(c[:50]) + "...(omit)..." + string(c[50:100])
+}
+
+func tarOf(s ...tarEntry) []tarEntry { return s }
+
+type tarEntry interface {
+ appendTar(tw *tar.Writer, prefix string, format tar.Format) error
+}
+
+type tarEntryFunc func(*tar.Writer, string, tar.Format) error
+
+func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error {
+ return f(tw, prefix, format)
+}
+
+func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
+ format := tar.FormatUnknown
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case tar.Format:
+ format = v
+ default:
+ panic(fmt.Errorf("unsupported opt for buildTar: %v", opt))
+ }
+ }
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, ent := range ents {
+ if err := ent.appendTar(tw, prefix, format); err != nil {
+ t.Fatalf("building input tar: %v", err)
+ }
+ }
+ if err := tw.Close(); err != nil {
+ t.Errorf("closing write of input tar: %v", err)
+ }
+ data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to check that the lossless mode preserves them
+ return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+func dir(name string, opts ...interface{}) tarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+ var o owner
+ mode := os.FileMode(0755)
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case owner:
+ o = v
+ case os.FileMode:
+ mode = v
+ default:
+ return errors.New("unsupported opt")
+ }
+ }
+ if !strings.HasSuffix(name, "/") {
+ panic(fmt.Sprintf("missing trailing slash in dir %q ", name))
+ }
+ tm, err := fileModeToTarMode(mode)
+ if err != nil {
+ return err
+ }
+ return tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeDir,
+ Name: prefix + name,
+ Mode: tm,
+ Uid: o.uid,
+ Gid: o.gid,
+ Format: format,
+ })
+ })
+}
+
+// xAttr is a set of extended attributes to set on test files created with the file func.
+type xAttr map[string]string
+
+// owner is the owner to set on test files and directories with the file and dir functions.
+type owner struct {
+ uid int
+ gid int
+}
+
+func file(name, contents string, opts ...interface{}) tarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+ var xattrs xAttr
+ var o owner
+ mode := os.FileMode(0644)
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case xAttr:
+ xattrs = v
+ case owner:
+ o = v
+ case os.FileMode:
+ mode = v
+ default:
+ return errors.New("unsupported opt")
+ }
+ }
+ if strings.HasSuffix(name, "/") {
+ return fmt.Errorf("bogus trailing slash in file %q", name)
+ }
+ tm, err := fileModeToTarMode(mode)
+ if err != nil {
+ return err
+ }
+ if len(xattrs) > 0 {
+ format = tar.FormatPAX // only PAX supports xattrs
+ }
+ if err := tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: prefix + name,
+ Mode: tm,
+ Xattrs: xattrs,
+ Size: int64(len(contents)),
+ Uid: o.uid,
+ Gid: o.gid,
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ _, err = io.WriteString(tw, contents)
+ return err
+ })
+}
+
+func symlink(name, target string) tarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+ return tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeSymlink,
+ Name: prefix + name,
+ Linkname: target,
+ Mode: 0644,
+ Format: format,
+ })
+ })
+}
+
+func link(name string, linkname string) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeLink,
+ Name: prefix + name,
+ Linkname: linkname,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func chardev(name string, major, minor int64) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeChar,
+ Name: prefix + name,
+ Devmajor: major,
+ Devminor: minor,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func blockdev(name string, major, minor int64) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeBlock,
+ Name: prefix + name,
+ Devmajor: major,
+ Devminor: minor,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func fifo(name string) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeFifo,
+ Name: prefix + name,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func prefetchLandmark() tarEntry {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ if err := w.WriteHeader(&tar.Header{
+ Name: PrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ contents := []byte{landmarkContents}
+ if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func noPrefetchLandmark() tarEntry {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ if err := w.WriteHeader(&tar.Header{
+ Name: NoPrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ contents := []byte{landmarkContents}
+ if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func regDigest(t *testing.T, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry {
+ if digestMap == nil {
+ t.Fatalf("digest map mustn't be nil")
+ }
+ content := []byte(contentStr)
+
+ var n int64
+ for n < int64(len(content)) {
+ size := int64(chunkSize)
+ remain := int64(len(content)) - n
+ if remain < size {
+ size = remain
+ }
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil {
+ t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)",
+ string(content[n:n+size]), name, n, size)
+ }
+ digestMap[chunkID(name, n, size)] = dgstr.Digest()
+ n += size
+ }
+
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ if err := w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: prefix + name,
+ Size: int64(len(content)),
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
+func randomContents(n int) string {
+ b := make([]rune, n)
+ for i := range b {
+ b[i] = runes[rand.Intn(len(runes))]
+ }
+ return string(b)
+}
+
+func fileModeToTarMode(mode os.FileMode) (int64, error) {
+ h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "")
+ if err != nil {
+ return 0, err
+ }
+ return h.Mode, nil
+}
+
+// fileInfoOnlyMode is an os.FileInfo implementation that populates only the file mode.
+type fileInfoOnlyMode os.FileMode
+
+func (f fileInfoOnlyMode) Name() string { return "" }
+func (f fileInfoOnlyMode) Size() int64 { return 0 }
+func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) }
+func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
+func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() }
+func (f fileInfoOnlyMode) Sys() interface{} { return nil }
+
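+// CheckGzipHasStreams walks the gzip streams in the blob b, logging each
+// stream boundary it finds. streams is the list of offsets at which gzip
+// streams are expected to start within the blob.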
+func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
+ if len(streams) == 0 {
+ return // nop
+ }
+
+ wants := map[int64]struct{}{}
+ for _, s := range streams {
+ wants[s] = struct{}{}
+ }
+
+ len0 := len(b)
+ br := bytes.NewReader(b)
+ zr := new(gzip.Reader)
+ t.Logf("got gzip streams:")
+ numStreams := 0
+ for {
+ zoff := len0 - br.Len()
+ if err := zr.Reset(br); err != nil {
+ if err == io.EOF {
+ return
+ }
+ t.Fatalf("countStreams(gzip), Reset: %v", err)
+ }
+ zr.Multistream(false)
+ n, err := io.Copy(io.Discard, zr)
+ if err != nil {
+ t.Fatalf("countStreams(gzip), Copy: %v", err)
+ }
+ var extra string
+ if len(zr.Header.Extra) > 0 {
+ extra = fmt.Sprintf("; extra=%q", zr.Header.Extra)
+ }
+ t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra)
+ delete(wants, int64(zoff))
+ numStreams++
+ }
+}
+
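+// GzipDiffIDOf returns the DiffID (sha256 digest of the uncompressed contents)
+// of the gzip-compressed blob b.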
+func GzipDiffIDOf(t *testing.T, b []byte) string {
+ h := sha256.New()
+ zr, err := gzip.NewReader(bytes.NewReader(b))
+ if err != nil {
+ t.Fatalf("diffIDOf(gzip): %v", err)
+ }
+ defer zr.Close()
+ if _, err := io.Copy(h, zr); err != nil {
+ t.Fatalf("diffIDOf(gzip).Copy: %v", err)
+ }
+ return fmt.Sprintf("sha256:%x", h.Sum(nil))
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
new file mode 100644
index 000000000000..57e0aa614e4a
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
@@ -0,0 +1,342 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "hash"
+ "io"
+ "os"
+ "path"
+ "time"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+const (
+ // TOCTarName is the name of the JSON file in the tar archive in the
+ // table of contents gzip stream.
+ TOCTarName = "stargz.index.json"
+
+ // FooterSize is the number of bytes in the footer
+ //
+ // The footer is an empty gzip stream with no compression and an Extra
+ // header of the form "%016xSTARGZ", where the 64 bit hex-encoded
+ // number is the offset to the gzip stream of JSON TOC.
+ //
+ // 51 comes from:
+ //
+ // 10 bytes gzip header
+ // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
+ // 2 bytes Extra: SI1 = 'S', SI2 = 'G'
+ // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
+ // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
+ // 5 bytes flate header
+ // 8 bytes gzip footer
+ // (End of the eStargz blob)
+ //
+ // NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' are used for eStargz.
+ FooterSize = 51
+
+ // legacyFooterSize is the number of bytes in the legacy stargz footer.
+ //
+ // 47 comes from:
+ //
+ // 10 byte gzip header +
+ // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" +
+ // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset))
+ // 5 byte flate header
+ // 8 byte gzip footer (two little endian uint32s: digest, size)
+ legacyFooterSize = 47
+
+ // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the
+ // digest of the TOC JSON.
+ // This annotation is valid only when it is specified in `.[]layers.annotations`
+ // of an image manifest.
+ TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
+
+ // StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy
+ // pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size
+ // to the runtime, but the current OCI image spec doesn't ship this information by default, so we
+ // store it in this special annotation.
+ StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size"
+
+ // PrefetchLandmark is a file entry which indicates the end position of
+ // prefetch in the stargz file.
+ PrefetchLandmark = ".prefetch.landmark"
+
+ // NoPrefetchLandmark is a file entry which indicates that no prefetch should
+ // occur in the stargz file.
+ NoPrefetchLandmark = ".no.prefetch.landmark"
+
+ landmarkContents = 0xf
+)
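+
+// As an illustrative sketch (gzip-based footer only): after the 10-byte gzip
+// header, the 2-byte XLEN, and the 4-byte Extra subfield header, the 16 hex
+// digits of the TOC offset sit at bytes [16:32] of the footer, so the offset
+// can be recovered by hand with
+//
+//	footer := blob[len(blob)-FooterSize:]
+//	tocOffset, err := strconv.ParseInt(string(footer[16:32]), 16, 64)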
+
+// JTOC is the JSON-serialized table of contents index of the files in the stargz file.
+type JTOC struct {
+ Version int `json:"version"`
+ Entries []*TOCEntry `json:"entries"`
+}
+
+// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
+type TOCEntry struct {
+ // Name is the tar entry's name. It is the complete path
+ // stored in the tar file, not just the base name.
+ Name string `json:"name"`
+
+ // Type is one of "dir", "reg", "symlink", "hardlink", "char",
+ // "block", "fifo", or "chunk".
+ // The "chunk" type is used for regular file data chunks past the first
+ // TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
+ // ChunkOffset, and ChunkSize populated.
+ Type string `json:"type"`
+
+ // Size, for regular files, is the logical size of the file.
+ Size int64 `json:"size,omitempty"`
+
+ // ModTime3339 is the modification time of the tar entry. Empty
+ // means zero or unknown. Otherwise it's in UTC RFC3339
+ // format. Use the ModTime method to access the time.Time value.
+ ModTime3339 string `json:"modtime,omitempty"`
+ modTime time.Time
+
+ // LinkName, for symlinks and hardlinks, is the link target.
+ LinkName string `json:"linkName,omitempty"`
+
+ // Mode is the permission and mode bits.
+ Mode int64 `json:"mode,omitempty"`
+
+ // UID is the user ID of the owner.
+ UID int `json:"uid,omitempty"`
+
+ // GID is the group ID of the owner.
+ GID int `json:"gid,omitempty"`
+
+ // Uname is the username of the owner.
+ //
+ // In the serialized JSON, this field may only be present for
+ // the first entry with the same UID.
+ Uname string `json:"userName,omitempty"`
+
+ // Gname is the group name of the owner.
+ //
+ // In the serialized JSON, this field may only be present for
+ // the first entry with the same GID.
+ Gname string `json:"groupName,omitempty"`
+
+ // Offset, for regular files, provides the offset in the
+ // stargz file to the file's data bytes. See ChunkOffset and
+ // ChunkSize.
+ Offset int64 `json:"offset,omitempty"`
+
+ // InnerOffset is an optional field that indicates the uncompressed offset
+ // of this "reg" or "chunk" payload in the stream that starts at Offset.
+ // This field makes it possible to put multiple "reg" or "chunk" payloads
+ // in one stream, sharing the same Offset but with different InnerOffsets.
+ InnerOffset int64 `json:"innerOffset,omitempty"`
+
+ nextOffset int64 // the Offset of the next entry with a non-zero Offset
+
+ // DevMajor is the major device number for "char" and "block" types.
+ DevMajor int `json:"devMajor,omitempty"`
+
+ // DevMinor is the minor device number for "char" and "block" types.
+ DevMinor int `json:"devMinor,omitempty"`
+
+ // NumLink is the number of entry names pointing to this entry.
+ // Zero means one name references this entry.
+ // This field is calculated during runtime and not recorded in TOC JSON.
+ NumLink int `json:"-"`
+
+ // Xattrs are the extended attribute for the entry.
+ Xattrs map[string][]byte `json:"xattrs,omitempty"`
+
+ // Digest stores the OCI checksum for regular files payload.
+ // It has the form "sha256:abcdef01234....".
+ Digest string `json:"digest,omitempty"`
+
+ // ChunkOffset is non-zero if this is a chunk of a large,
+ // regular file. If so, the Offset is where the gzip header of
+ // ChunkSize bytes at ChunkOffset in Name begins.
+ //
+ // In serialized form, a "chunkSize" JSON field of zero means
+ // that the chunk goes to the end of the file. After reading
+ // from the stargz TOC, though, the ChunkSize is initialized
+ // to a non-zero value when Type is either "reg" or
+ // "chunk".
+ ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ ChunkSize int64 `json:"chunkSize,omitempty"`
+
+ // ChunkDigest stores an OCI digest of the chunk. This must be formed
+ // as "sha256:0123abcd...".
+ ChunkDigest string `json:"chunkDigest,omitempty"`
+
+ children map[string]*TOCEntry
+
+ // chunkTopIndex is the index of the entry where Offset starts in the blob.
+ chunkTopIndex int
+}
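+
+// As an illustrative example, a 6-byte regular file chunked at 4 bytes is
+// recorded as two entries: a "reg" entry (ChunkOffset=0, ChunkSize=4) and a
+// "chunk" entry (ChunkOffset=4, ChunkSize=2), each carrying its own Offset
+// and ChunkDigest.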
+
+// ModTime returns the entry's modification time.
+func (e *TOCEntry) ModTime() time.Time { return e.modTime }
+
+// NextOffset returns the position (relative to the start of the
+// stargz file) of the next gzip boundary after e.Offset.
+func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
+
+func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
+ if e.children == nil {
+ e.children = make(map[string]*TOCEntry)
+ }
+ if child.Type == "dir" {
+ e.NumLink++ // Entry ".." in the subdirectory links to this directory
+ }
+ e.children[baseName] = child
+}
+
+// isDataType reports whether TOCEntry is a regular file or chunk (something that
+// contains regular file data).
+func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
+
+// Stat returns a FileInfo value representing e.
+func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
+
+// ForeachChild calls f for each child item. If f returns false, iteration ends.
+// If e is not a directory, f is not called.
+func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
+ for name, ent := range e.children {
+ if !f(name, ent) {
+ return
+ }
+ }
+}
+
+// LookupChild returns the directory e's child by its base name.
+func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
+ child, ok = e.children[baseName]
+ return
+}
+
+// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
+type fileInfo struct{ e *TOCEntry }
+
+var _ os.FileInfo = fileInfo{}
+
+func (fi fileInfo) Name() string { return path.Base(fi.e.Name) }
+func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" }
+func (fi fileInfo) Size() int64 { return fi.e.Size }
+func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
+func (fi fileInfo) Sys() interface{} { return fi.e }
+func (fi fileInfo) Mode() (m os.FileMode) {
+ // TOCEntry.Mode is tar.Header.Mode, so we can interpret these bits using the `tar` package.
+ m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
+ (os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
+ switch fi.e.Type {
+ case "dir":
+ m |= os.ModeDir
+ case "symlink":
+ m |= os.ModeSymlink
+ case "char":
+ m |= os.ModeDevice | os.ModeCharDevice
+ case "block":
+ m |= os.ModeDevice
+ case "fifo":
+ m |= os.ModeNamedPipe
+ }
+ return m
+}
+
+// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
+// in an eStargz blob.
+type TOCEntryVerifier interface {
+
+ // Verifier provides a content verifier that can be used for verifying the
+ // contents of the specified TOCEntry.
+ Verifier(ce *TOCEntry) (digest.Verifier, error)
+}
+
+// Compression provides the compression helper to be used when creating and parsing eStargz.
+// This package provides gzip-based Compression by default, but any compression
+// algorithm (e.g. zstd) can be used as long as it implements Compression.
+type Compression interface {
+ Compressor
+ Decompressor
+}
+
+// Compressor represents the helper methods to be used for creating eStargz.
+type Compressor interface {
+ // Writer returns a WriteCloser to be used for writing a chunk to eStargz.
+ // Every time a chunk is written, the WriteCloser is closed and Writer is
+ // called again for writing the next chunk.
+ //
+ // The returned writer should implement a "Flush() error" method that flushes
+ // any pending compressed data to the underlying writer.
+ Writer(w io.Writer) (WriteFlushCloser, error)
+
+ // WriteTOCAndFooter is called to write JTOC to the passed Writer.
+ // diffHash calculates the DiffID (uncompressed sha256 hash) of the blob;
+ // WriteTOCAndFooter can optionally write anything that affects DiffID calculation
+ // (e.g. uncompressed TOC JSON).
+ //
+ // This function returns tocDgst, the digest of the TOC that will be used
+ // to verify this blob when it's parsed.
+ WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
+}
+
+// Decompressor represents the helper methods to be used for parsing eStargz.
+type Decompressor interface {
+ // Reader returns a ReadCloser to be used for decompressing a file payload.
+ Reader(r io.Reader) (io.ReadCloser, error)
+
+ // FooterSize returns the size of the footer of this blob.
+ FooterSize() int64
+
+ // ParseFooter parses the footer and returns the offset and (compressed) size of TOC.
+ // blobPayloadSize is the (compressed) size of the blob payload (i.e. the size from
+ // the top of the blob up to the TOC JSON).
+ //
+ // If tocOffset < 0, we assume that the TOC isn't contained in the blob and pass a nil
+ // reader to ParseTOC, which is then expected to acquire the TOC from an external
+ // location and return it.
+ //
+ // tocSize is optional. If tocSize <= 0, it defaults to the size of the range from
+ // tocOffset until the beginning of the footer (blob size - tocOffset - FooterSize).
+ // If blobPayloadSize < 0, blobPayloadSize becomes the blob size.
+ ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
+
+ // ParseTOC parses the TOC from the passed reader. The reader provides the partial
+ // contents of the underlying blob, covering the range specified by the ParseFooter method.
+ //
+ // This function returns tocDgst, the digest of the TOC that will be used to verify
+ // this blob. It must match the value returned by Compressor.WriteTOCAndFooter when
+ // this blob was created.
+ //
+ // If the tocOffset returned by ParseFooter is < 0, we assume that the TOC isn't
+ // contained in the blob. In that case a nil reader is passed to ParseTOC, which is
+ // expected to acquire the TOC from an external location and return it.
+ ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
+}
+
+type WriteFlushCloser interface {
+ io.WriteCloser
+ Flush() error
+}
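To make the Writer contract concrete, here is a minimal sketch of a gzip-based implementation. gzipChunkWriter is a hypothetical name; the standard library's *gzip.Writer happens to provide Write, Flush, and Close, so it satisfies estargz.WriteFlushCloser as-is:

```go
package main

import (
	"compress/gzip"
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// gzipChunkWriter shows the shape of a Compressor.Writer implementation:
// the returned value must support Write, Flush, and Close.
func gzipChunkWriter(w io.Writer) (estargz.WriteFlushCloser, error) {
	return gzip.NewWriter(w), nil
}

func main() {
	wc, err := gzipChunkWriter(os.Stdout)
	if err != nil {
		panic(err)
	}
	if _, err := wc.Write([]byte("chunk")); err != nil {
		panic(err)
	}
	// Per the interface contract, the WriteCloser is closed after each
	// chunk and Writer is called again for the next one.
	if err := wc.Close(); err != nil {
		panic(err)
	}
}
```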
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/zstdchunked/zstdchunked.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/zstdchunked/zstdchunked.go
new file mode 100644
index 000000000000..306038a74664
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/zstdchunked/zstdchunked.go
@@ -0,0 +1,201 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package zstdchunked
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "io"
+ "sync"
+
+ "github.com/containerd/stargz-snapshotter/estargz"
+ "github.com/klauspost/compress/zstd"
+ digest "github.com/opencontainers/go-digest"
+)
+
+const (
+ // ManifestChecksumAnnotation is an annotation that contains the compressed TOC digest.
+ ManifestChecksumAnnotation = "io.containers.zstd-chunked.manifest-checksum"
+
+ // ManifestPositionAnnotation is an annotation that contains the offset to the TOC.
+ ManifestPositionAnnotation = "io.containers.zstd-chunked.manifest-position"
+
+ // FooterSize is the size (in bytes) of the footer.
+ FooterSize = 40
+
+ manifestTypeCRFS = 1
+)
+
+var (
+ skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}
+ zstdFrameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
+ zstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
+)
+
+type Decompressor struct{}
+
+func (zz *Decompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+ decoder, err := zstd.NewReader(r)
+ if err != nil {
+ return nil, err
+ }
+ return &zstdReadCloser{decoder}, nil
+}
+
+func (zz *Decompressor) ParseTOC(r io.Reader) (toc *estargz.JTOC, tocDgst digest.Digest, err error) {
+ zr, err := zstd.NewReader(r)
+ if err != nil {
+ return nil, "", err
+ }
+ defer zr.Close()
+ dgstr := digest.Canonical.Digester()
+ toc = new(estargz.JTOC)
+ if err := json.NewDecoder(io.TeeReader(zr, dgstr.Hash())).Decode(&toc); err != nil {
+ return nil, "", fmt.Errorf("error decoding TOC JSON: %w", err)
+ }
+ return toc, dgstr.Digest(), nil
+}
+
+func (zz *Decompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+ offset := binary.LittleEndian.Uint64(p[0:8])
+ compressedLength := binary.LittleEndian.Uint64(p[8:16])
+ if !bytes.Equal(zstdChunkedFrameMagic, p[32:40]) {
+ return 0, 0, 0, fmt.Errorf("invalid magic number")
+ }
+ // 8 is the size of the zstd skippable frame header: the 4-byte magic plus the 4-byte frame-size field (see WriteTOCAndFooter)
+ return int64(offset - 8), int64(offset), int64(compressedLength), nil
+}
+
+func (zz *Decompressor) FooterSize() int64 {
+ return FooterSize
+}
+
+func (zz *Decompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ decoder, err := zstd.NewReader(r)
+ if err != nil {
+ return nil, err
+ }
+ br := bufio.NewReader(decoder)
+ if _, err := br.Peek(1); err != nil {
+ return nil, err
+ }
+ return &reader{br, decoder.Close}, nil
+}
+
+type reader struct {
+ io.Reader
+ closeFunc func()
+}
+
+func (r *reader) Close() error { r.closeFunc(); return nil }
+
+type zstdReadCloser struct{ *zstd.Decoder }
+
+func (z *zstdReadCloser) Close() error {
+ z.Decoder.Close()
+ return nil
+}
+
+type Compressor struct {
+ CompressionLevel zstd.EncoderLevel
+ Metadata map[string]string
+
+ pool sync.Pool
+}
+
+func (zc *Compressor) Writer(w io.Writer) (estargz.WriteFlushCloser, error) {
+ if wc := zc.pool.Get(); wc != nil {
+ ec := wc.(*zstd.Encoder)
+ ec.Reset(w)
+ return &poolEncoder{ec, zc}, nil
+ }
+ ec, err := zstd.NewWriter(w, zstd.WithEncoderLevel(zc.CompressionLevel), zstd.WithLowerEncoderMem(true))
+ if err != nil {
+ return nil, err
+ }
+ return &poolEncoder{ec, zc}, nil
+}
+
+type poolEncoder struct {
+ *zstd.Encoder
+ zc *Compressor
+}
+
+func (w *poolEncoder) Close() error {
+ if err := w.Encoder.Close(); err != nil {
+ return err
+ }
+ w.zc.pool.Put(w.Encoder)
+ return nil
+}
+
+func (zc *Compressor) WriteTOCAndFooter(w io.Writer, off int64, toc *estargz.JTOC, diffHash hash.Hash) (digest.Digest, error) {
+ tocJSON, err := json.MarshalIndent(toc, "", "\t")
+ if err != nil {
+ return "", err
+ }
+ buf := new(bytes.Buffer)
+ encoder, err := zstd.NewWriter(buf, zstd.WithEncoderLevel(zc.CompressionLevel))
+ if err != nil {
+ return "", err
+ }
+ if _, err := encoder.Write(tocJSON); err != nil {
+ return "", err
+ }
+ if err := encoder.Close(); err != nil {
+ return "", err
+ }
+ compressedTOC := buf.Bytes()
+ if _, err := io.Copy(w, bytes.NewReader(appendSkippableFrameMagic(compressedTOC))); err != nil {
+ return "", err
+ }
+
+ // 8 is the size of the zstd skippable frame header: the 4-byte magic plus the 4-byte frame-size field
+ tocOff := uint64(off) + 8
+ if _, err := w.Write(appendSkippableFrameMagic(
+ zstdFooterBytes(tocOff, uint64(len(tocJSON)), uint64(len(compressedTOC)))),
+ ); err != nil {
+ return "", err
+ }
+
+ if zc.Metadata != nil {
+ zc.Metadata[ManifestChecksumAnnotation] = digest.FromBytes(compressedTOC).String()
+ zc.Metadata[ManifestPositionAnnotation] = fmt.Sprintf("%d:%d:%d:%d",
+ tocOff, len(compressedTOC), len(tocJSON), manifestTypeCRFS)
+ }
+
+ return digest.FromBytes(tocJSON), nil
+}
+
+ // zstdFooterBytes returns the 40-byte footer.
+func zstdFooterBytes(tocOff, tocRawSize, tocCompressedSize uint64) []byte {
+ footer := make([]byte, FooterSize)
+ binary.LittleEndian.PutUint64(footer, tocOff)
+ binary.LittleEndian.PutUint64(footer[8:], tocCompressedSize)
+ binary.LittleEndian.PutUint64(footer[16:], tocRawSize)
+ binary.LittleEndian.PutUint64(footer[24:], manifestTypeCRFS)
+ copy(footer[32:40], zstdChunkedFrameMagic)
+ return footer
+}
+
+func appendSkippableFrameMagic(b []byte) []byte {
+ size := make([]byte, 4)
+ binary.LittleEndian.PutUint32(size, uint32(len(b)))
+ return append(append(skippableFrameMagic, size...), b...)
+}
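The 40-byte footer written by zstdFooterBytes and consumed by ParseFooter has a fixed little-endian layout. A self-contained round-trip sketch, with all values illustrative:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	magic := []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}

	// Build a footer the way zstdFooterBytes does.
	footer := make([]byte, 40)
	binary.LittleEndian.PutUint64(footer[0:], 1032)  // TOC offset in the blob
	binary.LittleEndian.PutUint64(footer[8:], 512)   // compressed TOC size
	binary.LittleEndian.PutUint64(footer[16:], 2048) // raw (uncompressed) TOC size
	binary.LittleEndian.PutUint64(footer[24:], 1)    // manifestTypeCRFS
	copy(footer[32:], magic)

	// Read it back the way ParseFooter does; the blob payload ends 8 bytes
	// (skippable frame magic + size field) before the TOC offset.
	tocOffset := int64(binary.LittleEndian.Uint64(footer[0:8]))
	tocSize := int64(binary.LittleEndian.Uint64(footer[8:16]))
	fmt.Println(tocOffset-8, tocOffset, tocSize) // 1024 1032 512
}
```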
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/config/config.go b/vendor/github.com/containerd/stargz-snapshotter/fs/config/config.go
new file mode 100644
index 000000000000..890aded7400b
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/config/config.go
@@ -0,0 +1,151 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the NOTICE.md file.
+*/
+
+package config
+
+const (
+ // TargetSkipVerifyLabel is a snapshot label key that indicates to skip content
+ // verification for the layer.
+ TargetSkipVerifyLabel = "containerd.io/snapshot/remote/stargz.skipverify"
+
+ // TargetPrefetchSizeLabel is a snapshot label key that indicates the size to prefetch
+ // for the layer. If the layer is eStargz and contains prefetch landmarks, that
+ // configuration will be respected.
+ TargetPrefetchSizeLabel = "containerd.io/snapshot/remote/stargz.prefetch"
+)
+
+// Config is configuration for stargz snapshotter filesystem.
+type Config struct {
+ // Type of cache for compressed contents fetched from the registry. "memory" stores them in memory.
+ // Any other value caches them on disk.
+ HTTPCacheType string `toml:"http_cache_type"`
+
+ // Type of cache for uncompressed file contents. "memory" stores them in memory. Any other
+ // value caches them on disk.
+ FSCacheType string `toml:"filesystem_cache_type"`
+
+ // ResolveResultEntryTTLSec is the TTL (in seconds) for caching resolved layers for
+ // future use. Default is 120.
+ ResolveResultEntryTTLSec int `toml:"resolve_result_entry_ttl_sec"`
+
+ // PrefetchSize is the default size (in bytes) to prefetch when mounting a layer. Default is 0. The value
+ // specified by the image via "containerd.io/snapshot/remote/stargz.prefetch" or the landmark file still takes precedence.
+ PrefetchSize int64 `toml:"prefetch_size"`
+
+ // PrefetchTimeoutSec is the timeout (in seconds) for the prefetch operation. Default is 10.
+ PrefetchTimeoutSec int64 `toml:"prefetch_timeout_sec"`
+
+ // NoPrefetch disables prefetching. Default is false.
+ NoPrefetch bool `toml:"noprefetch"`
+
+ // NoBackgroundFetch disables fetching the entire layer contents in the background. Default is false.
+ NoBackgroundFetch bool `toml:"no_background_fetch"`
+
+ // Debug enables filesystem debug log.
+ Debug bool `toml:"debug"`
+
+ // AllowNoVerification allows mounting images without verification. Default is false.
+ AllowNoVerification bool `toml:"allow_no_verification"`
+
+ // DisableVerification disables verifying layer contents. Default is false.
+ DisableVerification bool `toml:"disable_verification"`
+
+ // MaxConcurrency is the maximum number of concurrent background tasks for fetching layer contents. Default is 2.
+ MaxConcurrency int64 `toml:"max_concurrency"`
+
+ // NoPrometheus disables exposing filesystem-related metrics. Default is false.
+ NoPrometheus bool `toml:"no_prometheus"`
+
+ // BlobConfig is config for layer blob management.
+ BlobConfig `toml:"blob"`
+
+ // DirectoryCacheConfig is config for directory-based cache.
+ DirectoryCacheConfig `toml:"directory_cache"`
+
+ // FuseConfig is configurations for FUSE fs.
+ FuseConfig `toml:"fuse"`
+
+ // ResolveResultEntry is a deprecated field.
+ ResolveResultEntry int `toml:"resolve_result_entry"` // deprecated
+}
+
+ // BlobConfig is configuration for the blob-fetching logic.
+type BlobConfig struct {
+ // ValidInterval specifies a duration (in seconds) during which the layer can be reused without
+ // checking the connection to the registry. Default is 60.
+ ValidInterval int64 `toml:"valid_interval"`
+
+ // CheckAlways, if true, overrides ValidInterval to 0. Default is false.
+ CheckAlways bool `toml:"check_always"`
+
+ // ChunkSize is the granularity (in bytes) at which background fetch and on-demand reads
+ // are fetched from the remote registry. Default is 50000.
+ ChunkSize int64 `toml:"chunk_size"`
+
+ // FetchTimeoutSec is a timeout duration (in seconds) for fetching chunks from the registry. Default is 300.
+ FetchTimeoutSec int64 `toml:"fetching_timeout_sec"`
+
+ // ForceSingleRangeMode disables the use of multiple ranges in a single Range request and instead
+ // always requests one larger region that covers them. Default is false.
+ ForceSingleRangeMode bool `toml:"force_single_range_mode"`
+
+ // PrefetchChunkSize is the maximum number of bytes transferred per HTTP GET from the remote
+ // registry during prefetch. It is recommended to set PrefetchChunkSize > ChunkSize.
+ // If PrefetchChunkSize < ChunkSize, the prefetch bytes are fetched as a single HTTP GET;
+ // otherwise the total number of GET requests for prefetch is ceil(PrefetchSize / PrefetchChunkSize).
+ // Default is 0.
+ PrefetchChunkSize int64 `toml:"prefetch_chunk_size"`
+
+ // MaxRetries is the maximum number of retries of an HTTP request. Default is 5.
+ MaxRetries int `toml:"max_retries"`
+
+ // MinWaitMSec is the minimal delay (in milliseconds) before the next retry after a request failure. Default is 30.
+ MinWaitMSec int `toml:"min_wait_msec"`
+
+ // MaxWaitMSec is the maximum delay (in milliseconds) before the next retry after a request failure. Default is 30.
+ MaxWaitMSec int `toml:"max_wait_msec"`
+}
+
+// DirectoryCacheConfig is configuration for the disk-based cache.
+type DirectoryCacheConfig struct {
+ // MaxLRUCacheEntry is the number of entries in the LRU cache for caching data in memory. Default is 10.
+ MaxLRUCacheEntry int `toml:"max_lru_cache_entry"`
+
+ // MaxCacheFds is the number of entries in the LRU cache for holding fds of files with cached contents. Default is 10.
+ MaxCacheFds int `toml:"max_cache_fds"`
+
+ // SyncAdd being true means that each addition of data to the cache blocks until the data is fully written to the
+ // cache directory. Default is false.
+ SyncAdd bool `toml:"sync_add"`
+
+ // Direct disables the on-memory data cache. Default is true to save memory usage.
+ Direct bool `toml:"direct" default:"true"`
+}
+
+// FuseConfig is configuration for FUSE fs.
+type FuseConfig struct {
+ // AttrTimeout defines the overall attribute cache timeout for the filesystem, in seconds.
+ AttrTimeout int64 `toml:"attr_timeout"`
+
+ // EntryTimeout defines the TTL for directory entry (name) lookups, in seconds.
+ EntryTimeout int64 `toml:"entry_timeout"`
+}
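For orientation, a hedged sketch of building this configuration programmatically rather than from TOML; the values mirror the documented defaults and are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/containerd/stargz-snapshotter/fs/config"
)

func main() {
	cfg := config.Config{
		HTTPCacheType:            "memory", // cache compressed contents in memory
		FSCacheType:              "memory", // cache uncompressed contents in memory
		ResolveResultEntryTTLSec: 120,
		PrefetchTimeoutSec:       10,
		BlobConfig: config.BlobConfig{ // embedded struct, set by type name
			ValidInterval:   60,
			ChunkSize:       50000,
			FetchTimeoutSec: 300,
		},
		DirectoryCacheConfig: config.DirectoryCacheConfig{
			Direct: true,
		},
	}
	fmt.Printf("%+v\n", cfg)
}
```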
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/fs.go b/vendor/github.com/containerd/stargz-snapshotter/fs/fs.go
new file mode 100644
index 000000000000..47191e99ac6d
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/fs.go
@@ -0,0 +1,506 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the NOTICE.md file.
+*/
+
+//
+// Example implementation of FileSystem.
+//
+// This implementation uses stargz by CRFS(/~https://github.com/google/crfs) as
+// the image format, which has the following features:
+// - We can use a docker registry as a backend store (i.e. without additional
+// layer stores).
+// - The stargz-formatted image is still docker-compatible (i.e. normal
+// runtimes can still use the formatted image).
+//
+// Currently, we reimplement a CRFS-like filesystem for ease of integration.
+// But in the near future, we intend to integrate it with CRFS.
+//
+
+package fs
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/containerd/containerd/v2/core/remotes/docker"
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/log"
+ "github.com/containerd/stargz-snapshotter/estargz"
+ "github.com/containerd/stargz-snapshotter/fs/config"
+ "github.com/containerd/stargz-snapshotter/fs/layer"
+ commonmetrics "github.com/containerd/stargz-snapshotter/fs/metrics/common"
+ layermetrics "github.com/containerd/stargz-snapshotter/fs/metrics/layer"
+ "github.com/containerd/stargz-snapshotter/fs/remote"
+ "github.com/containerd/stargz-snapshotter/fs/source"
+ "github.com/containerd/stargz-snapshotter/metadata"
+ memorymetadata "github.com/containerd/stargz-snapshotter/metadata/memory"
+ "github.com/containerd/stargz-snapshotter/snapshot"
+ "github.com/containerd/stargz-snapshotter/task"
+ metrics "github.com/docker/go-metrics"
+ fusefs "github.com/hanwen/go-fuse/v2/fs"
+ "github.com/hanwen/go-fuse/v2/fuse"
+ digest "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ defaultFuseTimeout = time.Second
+ defaultMaxConcurrency = 2
+)
+
+var fusermountBin = []string{"fusermount", "fusermount3"}
+
+type Option func(*options)
+
+type options struct {
+ getSources source.GetSources
+ resolveHandlers map[string]remote.Handler
+ metadataStore metadata.Store
+ metricsLogLevel *log.Level
+ overlayOpaqueType layer.OverlayOpaqueType
+ additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor
+}
+
+func WithGetSources(s source.GetSources) Option {
+ return func(opts *options) {
+ opts.getSources = s
+ }
+}
+
+func WithResolveHandler(name string, handler remote.Handler) Option {
+ return func(opts *options) {
+ if opts.resolveHandlers == nil {
+ opts.resolveHandlers = make(map[string]remote.Handler)
+ }
+ opts.resolveHandlers[name] = handler
+ }
+}
+
+func WithMetadataStore(metadataStore metadata.Store) Option {
+ return func(opts *options) {
+ opts.metadataStore = metadataStore
+ }
+}
+
+func WithMetricsLogLevel(logLevel log.Level) Option {
+ return func(opts *options) {
+ opts.metricsLogLevel = &logLevel
+ }
+}
+
+func WithOverlayOpaqueType(overlayOpaqueType layer.OverlayOpaqueType) Option {
+ return func(opts *options) {
+ opts.overlayOpaqueType = overlayOpaqueType
+ }
+}
+
+func WithAdditionalDecompressors(d func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor) Option {
+ return func(opts *options) {
+ opts.additionalDecompressors = d
+ }
+}
+
+func NewFilesystem(root string, cfg config.Config, opts ...Option) (_ snapshot.FileSystem, err error) {
+ var fsOpts options
+ for _, o := range opts {
+ o(&fsOpts)
+ }
+ maxConcurrency := cfg.MaxConcurrency
+ if maxConcurrency == 0 {
+ maxConcurrency = defaultMaxConcurrency
+ }
+
+ attrTimeout := time.Duration(cfg.FuseConfig.AttrTimeout) * time.Second
+ if attrTimeout == 0 {
+ attrTimeout = defaultFuseTimeout
+ }
+
+ entryTimeout := time.Duration(cfg.FuseConfig.EntryTimeout) * time.Second
+ if entryTimeout == 0 {
+ entryTimeout = defaultFuseTimeout
+ }
+
+ metadataStore := fsOpts.metadataStore
+ if metadataStore == nil {
+ metadataStore = memorymetadata.NewReader
+ }
+
+ getSources := fsOpts.getSources
+ if getSources == nil {
+ getSources = source.FromDefaultLabels(func(refspec reference.Spec) (hosts []docker.RegistryHost, _ error) {
+ return docker.ConfigureDefaultRegistries(docker.WithPlainHTTP(docker.MatchLocalhost))(refspec.Hostname())
+ })
+ }
+ tm := task.NewBackgroundTaskManager(maxConcurrency, 5*time.Second)
+ r, err := layer.NewResolver(root, tm, cfg, fsOpts.resolveHandlers, metadataStore, fsOpts.overlayOpaqueType, fsOpts.additionalDecompressors)
+ if err != nil {
+ return nil, fmt.Errorf("failed to setup resolver: %w", err)
+ }
+
+ var ns *metrics.Namespace
+ if !cfg.NoPrometheus {
+ ns = metrics.NewNamespace("stargz", "fs", nil)
+ logLevel := log.DebugLevel
+ if fsOpts.metricsLogLevel != nil {
+ logLevel = *fsOpts.metricsLogLevel
+ }
+ commonmetrics.Register(logLevel) // Register common metrics. This will happen only once.
+ }
+ c := layermetrics.NewLayerMetrics(ns)
+ if ns != nil {
+ metrics.Register(ns) // Register layer metrics.
+ }
+
+ return &filesystem{
+ resolver: r,
+ getSources: getSources,
+ prefetchSize: cfg.PrefetchSize,
+ noprefetch: cfg.NoPrefetch,
+ noBackgroundFetch: cfg.NoBackgroundFetch,
+ debug: cfg.Debug,
+ layer: make(map[string]layer.Layer),
+ backgroundTaskManager: tm,
+ allowNoVerification: cfg.AllowNoVerification,
+ disableVerification: cfg.DisableVerification,
+ metricsController: c,
+ attrTimeout: attrTimeout,
+ entryTimeout: entryTimeout,
+ }, nil
+}
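A hypothetical usage sketch of the constructor above, assuming only the exported options shown in this file; the root path is illustrative:

```go
package main

import (
	"github.com/containerd/log"
	stargzfs "github.com/containerd/stargz-snapshotter/fs"
	"github.com/containerd/stargz-snapshotter/fs/config"
	"github.com/containerd/stargz-snapshotter/fs/layer"
)

func main() {
	// Zero-value config: NewFilesystem falls back to the documented
	// defaults (concurrency, FUSE timeouts, default registry hosts).
	fsys, err := stargzfs.NewFilesystem("/var/lib/example-stargz/fs",
		config.Config{},
		stargzfs.WithMetricsLogLevel(log.DebugLevel),
		stargzfs.WithOverlayOpaqueType(layer.OverlayOpaqueAll),
	)
	if err != nil {
		log.L.WithError(err).Fatal("failed to construct filesystem")
	}
	_ = fsys // hand to the snapshotter, which calls Mount/Check/Unmount
}
```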
+
+type filesystem struct {
+ resolver *layer.Resolver
+ prefetchSize int64
+ noprefetch bool
+ noBackgroundFetch bool
+ debug bool
+ layer map[string]layer.Layer
+ layerMu sync.Mutex
+ backgroundTaskManager *task.BackgroundTaskManager
+ allowNoVerification bool
+ disableVerification bool
+ getSources source.GetSources
+ metricsController *layermetrics.Controller
+ attrTimeout time.Duration
+ entryTimeout time.Duration
+}
+
+func (fs *filesystem) Mount(ctx context.Context, mountpoint string, labels map[string]string) (retErr error) {
+ // Setting the start time to measure the Mount operation duration.
+ start := time.Now()
+
+ // This is a prioritized task; all background tasks are paused during its
+ // execution so that it isn't disturbed by network traffic caused by
+ // background tasks.
+ fs.backgroundTaskManager.DoPrioritizedTask()
+ defer fs.backgroundTaskManager.DonePrioritizedTask()
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("mountpoint", mountpoint))
+
+ // Get source information of this layer.
+ src, err := fs.getSources(labels)
+ if err != nil {
+ return err
+ } else if len(src) == 0 {
+ return fmt.Errorf("source must be passed")
+ }
+
+ defaultPrefetchSize := fs.prefetchSize
+ if psStr, ok := labels[config.TargetPrefetchSizeLabel]; ok {
+ if ps, err := strconv.ParseInt(psStr, 10, 64); err == nil {
+ defaultPrefetchSize = ps
+ }
+ }
+
+ // Resolve the target layer
+ var (
+ resultChan = make(chan layer.Layer)
+ errChan = make(chan error)
+ )
+ go func() {
+ rErr := fmt.Errorf("failed to resolve target")
+ for _, s := range src {
+ l, err := fs.resolver.Resolve(ctx, s.Hosts, s.Name, s.Target)
+ if err == nil {
+ resultChan <- l
+ fs.prefetch(ctx, l, defaultPrefetchSize, start)
+ return
+ }
+ rErr = fmt.Errorf("failed to resolve layer %q from %q: %v: %w", s.Target.Digest, s.Name, err, rErr)
+ }
+ errChan <- rErr
+ }()
+
+ // Also resolve and cache other layers in parallel
+ preResolve := src[0] // TODO: should we pre-resolve blobs in other sources as well?
+ for _, desc := range neighboringLayers(preResolve.Manifest, preResolve.Target) {
+ desc := desc
+ go func() {
+ // Avoid being canceled by the client.
+ ctx := log.WithLogger(context.Background(), log.G(ctx).WithField("mountpoint", mountpoint))
+ l, err := fs.resolver.Resolve(ctx, preResolve.Hosts, preResolve.Name, desc)
+ if err != nil {
+ log.G(ctx).WithError(err).Debug("failed to pre-resolve")
+ return
+ }
+ fs.prefetch(ctx, l, defaultPrefetchSize, start)
+
+ // Release this layer because it isn't the target and we no longer use it here.
+ // However, it will remain in the resolver cache until eviction.
+ l.Done()
+ }()
+ }
+
+ // Wait for resolving completion
+ var l layer.Layer
+ select {
+ case l = <-resultChan:
+ case err := <-errChan:
+ log.G(ctx).WithError(err).Debug("failed to resolve layer")
+ return fmt.Errorf("failed to resolve layer: %w", err)
+ case <-time.After(30 * time.Second):
+ log.G(ctx).Debug("failed to resolve layer (timeout)")
+ return fmt.Errorf("failed to resolve layer (timeout)")
+ }
+ defer func() {
+ if retErr != nil {
+ l.Done() // don't use this layer.
+ }
+ }()
+
+ // Verify layer's content
+ if fs.disableVerification {
+ // Skip if verification is disabled completely
+ l.SkipVerify()
+ log.G(ctx).Infof("Verification forcefully skipped")
+ } else if tocDigest, ok := labels[estargz.TOCJSONDigestAnnotation]; ok {
+ // Verify this layer using the TOC JSON digest passed through label.
+ dgst, err := digest.Parse(tocDigest)
+ if err != nil {
+ log.G(ctx).WithError(err).Debugf("failed to parse passed TOC digest %q", dgst)
+ return fmt.Errorf("invalid TOC digest: %v: %w", tocDigest, err)
+ }
+ if err := l.Verify(dgst); err != nil {
+ log.G(ctx).WithError(err).Debugf("invalid layer")
+ return fmt.Errorf("invalid stargz layer: %w", err)
+ }
+ log.G(ctx).Debugf("verified")
+ } else if _, ok := labels[config.TargetSkipVerifyLabel]; ok && fs.allowNoVerification {
+ // If an unverified layer is allowed, use it with a warning.
+ // This mode is for legacy stargz archives which don't contain the digests
+ // necessary for layer verification.
+ l.SkipVerify()
+ log.G(ctx).Warningf("no verification is performed for this layer")
+ } else {
+ // Verification must be done. Don't mount this layer.
+ return fmt.Errorf("digest of TOC JSON must be passed")
+ }
+ node, err := l.RootNode(0)
+ if err != nil {
+ log.G(ctx).WithError(err).Warnf("Failed to get root node")
+ return fmt.Errorf("failed to get root node: %w", err)
+ }
+
+ // Measuring duration of Mount operation for resolved layer.
+ layerDgst := l.Info().Digest // layer digest; avoid shadowing the digest package
+ defer commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.Mount, layerDgst, start)
+
+ // Register the mountpoint layer
+ fs.layerMu.Lock()
+ fs.layer[mountpoint] = l
+ fs.layerMu.Unlock()
+ fs.metricsController.Add(mountpoint, l)
+
+ // mount the node to the specified mountpoint
+ // TODO: bind mount the state directory as a read-only fs on snapshotter's side
+ rawFS := fusefs.NewNodeFS(node, &fusefs.Options{
+ AttrTimeout: &fs.attrTimeout,
+ EntryTimeout: &fs.entryTimeout,
+ NullPermissions: true,
+ })
+ mountOpts := &fuse.MountOptions{
+ AllowOther: true, // allow users other than root&mounter to access fs
+ FsName: "stargz", // name this filesystem as "stargz"
+ Debug: fs.debug,
+ }
+ if isFusermountBinExist() {
+ log.G(ctx).Infof("fusermount detected")
+ mountOpts.Options = []string{"suid"} // option for fusermount; allow setuid inside container
+ } else {
+ log.G(ctx).Infof("%v not installed; trying direct mount", fusermountBin)
+ mountOpts.DirectMount = true
+ }
+ server, err := fuse.NewServer(rawFS, mountpoint, mountOpts)
+ if err != nil {
+ log.G(ctx).WithError(err).Debug("failed to make filesystem server")
+ return err
+ }
+
+ go server.Serve()
+ return server.WaitMount()
+}
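Mount above races a resolver goroutine against a 30-second timer, accumulating errors across sources until one succeeds. A standalone sketch of that pattern (resolveFirst is a hypothetical helper, not part of the package):

```go
package main

import (
	"fmt"
	"time"
)

// resolveFirst returns the first source that resolves successfully and
// wraps the accumulated errors otherwise; a timer bounds the overall wait.
// As in the original, the worker goroutine may outlive the select if the
// timeout fires first.
func resolveFirst(sources []string, resolve func(string) (string, error), timeout time.Duration) (string, error) {
	resultCh := make(chan string)
	errCh := make(chan error)
	go func() {
		rErr := fmt.Errorf("failed to resolve target")
		for _, s := range sources {
			r, err := resolve(s)
			if err == nil {
				resultCh <- r
				return
			}
			rErr = fmt.Errorf("source %q: %v: %w", s, err, rErr)
		}
		errCh <- rErr
	}()
	select {
	case r := <-resultCh:
		return r, nil
	case err := <-errCh:
		return "", err
	case <-time.After(timeout):
		return "", fmt.Errorf("resolve timed out after %v", timeout)
	}
}

func main() {
	r, err := resolveFirst([]string{"mirror", "origin"}, func(s string) (string, error) {
		if s == "origin" {
			return "layer", nil
		}
		return "", fmt.Errorf("unreachable")
	}, time.Second)
	fmt.Println(r, err) // layer <nil>
}
```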
+
+func (fs *filesystem) Check(ctx context.Context, mountpoint string, labels map[string]string) error {
+ // This is a prioritized task; all background tasks are paused during its
+ // execution so that it isn't disturbed by network traffic caused by
+ // background tasks.
+ fs.backgroundTaskManager.DoPrioritizedTask()
+ defer fs.backgroundTaskManager.DonePrioritizedTask()
+
+ defer commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.PrefetchesCompleted, digest.FromString(""), time.Now()) // measuring the time the container launch is blocked on prefetch to complete
+
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("mountpoint", mountpoint))
+
+ fs.layerMu.Lock()
+ l := fs.layer[mountpoint]
+ fs.layerMu.Unlock()
+ if l == nil {
+ log.G(ctx).Debug("layer not registered")
+ return fmt.Errorf("layer not registered")
+ }
+
+ if l.Info().FetchedSize < l.Info().Size {
+ // Image contents hasn't fully cached yet.
+ // Check the blob connectivity and try to refresh the connection on failure
+ if err := fs.check(ctx, l, labels); err != nil {
+ log.G(ctx).WithError(err).Warn("check failed")
+ return err
+ }
+ }
+
+ // Wait for prefetch completion
+ if !fs.noprefetch {
+ if err := l.WaitForPrefetchCompletion(); err != nil {
+ log.G(ctx).WithError(err).Warn("failed to sync with prefetch completion")
+ }
+ }
+
+ return nil
+}
+
+func (fs *filesystem) check(ctx context.Context, l layer.Layer, labels map[string]string) error {
+ err := l.Check()
+ if err == nil {
+ return nil
+ }
+ log.G(ctx).WithError(err).Warn("failed to connect to blob")
+
+ // Check failed. Try to refresh the connection with fresh source information
+ src, err := fs.getSources(labels)
+ if err != nil {
+ return err
+ }
+ var (
+ retrynum = 1
+ rErr = fmt.Errorf("failed to refresh connection")
+ )
+ for retry := 0; retry < retrynum; retry++ {
+ log.G(ctx).Warnf("refreshing(%d)...", retry)
+ for _, s := range src {
+ err := l.Refresh(ctx, s.Hosts, s.Name, s.Target)
+ if err == nil {
+ log.G(ctx).Debug("Successfully refreshed connection")
+ return nil
+ }
+ log.G(ctx).WithError(err).Warnf("failed to refresh the layer %q from %q", s.Target.Digest, s.Name)
+ rErr = fmt.Errorf("failed(layer:%q, ref:%q): %v: %w", s.Target.Digest, s.Name, err, rErr)
+ }
+ }
+
+ return rErr
+}
+
+func (fs *filesystem) Unmount(ctx context.Context, mountpoint string) error {
+ if mountpoint == "" {
+ return fmt.Errorf("mount point must be specified")
+ }
+ fs.layerMu.Lock()
+ l, ok := fs.layer[mountpoint]
+ if !ok {
+ fs.layerMu.Unlock()
+ return fmt.Errorf("specified path %q isn't a mountpoint", mountpoint)
+ }
+ delete(fs.layer, mountpoint) // unregisters the corresponding layer
+ l.Done()
+ fs.layerMu.Unlock()
+ fs.metricsController.Remove(mountpoint)
+
+ if err := unmount(mountpoint, 0); err != nil {
+ if err != unix.EBUSY {
+ return err
+ }
+ // Try force unmount
+ log.G(ctx).WithError(err).Debugf("trying force unmount %q", mountpoint)
+ if err := unmount(mountpoint, unix.MNT_FORCE); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func unmount(target string, flags int) error {
+ for {
+ if err := unix.Unmount(target, flags); err != unix.EINTR {
+ return err
+ }
+ }
+}
+
+func (fs *filesystem) prefetch(ctx context.Context, l layer.Layer, defaultPrefetchSize int64, start time.Time) {
+ // Prefetch a layer. The first Check() for this layer waits for the prefetch completion.
+ if !fs.noprefetch {
+ go l.Prefetch(defaultPrefetchSize)
+ }
+
+ // Fetch whole layer aggressively in background.
+ if !fs.noBackgroundFetch {
+ go func() {
+ if err := l.BackgroundFetch(); err == nil {
+ // write log record for the latency between mount start and last on demand fetch
+ commonmetrics.LogLatencyForLastOnDemandFetch(ctx, l.Info().Digest, start, l.Info().ReadTime)
+ }
+ }()
+ }
+}
+
+// neighboringLayers returns layer descriptors except the `target` layer in the specified manifest.
+func neighboringLayers(manifest ocispec.Manifest, target ocispec.Descriptor) (descs []ocispec.Descriptor) {
+ for _, desc := range manifest.Layers {
+ if desc.Digest.String() != target.Digest.String() {
+ descs = append(descs, desc)
+ }
+ }
+ return
+}
+
+func isFusermountBinExist() bool {
+ for _, b := range fusermountBin {
+ if _, err := exec.LookPath(b); err == nil {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/layer.go b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/layer.go
new file mode 100644
index 000000000000..a71848dbbc37
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/layer.go
@@ -0,0 +1,681 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the NOTICE.md file.
+*/
+
+package layer
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/log"
+ "github.com/containerd/stargz-snapshotter/cache"
+ "github.com/containerd/stargz-snapshotter/estargz"
+ "github.com/containerd/stargz-snapshotter/estargz/zstdchunked"
+ "github.com/containerd/stargz-snapshotter/fs/config"
+ commonmetrics "github.com/containerd/stargz-snapshotter/fs/metrics/common"
+ "github.com/containerd/stargz-snapshotter/fs/reader"
+ "github.com/containerd/stargz-snapshotter/fs/remote"
+ "github.com/containerd/stargz-snapshotter/fs/source"
+ "github.com/containerd/stargz-snapshotter/metadata"
+ "github.com/containerd/stargz-snapshotter/task"
+ "github.com/containerd/stargz-snapshotter/util/cacheutil"
+ "github.com/containerd/stargz-snapshotter/util/namedmutex"
+ fusefs "github.com/hanwen/go-fuse/v2/fs"
+ digest "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+const (
+ defaultResolveResultEntryTTLSec = 120
+ defaultMaxLRUCacheEntry = 10
+ defaultMaxCacheFds = 10
+ defaultPrefetchTimeoutSec = 10
+ memoryCacheType = "memory"
+)
+
+// Layer represents a layer.
+type Layer interface {
+ // Info returns the information of this layer.
+ Info() Info
+
+ // RootNode returns the root node of this layer.
+ RootNode(baseInode uint32) (fusefs.InodeEmbedder, error)
+
+ // Check checks if the layer is still connectable.
+ Check() error
+
+ // Refresh refreshes the layer connection.
+ Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error
+
+ // Verify verifies this layer using the passed TOC Digest.
+ // Nop if Verify() or SkipVerify() was already called.
+ Verify(tocDigest digest.Digest) (err error)
+
+ // SkipVerify skips verification for this layer.
+ // Nop if Verify() or SkipVerify() was already called.
+ SkipVerify()
+
+ // Prefetch prefetches the specified size. If the layer is eStargz and contains landmark files,
+ // the range indicated by these files is respected.
+ Prefetch(prefetchSize int64) error
+
+ // ReadAt reads this layer.
+ ReadAt([]byte, int64, ...remote.Option) (int, error)
+
+ // WaitForPrefetchCompletion waits until Prefetch completes.
+ WaitForPrefetchCompletion() error
+
+ // BackgroundFetch fetches the entire layer contents to the cache.
+ // Fetching contents is done as a background task.
+ BackgroundFetch() error
+
+ // Done releases the reference to this layer. The resources related to this layer will be
+ // discarded sooner or later. Queries after calling this function won't be serviced.
+ Done()
+}
+
+// Info is the current status of a layer.
+type Info struct {
+ Digest digest.Digest
+ Size int64 // layer size in bytes
+ FetchedSize int64 // layer fetched size in bytes
+ PrefetchSize int64 // layer prefetch size in bytes
+ ReadTime time.Time // last time the layer was read
+ TOCDigest digest.Digest
+}
+
+ // Resolver resolves the layer location and provides the handler of that layer.
+type Resolver struct {
+ rootDir string
+ resolver *remote.Resolver
+ prefetchTimeout time.Duration
+ layerCache *cacheutil.TTLCache
+ layerCacheMu sync.Mutex
+ blobCache *cacheutil.TTLCache
+ blobCacheMu sync.Mutex
+ backgroundTaskManager *task.BackgroundTaskManager
+ resolveLock *namedmutex.NamedMutex
+ config config.Config
+ metadataStore metadata.Store
+ overlayOpaqueType OverlayOpaqueType
+ additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor
+}
+
+// NewResolver returns a new layer resolver.
+func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, cfg config.Config, resolveHandlers map[string]remote.Handler, metadataStore metadata.Store, overlayOpaqueType OverlayOpaqueType, additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor) (*Resolver, error) {
+ resolveResultEntryTTL := time.Duration(cfg.ResolveResultEntryTTLSec) * time.Second
+ if resolveResultEntryTTL == 0 {
+ resolveResultEntryTTL = defaultResolveResultEntryTTLSec * time.Second
+ }
+ prefetchTimeout := time.Duration(cfg.PrefetchTimeoutSec) * time.Second
+ if prefetchTimeout == 0 {
+ prefetchTimeout = defaultPrefetchTimeoutSec * time.Second
+ }
+
+ // layerCache caches resolved layers for future use. This is useful in a use case where
+ // the filesystem resolves and caches all layers in an image (not only the queried one)
+ // in parallel, before they are actually queried.
+ layerCache := cacheutil.NewTTLCache(resolveResultEntryTTL)
+ layerCache.OnEvicted = func(key string, value interface{}) {
+ if err := value.(*layer).close(); err != nil {
+ log.L.WithField("key", key).WithError(err).Warnf("failed to clean up layer")
+ return
+ }
+ log.L.WithField("key", key).Debugf("cleaned up layer")
+ }
+
+ // blobCache caches resolved blobs for future use. This is especially useful when a layer
+ // isn't eStargz/stargz (the *layer object won't be created/cached in this case).
+ blobCache := cacheutil.NewTTLCache(resolveResultEntryTTL)
+ blobCache.OnEvicted = func(key string, value interface{}) {
+ if err := value.(remote.Blob).Close(); err != nil {
+ log.L.WithField("key", key).WithError(err).Warnf("failed to clean up blob")
+ return
+ }
+ log.L.WithField("key", key).Debugf("cleaned up blob")
+ }
+
+ if err := os.MkdirAll(root, 0700); err != nil {
+ return nil, err
+ }
+
+ return &Resolver{
+ rootDir: root,
+ resolver: remote.NewResolver(cfg.BlobConfig, resolveHandlers),
+ layerCache: layerCache,
+ blobCache: blobCache,
+ prefetchTimeout: prefetchTimeout,
+ backgroundTaskManager: backgroundTaskManager,
+ config: cfg,
+ resolveLock: new(namedmutex.NamedMutex),
+ metadataStore: metadataStore,
+ overlayOpaqueType: overlayOpaqueType,
+ additionalDecompressors: additionalDecompressors,
+ }, nil
+}
+
+func newCache(root string, cacheType string, cfg config.Config) (cache.BlobCache, error) {
+ if cacheType == memoryCacheType {
+ return cache.NewMemoryCache(), nil
+ }
+
+ dcc := cfg.DirectoryCacheConfig
+ maxDataEntry := dcc.MaxLRUCacheEntry
+ if maxDataEntry == 0 {
+ maxDataEntry = defaultMaxLRUCacheEntry
+ }
+ maxFdEntry := dcc.MaxCacheFds
+ if maxFdEntry == 0 {
+ maxFdEntry = defaultMaxCacheFds
+ }
+
+ bufPool := &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ }
+ dCache, fCache := cacheutil.NewLRUCache(maxDataEntry), cacheutil.NewLRUCache(maxFdEntry)
+ dCache.OnEvicted = func(key string, value interface{}) {
+ value.(*bytes.Buffer).Reset()
+ bufPool.Put(value)
+ }
+ fCache.OnEvicted = func(key string, value interface{}) {
+ value.(*os.File).Close()
+ }
+ // create a cache in a unique directory
+ if err := os.MkdirAll(root, 0700); err != nil {
+ return nil, err
+ }
+ cachePath, err := os.MkdirTemp(root, "")
+ if err != nil {
+ return nil, fmt.Errorf("failed to initialize directory cache: %w", err)
+ }
+ return cache.NewDirectoryCache(
+ cachePath,
+ cache.DirectoryCacheConfig{
+ SyncAdd: dcc.SyncAdd,
+ DataCache: dCache,
+ FdCache: fCache,
+ BufPool: bufPool,
+ Direct: dcc.Direct,
+ },
+ )
+}
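The eviction callbacks above recycle buffers through a sync.Pool instead of discarding them. A minimal self-contained sketch of that pattern, with illustrative names:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

func main() {
	bufPool := &sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

	// onEvicted mirrors the dCache.OnEvicted callback above: drop the
	// contents but keep the allocation for reuse.
	onEvicted := func(key string, value interface{}) {
		b := value.(*bytes.Buffer)
		b.Reset()
		bufPool.Put(b)
	}

	b := bufPool.Get().(*bytes.Buffer)
	b.WriteString("cached chunk")
	onEvicted("layer-chunk-0", b)

	// Whether the pool hands back the recycled buffer or a fresh one,
	// its length is 0 at this point.
	fmt.Println(bufPool.Get().(*bytes.Buffer).Len()) // 0
}
```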
+
+// Resolve resolves a layer based on the passed layer blob information.
+func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor, esgzOpts ...metadata.Option) (_ Layer, retErr error) {
+ name := refspec.String() + "/" + desc.Digest.String()
+
+ // Wait if this layer is already being resolved. The result can then
+ // hopefully be retrieved from the cache.
+ r.resolveLock.Lock(name)
+ defer r.resolveLock.Unlock(name)
+
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("src", name))
+
+ // First, try to retrieve this layer from the underlying cache.
+ r.layerCacheMu.Lock()
+ c, done, ok := r.layerCache.Get(name)
+ r.layerCacheMu.Unlock()
+ if ok {
+ if l := c.(*layer); l.Check() == nil {
+ log.G(ctx).Debugf("hit layer cache %q", name)
+ return &layerRef{l, done}, nil
+ }
+ // Cached layer is invalid
+ done()
+ r.layerCacheMu.Lock()
+ r.layerCache.Remove(name)
+ r.layerCacheMu.Unlock()
+ }
+
+ log.G(ctx).Debugf("resolving")
+
+ // Resolve the blob.
+ blobR, err := r.resolveBlob(ctx, hosts, refspec, desc)
+ if err != nil {
+ return nil, fmt.Errorf("failed to resolve the blob: %w", err)
+ }
+ defer func() {
+ if retErr != nil {
+ blobR.done()
+ }
+ }()
+
+ fsCache, err := newCache(filepath.Join(r.rootDir, "fscache"), r.config.FSCacheType, r.config)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create fs cache: %w", err)
+ }
+ defer func() {
+ if retErr != nil {
+ fsCache.Close()
+ }
+ }()
+
+ // Get a reader for stargz archive.
+ // Each file's read operation is a prioritized task and all background tasks
+ // will be stopped during the execution so this can avoid being disturbed for
+ // NW traffic by background tasks.
+ sr := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (n int, err error) {
+ r.backgroundTaskManager.DoPrioritizedTask()
+ defer r.backgroundTaskManager.DonePrioritizedTask()
+ return blobR.ReadAt(p, offset)
+ }), 0, blobR.Size())
+ // define telemetry hooks to measure latency metrics inside estargz package
+ telemetry := metadata.Telemetry{
+ GetFooterLatency: func(start time.Time) {
+ commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzFooterGet, desc.Digest, start)
+ },
+ GetTocLatency: func(start time.Time) {
+ commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzTocGet, desc.Digest, start)
+ },
+ DeserializeTocLatency: func(start time.Time) {
+ commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.DeserializeTocJSON, desc.Digest, start)
+ },
+ }
+
+ additionalDecompressors := []metadata.Decompressor{new(zstdchunked.Decompressor)}
+ if r.additionalDecompressors != nil {
+ additionalDecompressors = append(additionalDecompressors, r.additionalDecompressors(ctx, hosts, refspec, desc)...)
+ }
+ meta, err := r.metadataStore(sr,
+ append(esgzOpts, metadata.WithTelemetry(&telemetry), metadata.WithDecompressors(additionalDecompressors...))...)
+ if err != nil {
+ return nil, err
+ }
+ vr, err := reader.NewReader(meta, fsCache, desc.Digest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read layer: %w", err)
+ }
+
+ // Combine layer information together and cache it.
+ l := newLayer(r, desc, blobR, vr)
+ r.layerCacheMu.Lock()
+ cachedL, done2, added := r.layerCache.Add(name, l)
+ r.layerCacheMu.Unlock()
+ if !added {
+ l.close() // layer already exists in the cache. discard this one.
+ }
+
+ log.G(ctx).Debugf("resolved")
+ return &layerRef{cachedL.(*layer), done2}, nil
+}
+
+// resolveBlob resolves a blob based on the passed layer blob information.
+func (r *Resolver) resolveBlob(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) (_ *blobRef, retErr error) {
+ name := refspec.String() + "/" + desc.Digest.String()
+
+ // Try to retrieve the blob from the underlying cache.
+ r.blobCacheMu.Lock()
+ c, done, ok := r.blobCache.Get(name)
+ r.blobCacheMu.Unlock()
+ if ok {
+ if blob := c.(remote.Blob); blob.Check() == nil {
+ return &blobRef{blob, done}, nil
+ }
+ // invalid blob. discard this.
+ done()
+ r.blobCacheMu.Lock()
+ r.blobCache.Remove(name)
+ r.blobCacheMu.Unlock()
+ }
+
+ httpCache, err := newCache(filepath.Join(r.rootDir, "httpcache"), r.config.HTTPCacheType, r.config)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create http cache: %w", err)
+ }
+ defer func() {
+ if retErr != nil {
+ httpCache.Close()
+ }
+ }()
+
+ // Resolve the blob and cache the result.
+ b, err := r.resolver.Resolve(ctx, hosts, refspec, desc, httpCache)
+ if err != nil {
+ return nil, fmt.Errorf("failed to resolve the source: %w", err)
+ }
+ r.blobCacheMu.Lock()
+ cachedB, done, added := r.blobCache.Add(name, b)
+ r.blobCacheMu.Unlock()
+ if !added {
+ b.Close() // blob already exists in the cache. discard this.
+ }
+ return &blobRef{cachedB.(remote.Blob), done}, nil
+}
+
+func newLayer(
+ resolver *Resolver,
+ desc ocispec.Descriptor,
+ blob *blobRef,
+ vr *reader.VerifiableReader,
+) *layer {
+ return &layer{
+ resolver: resolver,
+ desc: desc,
+ blob: blob,
+ verifiableReader: vr,
+ prefetchWaiter: newWaiter(),
+ }
+}
+
+type layer struct {
+ resolver *Resolver
+ desc ocispec.Descriptor
+ blob *blobRef
+ verifiableReader *reader.VerifiableReader
+ prefetchWaiter *waiter
+
+ prefetchSize int64
+ prefetchSizeMu sync.Mutex
+
+ r reader.Reader
+
+ closed bool
+ closedMu sync.Mutex
+
+ prefetchOnce sync.Once
+ backgroundFetchOnce sync.Once
+}
+
+func (l *layer) Info() Info {
+ var readTime time.Time
+ if l.r != nil {
+ readTime = l.r.LastOnDemandReadTime()
+ }
+ return Info{
+ Digest: l.desc.Digest,
+ Size: l.blob.Size(),
+ FetchedSize: l.blob.FetchedSize(),
+ PrefetchSize: l.prefetchedSize(),
+ ReadTime: readTime,
+ TOCDigest: l.verifiableReader.Metadata().TOCDigest(),
+ }
+}
+
+func (l *layer) prefetchedSize() int64 {
+ l.prefetchSizeMu.Lock()
+ sz := l.prefetchSize
+ l.prefetchSizeMu.Unlock()
+ return sz
+}
+
+func (l *layer) Check() error {
+ if l.isClosed() {
+ return fmt.Errorf("layer is already closed")
+ }
+ return l.blob.Check()
+}
+
+func (l *layer) Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error {
+ if l.isClosed() {
+ return fmt.Errorf("layer is already closed")
+ }
+ return l.blob.Refresh(ctx, hosts, refspec, desc)
+}
+
+func (l *layer) Verify(tocDigest digest.Digest) (err error) {
+ if l.isClosed() {
+ return fmt.Errorf("layer is already closed")
+ }
+ if l.r != nil {
+ return nil
+ }
+ l.r, err = l.verifiableReader.VerifyTOC(tocDigest)
+ return
+}
+
+func (l *layer) SkipVerify() {
+ if l.r != nil {
+ return
+ }
+ l.r = l.verifiableReader.SkipVerify()
+}
+
+func (l *layer) Prefetch(prefetchSize int64) (err error) {
+ l.prefetchOnce.Do(func() {
+ ctx := context.Background()
+ l.resolver.backgroundTaskManager.DoPrioritizedTask()
+ defer l.resolver.backgroundTaskManager.DonePrioritizedTask()
+ err = l.prefetch(ctx, prefetchSize)
+ if err != nil {
+ log.G(ctx).WithError(err).Warnf("failed to prefetch layer=%v", l.desc.Digest)
+ return
+ }
+ log.G(ctx).Debug("completed to prefetch")
+ })
+ return
+}
+
+func (l *layer) prefetch(ctx context.Context, prefetchSize int64) error {
+ defer l.prefetchWaiter.done() // Notify the completion
+ // Measuring the total time to complete prefetch (use defer func() because l.Info().PrefetchSize is set later)
+ start := time.Now()
+ defer func() {
+ commonmetrics.WriteLatencyWithBytesLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchTotal, start, commonmetrics.PrefetchSize, l.prefetchedSize())
+ }()
+
+ if l.isClosed() {
+ return fmt.Errorf("layer is already closed")
+ }
+ rootID := l.verifiableReader.Metadata().RootID()
+ if _, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.NoPrefetchLandmark); err == nil {
+ // do not prefetch this layer
+ return nil
+ } else if id, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.PrefetchLandmark); err == nil {
+ offset, err := l.verifiableReader.Metadata().GetOffset(id)
+ if err != nil {
+ return fmt.Errorf("failed to get offset of prefetch landmark: %w", err)
+ }
+ // override the prefetch size with the optimized value
+ prefetchSize = offset
+ } else if prefetchSize > l.blob.Size() {
+ // adjust prefetch size not to exceed the whole layer size
+ prefetchSize = l.blob.Size()
+ }
+
+ // Fetch the target range
+ downloadStart := time.Now()
+ err := l.blob.Cache(0, prefetchSize)
+ commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDownload, downloadStart) // time to download prefetch data
+
+ if err != nil {
+ return fmt.Errorf("failed to prefetch layer: %w", err)
+ }
+
+ // Set prefetch size for metrics after prefetch completed
+ l.prefetchSizeMu.Lock()
+ l.prefetchSize = prefetchSize
+ l.prefetchSizeMu.Unlock()
+
+ // Cache uncompressed contents of the prefetched range
+ decompressStart := time.Now()
+ err = l.verifiableReader.Cache(reader.WithFilter(func(offset int64) bool {
+ return offset < prefetchSize // Cache only prefetch target
+ }))
+ commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDecompress, decompressStart) // time to decompress prefetch data
+ if err != nil {
+ return fmt.Errorf("failed to cache prefetched layer: %w", err)
+ }
+
+ return nil
+}
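The landmark handling above can be summarized as a small decision function: a no-prefetch landmark disables prefetching, a prefetch landmark overrides the size with its offset, and otherwise the requested size is clamped to the blob size. effectivePrefetchSize is a hypothetical helper, not part of the package:

```go
package main

import "fmt"

// effectivePrefetchSize mirrors the decision order in prefetch above.
// landmarkOffset > 0 stands in for "a prefetch landmark entry exists".
func effectivePrefetchSize(requested, blobSize int64, noLandmark bool, landmarkOffset int64) int64 {
	switch {
	case noLandmark:
		return 0 // the layer opts out of prefetch
	case landmarkOffset > 0:
		return landmarkOffset // prefetch exactly up to the landmark
	case requested > blobSize:
		return blobSize // never prefetch past the end of the blob
	default:
		return requested
	}
}

func main() {
	fmt.Println(effectivePrefetchSize(1<<20, 4096, false, 0)) // 4096 (clamped)
	fmt.Println(effectivePrefetchSize(1<<20, 4096, true, 0))  // 0 (opted out)
}
```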
+
+func (l *layer) WaitForPrefetchCompletion() error {
+ if l.isClosed() {
+ return fmt.Errorf("layer is already closed")
+ }
+ return l.prefetchWaiter.wait(l.resolver.prefetchTimeout)
+}
+
+func (l *layer) BackgroundFetch() (err error) {
+ l.backgroundFetchOnce.Do(func() {
+ ctx := context.Background()
+ err = l.backgroundFetch(ctx)
+ if err != nil {
+ log.G(ctx).WithError(err).Warnf("failed to fetch whole layer=%v", l.desc.Digest)
+ return
+ }
+ log.G(ctx).Debug("completed to fetch all layer data in background")
+ })
+ return
+}
+
+func (l *layer) backgroundFetch(ctx context.Context) error {
+ defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchTotal, time.Now())
+ if l.isClosed() {
+ return fmt.Errorf("layer is already closed")
+ }
+ br := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (retN int, retErr error) {
+ l.resolver.backgroundTaskManager.InvokeBackgroundTask(func(ctx context.Context) {
+ // Measuring the time to download background fetch data (in milliseconds)
+ defer commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.BackgroundFetchDownload, l.Info().Digest, time.Now()) // time to download background fetch data
+ retN, retErr = l.blob.ReadAt(
+ p,
+ offset,
+ remote.WithContext(ctx), // Make cancellable
+ remote.WithCacheOpts(cache.Direct()), // Do not pollute mem cache
+ )
+ }, 120*time.Second)
+ return
+ }), 0, l.blob.Size())
+ defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchDecompress, time.Now()) // time to decompress background fetch data (in milliseconds)
+ return l.verifiableReader.Cache(
+ reader.WithReader(br), // Read contents in background
+ reader.WithCacheOpts(cache.Direct()), // Do not pollute mem cache
+ )
+}
+
+func (l *layerRef) Done() {
+ l.done()
+}
+
+func (l *layer) RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) {
+ if l.isClosed() {
+ return nil, fmt.Errorf("layer is already closed")
+ }
+ if l.r == nil {
+ return nil, fmt.Errorf("layer hasn't been verified yet")
+ }
+ return newNode(l.desc.Digest, l.r, l.blob, baseInode, l.resolver.overlayOpaqueType)
+}
+
+func (l *layer) ReadAt(p []byte, offset int64, opts ...remote.Option) (int, error) {
+ return l.blob.ReadAt(p, offset, opts...)
+}
+
+func (l *layer) close() error {
+ l.closedMu.Lock()
+ defer l.closedMu.Unlock()
+ if l.closed {
+ return nil
+ }
+ l.closed = true
+ defer l.blob.done() // Close reader first, then close the blob
+ l.verifiableReader.Close()
+ if l.r != nil {
+ return l.r.Close()
+ }
+ return nil
+}
+
+func (l *layer) isClosed() bool {
+ l.closedMu.Lock()
+ closed := l.closed
+ l.closedMu.Unlock()
+ return closed
+}
+
+// blobRef is a reference to the blob in the cache. Calling `done` decreases the reference counter
+// of this blob in the underlying cache. When nobody refers to the blob in the cache, resources bound
+// to this blob will be discarded.
+type blobRef struct {
+ remote.Blob
+ done func()
+}
+
+ // layerRef is a reference to the layer in the cache. Calling `Done` or `done` decreases the
+ // reference counter of this layer in the underlying cache. When nobody refers to the layer in the
+ // cache, resources bound to this layer will be discarded.
+type layerRef struct {
+ *layer
+ done func()
+}
+
+func newWaiter() *waiter {
+ return &waiter{
+ completionCond: sync.NewCond(&sync.Mutex{}),
+ }
+}
+
+type waiter struct {
+ isDone bool
+ isDoneMu sync.Mutex
+ completionCond *sync.Cond
+}
+
+func (w *waiter) done() {
+ w.isDoneMu.Lock()
+ w.isDone = true
+ w.isDoneMu.Unlock()
+ w.completionCond.Broadcast()
+}
+
+func (w *waiter) wait(timeout time.Duration) error {
+ wait := func() <-chan struct{} {
+ ch := make(chan struct{})
+ go func() {
+ w.isDoneMu.Lock()
+ isDone := w.isDone
+ w.isDoneMu.Unlock()
+
+ w.completionCond.L.Lock()
+ if !isDone {
+ w.completionCond.Wait()
+ }
+ w.completionCond.L.Unlock()
+ ch <- struct{}{}
+ }()
+ return ch
+ }
+ select {
+ case <-time.After(timeout):
+ w.isDoneMu.Lock()
+ w.isDone = true
+ w.isDoneMu.Unlock()
+ w.completionCond.Broadcast()
+ return fmt.Errorf("timeout(%v)", timeout)
+ case <-wait():
+ return nil
+ }
+}
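A usage sketch for the waiter, assuming code in the same package with `time` imported; examplePrefetchWait is a hypothetical helper:

```go
// examplePrefetchWait shows how the waiter is used above: the prefetch
// goroutine signals via done(), while WaitForPrefetchCompletion blocks in
// wait() bounded by the resolver's prefetch timeout.
func examplePrefetchWait() error {
	w := newWaiter()
	go func() {
		// ... perform the prefetch work ...
		w.done()
	}()
	// On timeout, wait() also marks the waiter as done so that later
	// callers don't block forever on an abandoned prefetch.
	return w.wait(10 * time.Second)
}
```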
+
+type readerAtFunc func([]byte, int64) (int, error)
+
+func (f readerAtFunc) ReadAt(p []byte, offset int64) (int, error) { return f(p, offset) }
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/node.go b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/node.go
new file mode 100644
index 000000000000..b6306b9b5955
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/node.go
@@ -0,0 +1,806 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the NOTICE.md file.
+*/
+
+package layer
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/containerd/log"
+ "github.com/containerd/stargz-snapshotter/estargz"
+ commonmetrics "github.com/containerd/stargz-snapshotter/fs/metrics/common"
+ "github.com/containerd/stargz-snapshotter/fs/reader"
+ "github.com/containerd/stargz-snapshotter/fs/remote"
+ "github.com/containerd/stargz-snapshotter/metadata"
+ fusefs "github.com/hanwen/go-fuse/v2/fs"
+ "github.com/hanwen/go-fuse/v2/fuse"
+ digest "github.com/opencontainers/go-digest"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ blockSize = 4096
+ physicalBlockSize = 512
+ // physicalBlockRatio is the ratio of blockSize to physicalBlockSize.
+ // It can be used to convert a count of blockSize-byte blocks to a count of physicalBlockSize-byte blocks.
+ physicalBlockRatio = blockSize / physicalBlockSize
+ whiteoutPrefix = ".wh."
+ whiteoutOpaqueDir = whiteoutPrefix + whiteoutPrefix + ".opq"
+ opaqueXattrValue = "y"
+ stateDirName = ".stargz-snapshotter"
+ statFileMode = syscall.S_IFREG | 0400 // -r--------
+ stateDirMode = syscall.S_IFDIR | 0500 // dr-x------
+)
+
+type OverlayOpaqueType int
+
+const (
+ OverlayOpaqueAll OverlayOpaqueType = iota
+ OverlayOpaqueTrusted
+ OverlayOpaqueUser
+)
+
+var opaqueXattrs = map[OverlayOpaqueType][]string{
+ OverlayOpaqueAll: {"trusted.overlay.opaque", "user.overlay.opaque"},
+ OverlayOpaqueTrusted: {"trusted.overlay.opaque"},
+ OverlayOpaqueUser: {"user.overlay.opaque"},
+}
+
+func newNode(layerDgst digest.Digest, r reader.Reader, blob remote.Blob, baseInode uint32, opaque OverlayOpaqueType) (fusefs.InodeEmbedder, error) {
+ rootID := r.Metadata().RootID()
+ rootAttr, err := r.Metadata().GetAttr(rootID)
+ if err != nil {
+ return nil, err
+ }
+ opq, ok := opaqueXattrs[opaque]
+ if !ok {
+ return nil, fmt.Errorf("Unknown overlay opaque type")
+ }
+ ffs := &fs{
+ r: r,
+ layerDigest: layerDgst,
+ baseInode: baseInode,
+ rootID: rootID,
+ opaqueXattrs: opq,
+ }
+ ffs.s = ffs.newState(layerDgst, blob)
+ return &node{
+ id: rootID,
+ attr: rootAttr,
+ fs: ffs,
+ }, nil
+}
+
+// fs contains global metadata used by nodes
+type fs struct {
+ r reader.Reader
+ s *state
+ layerDigest digest.Digest
+ baseInode uint32
+ rootID uint32
+ opaqueXattrs []string
+}
+
+func (fs *fs) inodeOfState() uint64 {
+ return (uint64(fs.baseInode) << 32) | 1 // reserved
+}
+
+func (fs *fs) inodeOfStatFile() uint64 {
+ return (uint64(fs.baseInode) << 32) | 2 // reserved
+}
+
+func (fs *fs) inodeOfID(id uint32) (uint64, error) {
+ // 0 is reserved by go-fuse; 1 and 2 are reserved for the state dir
+ if id > ^uint32(0)-3 {
+ return 0, fmt.Errorf("too many inodes")
+ }
+ return (uint64(fs.baseInode) << 32) | uint64(3+id), nil
+}
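The inode packing above fits in one line: the upper 32 bits carry the per-layer base inode, the lower 32 bits carry the metadata ID shifted past the three reserved values. A self-contained sketch (inodeOf is a hypothetical stand-in for inodeOfID, without the overflow check):

```go
package main

import "fmt"

// inodeOf packs a per-layer base and a metadata ID into one 64-bit inode,
// skipping the three reserved low values (0 for go-fuse, 1 and 2 for the
// state dir).
func inodeOf(baseInode, id uint32) uint64 {
	return (uint64(baseInode) << 32) | uint64(3+id)
}

func main() {
	fmt.Printf("%x\n", inodeOf(7, 0)) // 700000003
}
```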
+
+// node is a filesystem inode abstraction.
+type node struct {
+ fusefs.Inode
+ fs *fs
+ id uint32
+ attr metadata.Attr
+
+ ents []fuse.DirEntry
+ entsCached bool
+ entsMu sync.Mutex
+}
+
+func (n *node) isRootNode() bool {
+ return n.id == n.fs.rootID
+}
+
+func (n *node) isOpaque() bool {
+ if _, _, err := n.fs.r.Metadata().GetChild(n.id, whiteoutOpaqueDir); err == nil {
+ return true
+ }
+ return false
+}
+
+var _ = (fusefs.InodeEmbedder)((*node)(nil))
+
+var _ = (fusefs.NodeReaddirer)((*node)(nil))
+
+func (n *node) Readdir(ctx context.Context) (fusefs.DirStream, syscall.Errno) {
+ ents, errno := n.readdir()
+ if errno != 0 {
+ return nil, errno
+ }
+ return fusefs.NewListDirStream(ents), 0
+}
+
+func (n *node) readdir() ([]fuse.DirEntry, syscall.Errno) {
+ // Measure how long node_readdir operation takes (in microseconds).
+ start := time.Now() // set start time
+ defer commonmetrics.MeasureLatencyInMicroseconds(commonmetrics.NodeReaddir, n.fs.layerDigest, start)
+
+ n.entsMu.Lock()
+ if n.entsCached {
+ ents := n.ents
+ n.entsMu.Unlock()
+ return ents, 0
+ }
+ n.entsMu.Unlock()
+
+ isRoot := n.isRootNode()
+
+ var ents []fuse.DirEntry
+ whiteouts := map[string]uint32{}
+ normalEnts := map[string]bool{}
+ var lastErr error
+ if err := n.fs.r.Metadata().ForeachChild(n.id, func(name string, id uint32, mode os.FileMode) bool {
+
+ // We don't want to show prefetch landmarks in "/".
+ if isRoot && (name == estargz.PrefetchLandmark || name == estargz.NoPrefetchLandmark) {
+ return true
+ }
+
+ // We don't want to show whiteouts.
+ if strings.HasPrefix(name, whiteoutPrefix) {
+ if name == whiteoutOpaqueDir {
+ return true
+ }
+ // Add the overlayfs-compliant whiteout later.
+ whiteouts[name] = id
+ return true
+ }
+
+ // This is a normal entry.
+ normalEnts[name] = true
+ ino, err := n.fs.inodeOfID(id)
+ if err != nil {
+ lastErr = err
+ return false
+ }
+ ents = append(ents, fuse.DirEntry{
+ Mode: fileModeToSystemMode(mode),
+ Name: name,
+ Ino: ino,
+ })
+ return true
+ }); err != nil || lastErr != nil {
+ n.fs.s.report(fmt.Errorf("node.Readdir: err = %v; lastErr = %v", err, lastErr))
+ return nil, syscall.EIO
+ }
+
+ // Append whiteouts if no entry replaces the target entry in the lower layer.
+ for w, id := range whiteouts {
+ if !normalEnts[w[len(whiteoutPrefix):]] {
+ ino, err := n.fs.inodeOfID(id)
+ if err != nil {
+ n.fs.s.report(fmt.Errorf("node.Readdir: %v", err))
+ return nil, syscall.EIO
+ }
+ ents = append(ents, fuse.DirEntry{
+ Mode: syscall.S_IFCHR,
+ Name: w[len(whiteoutPrefix):],
+ Ino: ino,
+ })
+
+ }
+ }
+
+ // Avoid a nondeterministic order of entries on each call.
+ sort.Slice(ents, func(i, j int) bool {
+ return ents[i].Name < ents[j].Name
+ })
+ n.entsMu.Lock()
+ defer n.entsMu.Unlock()
+ n.ents, n.entsCached = ents, true // cache it
+
+ return ents, 0
+}
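+
+ // For illustration (hypothetical entry names): a directory holding
+ // "foo.txt" and ".wh.bar.txt" is listed as "foo.txt" plus a char-device
+ // entry "bar.txt" (the overlayfs whiteout representation), while the raw
+ // ".wh.bar.txt" entry stays hidden. If a regular "bar.txt" also existed,
+ // it would take precedence and no whiteout entry would be appended.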
+
+var _ = (fusefs.NodeLookuper)((*node)(nil))
+
+func (n *node) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*fusefs.Inode, syscall.Errno) {
+
+ isRoot := n.isRootNode()
+
+ // We don't want to show prefetch landmarks in "/".
+ if isRoot && (name == estargz.PrefetchLandmark || name == estargz.NoPrefetchLandmark) {
+ return nil, syscall.ENOENT
+ }
+
+ // We don't want to show whiteouts.
+ if strings.HasPrefix(name, whiteoutPrefix) {
+ return nil, syscall.ENOENT
+ }
+
+ // state directory
+ if isRoot && name == stateDirName {
+ return n.NewInode(ctx, n.fs.s, n.fs.stateToAttr(&out.Attr)), 0
+ }
+
+ // lookup on memory nodes
+ if cn := n.GetChild(name); cn != nil {
+ switch tn := cn.Operations().(type) {
+ case *node:
+ ino, err := n.fs.inodeOfID(tn.id)
+ if err != nil {
+ n.fs.s.report(fmt.Errorf("node.Lookup: %v", err))
+ return nil, syscall.EIO
+ }
+ entryToAttr(ino, tn.attr, &out.Attr)
+ case *whiteout:
+ ino, err := n.fs.inodeOfID(tn.id)
+ if err != nil {
+ n.fs.s.report(fmt.Errorf("node.Lookup: %v", err))
+ return nil, syscall.EIO
+ }
+ entryToAttr(ino, tn.attr, &out.Attr)
+ default:
+ n.fs.s.report(fmt.Errorf("node.Lookup: unknown node type detected"))
+ return nil, syscall.EIO
+ }
+ return cn, 0
+ }
+
+ // early return if this entry doesn't exist
+ n.entsMu.Lock()
+ if n.entsCached {
+ var found bool
+ for _, e := range n.ents {
+ if e.Name == name {
+ found = true
+ }
+ }
+ if !found {
+ n.entsMu.Unlock()
+ return nil, syscall.ENOENT
+ }
+ }
+ n.entsMu.Unlock()
+
+ id, ce, err := n.fs.r.Metadata().GetChild(n.id, name)
+ if err != nil {
+ // If the entry exists as a whiteout, show an overlayfs-styled whiteout node.
+ if whID, wh, err := n.fs.r.Metadata().GetChild(n.id, fmt.Sprintf("%s%s", whiteoutPrefix, name)); err == nil {
+ ino, err := n.fs.inodeOfID(whID)
+ if err != nil {
+ n.fs.s.report(fmt.Errorf("node.Lookup: %v", err))
+ return nil, syscall.EIO
+ }
+ return n.NewInode(ctx, &whiteout{
+ id: whID,
+ fs: n.fs,
+ attr: wh,
+ }, entryToWhAttr(ino, wh, &out.Attr)), 0
+ }
+ n.readdir() // This code path is very expensive. Cache child entries here so that the next call doesn't reach here.
+ return nil, syscall.ENOENT
+ }
+
+ ino, err := n.fs.inodeOfID(id)
+ if err != nil {
+ n.fs.s.report(fmt.Errorf("node.Lookup: %v", err))
+ return nil, syscall.EIO
+ }
+ return n.NewInode(ctx, &node{
+ id: id,
+ fs: n.fs,
+ attr: ce,
+ }, entryToAttr(ino, ce, &out.Attr)), 0
+}
+
+var _ = (fusefs.NodeOpener)((*node)(nil))
+
+func (n *node) Open(ctx context.Context, flags uint32) (fh fusefs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
+ ra, err := n.fs.r.OpenFile(n.id)
+ if err != nil {
+ n.fs.s.report(fmt.Errorf("node.Open: %v", err))
+ return nil, 0, syscall.EIO
+ }
+ return &file{
+ n: n,
+ ra: ra,
+ }, fuse.FOPEN_KEEP_CACHE, 0
+}
+
+var _ = (fusefs.NodeGetattrer)((*node)(nil))
+
+func (n *node) Getattr(ctx context.Context, f fusefs.FileHandle, out *fuse.AttrOut) syscall.Errno {
+ ino, err := n.fs.inodeOfID(n.id)
+ if err != nil {
+ n.fs.s.report(fmt.Errorf("node.Getattr: %v", err))
+ return syscall.EIO
+ }
+ entryToAttr(ino, n.attr, &out.Attr)
+ return 0
+}
+
+var _ = (fusefs.NodeGetxattrer)((*node)(nil))
+
+func (n *node) Getxattr(ctx context.Context, attr string, dest []byte) (uint32, syscall.Errno) {
+ ent := n.attr
+ opq := n.isOpaque()
+ for _, opaqueXattr := range n.fs.opaqueXattrs {
+ if attr == opaqueXattr && opq {
+ // This node is an opaque directory so give overlayfs-compliant indicator.
+ if len(dest) < len(opaqueXattrValue) {
+ return uint32(len(opaqueXattrValue)), syscall.ERANGE
+ }
+ return uint32(copy(dest, opaqueXattrValue)), 0
+ }
+ }
+ if v, ok := ent.Xattrs[attr]; ok {
+ if len(dest) < len(v) {
+ return uint32(len(v)), syscall.ERANGE
+ }
+ return uint32(copy(dest, v)), 0
+ }
+ return 0, syscall.ENODATA
+}
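+
+ // Getxattr follows the standard getxattr(2) size-probe protocol: when dest
+ // is too small, the value size is returned together with ERANGE so a caller
+ // can probe first and retry with a big-enough buffer. A hypothetical calling
+ // sequence for an opaque directory node (sketch, not part of this file):
+ //
+ //	sz, errno := n.Getxattr(ctx, "trusted.overlay.opaque", nil) // sz = 1, errno = ERANGE
+ //	buf := make([]byte, sz)
+ //	sz, errno = n.Getxattr(ctx, "trusted.overlay.opaque", buf)  // buf now holds "y"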
+
+var _ = (fusefs.NodeListxattrer)((*node)(nil))
+
+func (n *node) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errno) {
+ ent := n.attr
+ opq := n.isOpaque()
+ var attrs []byte
+ if opq {
+ // This node is an opaque directory so add overlayfs-compliant indicator.
+ for _, opaqueXattr := range n.fs.opaqueXattrs {
+ attrs = append(attrs, []byte(opaqueXattr+"\x00")...)
+ }
+ }
+ for k := range ent.Xattrs {
+ attrs = append(attrs, []byte(k+"\x00")...)
+ }
+ if len(dest) < len(attrs) {
+ return uint32(len(attrs)), syscall.ERANGE
+ }
+ return uint32(copy(dest, attrs)), 0
+}
+
+var _ = (fusefs.NodeReadlinker)((*node)(nil))
+
+func (n *node) Readlink(ctx context.Context) ([]byte, syscall.Errno) {
+ ent := n.attr
+ return []byte(ent.LinkName), 0
+}
+
+var _ = (fusefs.NodeStatfser)((*node)(nil))
+
+func (n *node) Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno {
+ defaultStatfs(out)
+ return 0
+}
+
+ // file is a file abstraction which implements a file handle in go-fuse.
+type file struct {
+ n *node
+ ra io.ReaderAt
+}
+
+var _ = (fusefs.FileReader)((*file)(nil))
+
+func (f *file) Read(ctx context.Context, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) {
+ defer commonmetrics.MeasureLatencyInMicroseconds(commonmetrics.ReadOnDemand, f.n.fs.layerDigest, time.Now()) // measure time for on-demand file reads (in microseconds)
+ defer commonmetrics.IncOperationCount(commonmetrics.OnDemandReadAccessCount, f.n.fs.layerDigest) // increment the counter for on-demand file accesses
+ n, err := f.ra.ReadAt(dest, off)
+ if err != nil && err != io.EOF {
+ f.n.fs.s.report(fmt.Errorf("file.Read: %v", err))
+ return nil, syscall.EIO
+ }
+ return fuse.ReadResultData(dest[:n]), 0
+}
+
+var _ = (fusefs.FileGetattrer)((*file)(nil))
+
+func (f *file) Getattr(ctx context.Context, out *fuse.AttrOut) syscall.Errno {
+ ino, err := f.n.fs.inodeOfID(f.n.id)
+ if err != nil {
+ f.n.fs.s.report(fmt.Errorf("file.Getattr: %v", err))
+ return syscall.EIO
+ }
+ entryToAttr(ino, f.n.attr, &out.Attr)
+ return 0
+}
+
+ // whiteout is a whiteout abstraction compliant with overlayfs.
+type whiteout struct {
+ fusefs.Inode
+ id uint32
+ fs *fs
+ attr metadata.Attr
+}
+
+var _ = (fusefs.NodeGetattrer)((*whiteout)(nil))
+
+func (w *whiteout) Getattr(ctx context.Context, f fusefs.FileHandle, out *fuse.AttrOut) syscall.Errno {
+ ino, err := w.fs.inodeOfID(w.id)
+ if err != nil {
+ w.fs.s.report(fmt.Errorf("whiteout.Getattr: %v", err))
+ return syscall.EIO
+ }
+ entryToWhAttr(ino, w.attr, &out.Attr)
+ return 0
+}
+
+var _ = (fusefs.NodeStatfser)((*whiteout)(nil))
+
+func (w *whiteout) Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno {
+ defaultStatfs(out)
+ return 0
+}
+
+ // newState provides a new state directory node.
+ // It creates the statFile at the same time to give it a stable inode number.
+func (fs *fs) newState(layerDigest digest.Digest, blob remote.Blob) *state {
+ return &state{
+ statFile: &statFile{
+ name: layerDigest.String() + ".json",
+ statJSON: statJSON{
+ Digest: layerDigest.String(),
+ Size: blob.Size(),
+ },
+ blob: blob,
+ fs: fs,
+ },
+ fs: fs,
+ }
+}
+
+ // state is a directory which contains a "state file" of this layer, aiming at
+ // observability. This filesystem uses it to report information (e.g. errors) to
+ // clients (e.g. Kubernetes's livenessProbe).
+// This directory has mode "dr-x------ root root".
+type state struct {
+ fusefs.Inode
+ statFile *statFile
+ fs *fs
+}
+
+var _ = (fusefs.NodeReaddirer)((*state)(nil))
+
+func (s *state) Readdir(ctx context.Context) (fusefs.DirStream, syscall.Errno) {
+ return fusefs.NewListDirStream([]fuse.DirEntry{
+ {
+ Mode: statFileMode,
+ Name: s.statFile.name,
+ Ino: s.fs.inodeOfStatFile(),
+ },
+ }), 0
+}
+
+var _ = (fusefs.NodeLookuper)((*state)(nil))
+
+func (s *state) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*fusefs.Inode, syscall.Errno) {
+ if name != s.statFile.name {
+ return nil, syscall.ENOENT
+ }
+ attr, errno := s.statFile.attr(&out.Attr)
+ if errno != 0 {
+ return nil, errno
+ }
+ return s.NewInode(ctx, s.statFile, attr), 0
+}
+
+var _ = (fusefs.NodeGetattrer)((*state)(nil))
+
+func (s *state) Getattr(ctx context.Context, f fusefs.FileHandle, out *fuse.AttrOut) syscall.Errno {
+ s.fs.stateToAttr(&out.Attr)
+ return 0
+}
+
+var _ = (fusefs.NodeStatfser)((*state)(nil))
+
+func (s *state) Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno {
+ defaultStatfs(out)
+ return 0
+}
+
+func (s *state) report(err error) {
+ s.statFile.report(err)
+}
+
+type statJSON struct {
+ Error string `json:"error,omitempty"`
+ Digest string `json:"digest"`
+ // URL is excluded for potential security reasons
+ Size int64 `json:"size"`
+ FetchedSize int64 `json:"fetchedSize"`
+ FetchedPercent float64 `json:"fetchedPercent"` // Fetched / Size * 100.0
+}
+
+ // statFile is a file which contains information to be reported from this layer.
+ // This filesystem uses statFile.report() to report information (e.g. errors) to
+ // clients (e.g. Kubernetes's livenessProbe).
+// This file has mode "-r-------- root root".
+type statFile struct {
+ fusefs.Inode
+ name string
+ blob remote.Blob
+ statJSON statJSON
+ mu sync.Mutex
+ fs *fs
+}
+
+var _ = (fusefs.NodeOpener)((*statFile)(nil))
+
+func (sf *statFile) Open(ctx context.Context, flags uint32) (fh fusefs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
+ return nil, 0, 0
+}
+
+var _ = (fusefs.NodeReader)((*statFile)(nil))
+
+func (sf *statFile) Read(ctx context.Context, f fusefs.FileHandle, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) {
+ sf.mu.Lock()
+ defer sf.mu.Unlock()
+ st, err := sf.updateStatUnlocked()
+ if err != nil {
+ return nil, syscall.EIO
+ }
+ n, err := bytes.NewReader(st).ReadAt(dest, off)
+ if err != nil && err != io.EOF {
+ return nil, syscall.EIO
+ }
+ return fuse.ReadResultData(dest[:n]), 0
+}
+
+var _ = (fusefs.NodeGetattrer)((*statFile)(nil))
+
+func (sf *statFile) Getattr(ctx context.Context, f fusefs.FileHandle, out *fuse.AttrOut) syscall.Errno {
+ _, errno := sf.attr(&out.Attr)
+ return errno
+}
+
+var _ = (fusefs.NodeStatfser)((*statFile)(nil))
+
+func (sf *statFile) Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno {
+ defaultStatfs(out)
+ return 0
+}
+
+// logContents puts the contents of statFile in the log
+// to keep that information accessible for troubleshooting.
+ // The entry naming is kept consistent with the field naming in statJSON.
+func (sf *statFile) logContents() {
+ ctx := context.Background()
+ log.G(ctx).WithFields(log.Fields{
+ "digest": sf.statJSON.Digest, "size": sf.statJSON.Size,
+ "fetchedSize": sf.statJSON.FetchedSize, "fetchedPercent": sf.statJSON.FetchedPercent,
+ }).WithError(errors.New(sf.statJSON.Error)).Error("statFile error")
+}
+
+func (sf *statFile) report(err error) {
+ sf.mu.Lock()
+ defer sf.mu.Unlock()
+ sf.statJSON.Error = err.Error()
+ sf.logContents()
+}
+
+func (sf *statFile) attr(out *fuse.Attr) (fusefs.StableAttr, syscall.Errno) {
+ sf.mu.Lock()
+ defer sf.mu.Unlock()
+
+ st, err := sf.updateStatUnlocked()
+ if err != nil {
+ return fusefs.StableAttr{}, syscall.EIO
+ }
+
+ return sf.fs.statFileToAttr(uint64(len(st)), out), 0
+}
+
+func (sf *statFile) updateStatUnlocked() ([]byte, error) {
+ sf.statJSON.FetchedSize = sf.blob.FetchedSize()
+ sf.statJSON.FetchedPercent = float64(sf.statJSON.FetchedSize) / float64(sf.statJSON.Size) * 100.0
+ j, err := json.Marshal(&sf.statJSON)
+ if err != nil {
+ return nil, err
+ }
+ j = append(j, []byte("\n")...)
+ return j, nil
+}
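+
+ // For illustration, with Size = 10, FetchedSize = 5 and no reported error,
+ // updateStatUnlocked marshals (field order follows the struct; the digest
+ // value here is a placeholder):
+ //
+ //	{"digest":"sha256:...","size":10,"fetchedSize":5,"fetchedPercent":50}
+ //
+ // plus a trailing newline. "error" is omitted via its ",omitempty" tag.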
+
+// entryToAttr converts metadata.Attr to go-fuse's Attr.
+func entryToAttr(ino uint64, e metadata.Attr, out *fuse.Attr) fusefs.StableAttr {
+ out.Ino = ino
+ out.Size = uint64(e.Size)
+ if e.Mode&os.ModeSymlink != 0 {
+ out.Size = uint64(len(e.LinkName))
+ }
+ out.Blksize = blockSize
+ out.Blocks = (out.Size + uint64(out.Blksize) - 1) / uint64(out.Blksize) * physicalBlockRatio
+ mtime := e.ModTime
+ out.SetTimes(nil, &mtime, nil)
+ out.Mode = fileModeToSystemMode(e.Mode)
+ out.Owner = fuse.Owner{Uid: uint32(e.UID), Gid: uint32(e.GID)}
+ out.Rdev = uint32(unix.Mkdev(uint32(e.DevMajor), uint32(e.DevMinor)))
+ out.Nlink = uint32(e.NumLink)
+ if out.Nlink == 0 {
+ out.Nlink = 1 // zero "NumLink" means one.
+ }
+ out.Padding = 0 // TODO
+
+ return fusefs.StableAttr{
+ Mode: out.Mode,
+ Ino: out.Ino,
+ // NOTE: The inode number is unique throughout the lifetime of
+ // this filesystem, so we don't take generation into account at this
+ // moment.
+ }
+}
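+
+ // Worked example for the Blocks computation above (assuming, for
+ // illustration, blockSize = 4096 and physicalBlockSize = 512, i.e.
+ // physicalBlockRatio = 8): a 6000-byte file yields
+ // Blocks = ceil(6000/4096) * 8 = 16, matching stat(2)'s convention that
+ // st_blocks counts 512-byte units.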
+
+ // entryToWhAttr converts metadata.Attr to go-fuse's Attr for whiteouts.
+func entryToWhAttr(ino uint64, e metadata.Attr, out *fuse.Attr) fusefs.StableAttr {
+ out.Ino = ino
+ out.Size = 0
+ out.Blksize = blockSize
+ out.Blocks = 0
+ mtime := e.ModTime
+ out.SetTimes(nil, &mtime, nil)
+ out.Mode = syscall.S_IFCHR
+ out.Owner = fuse.Owner{Uid: 0, Gid: 0}
+ out.Rdev = uint32(unix.Mkdev(0, 0))
+ out.Nlink = 1
+ out.Padding = 0 // TODO
+
+ return fusefs.StableAttr{
+ Mode: out.Mode,
+ Ino: out.Ino,
+ // NOTE: The inode number is unique throughout the lifetime of
+ // this filesystem, so we don't take generation into account at this
+ // moment.
+ }
+}
+
+// stateToAttr converts state directory to go-fuse's Attr.
+func (fs *fs) stateToAttr(out *fuse.Attr) fusefs.StableAttr {
+ out.Ino = fs.inodeOfState()
+ out.Size = 0
+ out.Blksize = blockSize
+ out.Blocks = 0
+ out.Nlink = 1
+
+ // root can read and open it (dr-x------ root root).
+ out.Mode = stateDirMode
+ out.Owner = fuse.Owner{Uid: 0, Gid: 0}
+
+ // dummy
+ out.Mtime = 0
+ out.Mtimensec = 0
+ out.Rdev = 0
+ out.Padding = 0
+
+ return fusefs.StableAttr{
+ Mode: out.Mode,
+ Ino: out.Ino,
+ // NOTE: The inode number is unique throughout the lifetime of
+ // this filesystem, so we don't take generation into account at this
+ // moment.
+ }
+}
+
+ // statFileToAttr converts the stat file to go-fuse's Attr.
+func (fs *fs) statFileToAttr(size uint64, out *fuse.Attr) fusefs.StableAttr {
+ out.Ino = fs.inodeOfStatFile()
+ out.Size = size
+ out.Blksize = blockSize
+ out.Blocks = (out.Size + uint64(out.Blksize) - 1) / uint64(out.Blksize) * physicalBlockRatio
+ out.Nlink = 1
+
+ // Root can read it ("-r-------- root root").
+ out.Mode = statFileMode
+ out.Owner = fuse.Owner{Uid: 0, Gid: 0}
+
+ // dummy
+ out.Mtime = 0
+ out.Mtimensec = 0
+ out.Rdev = 0
+ out.Padding = 0
+
+ return fusefs.StableAttr{
+ Mode: out.Mode,
+ Ino: out.Ino,
+ // NOTE: The inode number is unique throughout the lifetime of
+ // this filesystem, so we don't take generation into account at this
+ // moment.
+ }
+}
+
+func fileModeToSystemMode(m os.FileMode) uint32 {
+ // Permission bits
+ res := uint32(m & os.ModePerm)
+
+ // File type bits
+ switch m & os.ModeType {
+ case os.ModeDevice:
+ res |= syscall.S_IFBLK
+ case os.ModeDevice | os.ModeCharDevice:
+ res |= syscall.S_IFCHR
+ case os.ModeDir:
+ res |= syscall.S_IFDIR
+ case os.ModeNamedPipe:
+ res |= syscall.S_IFIFO
+ case os.ModeSymlink:
+ res |= syscall.S_IFLNK
+ case os.ModeSocket:
+ res |= syscall.S_IFSOCK
+ default: // regular file.
+ res |= syscall.S_IFREG
+ }
+
+ // suid, sgid, sticky bits
+ if m&os.ModeSetuid != 0 {
+ res |= syscall.S_ISUID
+ }
+ if m&os.ModeSetgid != 0 {
+ res |= syscall.S_ISGID
+ }
+ if m&os.ModeSticky != 0 {
+ res |= syscall.S_ISVTX
+ }
+
+ return res
+}
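+
+ // Example (illustrative): os.ModeDir|os.ModeSetgid|0755 maps to
+ // syscall.S_IFDIR|syscall.S_ISGID|0755, while a plain 0644 file carries no
+ // type bit and falls through to syscall.S_IFREG|0644.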
+
+func defaultStatfs(stat *fuse.StatfsOut) {
+
+ // http://man7.org/linux/man-pages/man2/statfs.2.html
+ stat.Blocks = 0 // dummy
+ stat.Bfree = 0
+ stat.Bavail = 0
+ stat.Files = 0 // dummy
+ stat.Ffree = 0
+ stat.Bsize = blockSize
+ stat.NameLen = 1<<32 - 1
+ stat.Frsize = blockSize
+ stat.Padding = 0
+ stat.Spare = [6]uint32{}
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/testutil.go
new file mode 100644
index 000000000000..5a38825d004d
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/testutil.go
@@ -0,0 +1,1192 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the NOTICE.md file.
+*/
+
+package layer
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/stargz-snapshotter/cache"
+ "github.com/containerd/stargz-snapshotter/estargz"
+ "github.com/containerd/stargz-snapshotter/fs/reader"
+ "github.com/containerd/stargz-snapshotter/fs/remote"
+ "github.com/containerd/stargz-snapshotter/fs/source"
+ "github.com/containerd/stargz-snapshotter/metadata"
+ "github.com/containerd/stargz-snapshotter/task"
+ tutil "github.com/containerd/stargz-snapshotter/util/testutil"
+ fusefs "github.com/hanwen/go-fuse/v2/fs"
+ "github.com/hanwen/go-fuse/v2/fuse"
+ "github.com/klauspost/compress/zstd"
+ digest "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ sampleChunkSize = 3
+ sampleData1 = "0123456789"
+ sampleData2 = "abcdefghij"
+)
+
+var srcCompressions = map[string]tutil.CompressionFactory{
+ "zstd-fastest": tutil.ZstdCompressionWithLevel(zstd.SpeedFastest),
+ "gzip-bestspeed": tutil.GzipCompressionWithLevel(gzip.BestSpeed),
+ "externaltoc-gzip-bestspeed": tutil.ExternalTOCGzipCompressionWithLevel(gzip.BestSpeed),
+}
+
+func TestSuiteLayer(t *testing.T, store metadata.Store) {
+ testPrefetch(t, store)
+ testNodeRead(t, store)
+ testNodes(t, store)
+}
+
+var testStateLayerDigest = digest.FromString("dummy")
+
+func testPrefetch(t *testing.T, factory metadata.Store) {
+ data64KB := string(tutil.RandomBytes(t, 64000))
+ defaultPrefetchSize := int64(10000)
+ landmarkPosition := func(t *testing.T, l *layer) int64 {
+ if l.r == nil {
+ t.Fatalf("layer hasn't been verified yet")
+ }
+ if id, _, err := l.r.Metadata().GetChild(l.r.Metadata().RootID(), estargz.PrefetchLandmark); err == nil {
+ offset, err := l.r.Metadata().GetOffset(id)
+ if err != nil {
+ t.Fatalf("failed to get offset of prefetch landmark")
+ }
+ return offset
+ }
+ return defaultPrefetchSize
+ }
+ tests := []struct {
+ name string
+ chunkSize int // default is "sampleChunkSize"
+ minChunkSize int
+ in []tutil.TarEntry
+ wantNum int // number of chunks wanted in the cache
+ wants []string // filenames to compare
+ prefetchSize func(*testing.T, *layer) int64
+ prioritizedFiles []string
+ }{
+ {
+ name: "no_prefetch",
+ in: []tutil.TarEntry{
+ tutil.File("foo.txt", sampleData1),
+ },
+ wantNum: 0,
+ prioritizedFiles: nil,
+ },
+ {
+ name: "prefetch",
+ in: []tutil.TarEntry{
+ tutil.File("foo.txt", sampleData1),
+ tutil.File("bar.txt", sampleData2),
+ },
+ wantNum: chunkNum(sampleData1),
+ wants: []string{"foo.txt"},
+ prefetchSize: landmarkPosition,
+ prioritizedFiles: []string{"foo.txt"},
+ },
+ {
+ name: "with_dir",
+ in: []tutil.TarEntry{
+ tutil.Dir("foo/"),
+ tutil.File("foo/bar.txt", sampleData1),
+ tutil.Dir("buz/"),
+ tutil.File("buz/buzbuz.txt", sampleData2),
+ },
+ wantNum: chunkNum(sampleData1),
+ wants: []string{"foo/bar.txt"},
+ prefetchSize: landmarkPosition,
+ prioritizedFiles: []string{"foo/", "foo/bar.txt"},
+ },
+ {
+ name: "several_files_in_chunk",
+ minChunkSize: 8000,
+ chunkSize: 1000000000, // do not chunk
+ in: []tutil.TarEntry{
+ tutil.Dir("foo/"),
+ tutil.File("foo/foo1", data64KB),
+ tutil.File("foo2", "bb"),
+ tutil.File("foo22", "ccc"),
+ tutil.Dir("bar/"),
+ tutil.File("bar/bar.txt", "aaa"),
+ tutil.File("foo3", data64KB),
+ },
+ // NOTE: we assume that the compressed "data64KB" is still larger than 8KB
+ // landmark+dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer
+ wantNum: 5, // foo1 + foo2 + foo22 + bar.txt + foo3
+ wants: []string{"foo/foo1", "foo2", "foo22", "bar/bar.txt", "foo3"},
+ prefetchSize: landmarkPosition,
+ prioritizedFiles: []string{"foo/", "foo/foo1", "foo2", "foo22", "bar/", "bar/bar.txt", "foo3"},
+ },
+ {
+ name: "several_files_in_chunk_chunked",
+ minChunkSize: 8000,
+ chunkSize: 32000,
+ in: []tutil.TarEntry{
+ tutil.Dir("foo/"),
+ tutil.File("foo/foo1", data64KB),
+ tutil.File("foo2", "bb"),
+ tutil.Dir("bar/"),
+ tutil.File("foo3", data64KB),
+ },
+ // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB
+ // landmark+dir+foo1(1), foo1(2), foo2, dir+foo3(1), foo3(2), TOC, footer
+ wantNum: 3, // foo1(2) + foo2 (foo3(1) shouldn't be in a separated stream)
+ wants: []string{"foo/foo1", "foo2"},
+ prefetchSize: landmarkPosition,
+ prioritizedFiles: []string{"foo/", "foo/foo1", "foo2"},
+ },
+ }
+
+ for _, tt := range tests {
+ for srcCompressionName, srcCompression := range srcCompressions {
+ cl := srcCompression()
+ t.Run("testPrefetch-"+tt.name+"-"+srcCompressionName, func(t *testing.T) {
+ chunkSize := sampleChunkSize
+ if tt.chunkSize > 0 {
+ chunkSize = tt.chunkSize
+ }
+ minChunkSize := 0
+ if tt.minChunkSize > 0 {
+ minChunkSize = tt.minChunkSize
+ }
+ sr, dgst, err := tutil.BuildEStargz(tt.in,
+ tutil.WithEStargzOptions(
+ estargz.WithChunkSize(chunkSize),
+ estargz.WithMinChunkSize(minChunkSize),
+ estargz.WithPrioritizedFiles(tt.prioritizedFiles),
+ estargz.WithCompression(cl),
+ ))
+ if err != nil {
+ t.Fatalf("failed to build eStargz: %v", err)
+ }
+ blob := newBlob(t, sr)
+ mcache := cache.NewMemoryCache()
+ mr, err := factory(sr, metadata.WithDecompressors(cl))
+ if err != nil {
+ t.Fatalf("failed to create metadata reader: %v", err)
+ }
+ defer mr.Close()
+ vr, err := reader.NewReader(mr, mcache, digest.FromString(""))
+ if err != nil {
+ t.Fatalf("failed to create reader: %v", err)
+ }
+ l := newLayer(
+ &Resolver{
+ prefetchTimeout: time.Second,
+ backgroundTaskManager: task.NewBackgroundTaskManager(10, 5*time.Second),
+ },
+ ocispec.Descriptor{Digest: testStateLayerDigest},
+ &blobRef{blob, func() {}},
+ vr,
+ )
+ if err := l.Verify(dgst); err != nil {
+ t.Errorf("failed to verify reader: %v", err)
+ return
+ }
+ prefetchSize := int64(0)
+ if tt.prefetchSize != nil {
+ prefetchSize = tt.prefetchSize(t, l)
+ }
+ if err := l.Prefetch(defaultPrefetchSize); err != nil {
+ t.Errorf("failed to prefetch: %v", err)
+ return
+ }
+ if blob.calledPrefetchOffset != 0 {
+ t.Errorf("invalid prefetch offset %d; want %d",
+ blob.calledPrefetchOffset, 0)
+ }
+ if blob.calledPrefetchSize != prefetchSize {
+ t.Errorf("invalid prefetch size %d; want %d",
+ blob.calledPrefetchSize, prefetchSize)
+ }
+ if cLen := len(mcache.(*cache.MemoryCache).Membuf); tt.wantNum != cLen {
+ t.Errorf("number of chunks in the cache %d; want %d: %v", cLen, tt.wantNum, err)
+ return
+ }
+
+ lr := l.r
+ if lr == nil {
+ t.Fatalf("failed to get reader from layer: %v", err)
+ }
+ for _, file := range tt.wants {
+ id, err := lookup(lr.Metadata(), file)
+ if err != nil {
+ t.Fatalf("failed to lookup %q: %v", file, err)
+ }
+ e, err := lr.Metadata().GetAttr(id)
+ if err != nil {
+ t.Fatalf("failed to get attr of %q: %v", file, err)
+ }
+ wantFile, err := lr.OpenFile(id)
+ if err != nil {
+ t.Fatalf("failed to open file %q", file)
+ }
+ blob.readCalled = false
+ if _, err := io.Copy(io.Discard, io.NewSectionReader(wantFile, 0, e.Size)); err != nil {
+ t.Fatalf("failed to read file %q", file)
+ }
+ if blob.readCalled {
+ t.Errorf("chunks of file %q aren't cached", file)
+ return
+ }
+ }
+ })
+ }
+ }
+}
+
+func lookup(r metadata.Reader, name string) (uint32, error) {
+ name = strings.TrimPrefix(path.Clean("/"+name), "/")
+ if name == "" {
+ return r.RootID(), nil
+ }
+ dir, base := filepath.Split(name)
+ pid, err := lookup(r, dir)
+ if err != nil {
+ return 0, err
+ }
+ id, _, err := r.GetChild(pid, base)
+ return id, err
+}
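+
+ // E.g. lookup(r, "foo/bar.txt") recurses as lookup(r, "foo") ->
+ // lookup(r, ""), which returns the root ID; "foo" is then resolved under
+ // the root and "bar.txt" under "foo" via GetChild.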
+
+func chunkNum(data string) int {
+ return (len(data)-1)/sampleChunkSize + 1
+}
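+
+ // With sampleChunkSize = 3, chunkNum(sampleData1) for the 10-byte
+ // "0123456789" is (10-1)/3 + 1 = 4 chunks: "012", "345", "678", "9".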
+
+type region struct {
+ begin int64
+ end int64 // inclusive
+}
+
+func isDup(a, b region) bool {
+ if a.begin < b.begin {
+ return a.end >= b.begin
+ }
+ // b.begin <= a.begin
+ return b.end >= a.begin
+}
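+
+ // Because region.end is inclusive, touching regions count as duplicates:
+ // isDup(region{0, 4}, region{4, 8}) is true (both cover offset 4), while
+ // isDup(region{0, 3}, region{4, 8}) is false.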
+
+func newBlob(t *testing.T, sr *io.SectionReader) *sampleBlob {
+ return &sampleBlob{
+ t: t,
+ r: sr,
+ }
+}
+
+type sampleBlob struct {
+ t *testing.T
+
+ r *io.SectionReader
+ readCalled bool
+ calledPrefetchOffset int64
+ calledPrefetchSize int64
+ calledRegions []region // sorted
+}
+
+func (sb *sampleBlob) Authn(tr http.RoundTripper) (http.RoundTripper, error) { return nil, nil }
+func (sb *sampleBlob) Check() error { return nil }
+func (sb *sampleBlob) Size() int64 { return sb.r.Size() }
+func (sb *sampleBlob) FetchedSize() int64 { return 0 }
+func (sb *sampleBlob) ReadAt(p []byte, offset int64, opts ...remote.Option) (int, error) {
+ if len(p) > 0 {
+ target := region{offset, offset + int64(len(p)) - 1}
+ if len(sb.calledRegions) == 0 {
+ sb.calledRegions = []region{target}
+ } else {
+ pos := 0
+ found := false
+ for i, r := range sb.calledRegions {
+ if target.begin < r.begin {
+ pos = i
+ found = true
+ break
+ }
+ }
+ if !found {
+ pos = len(sb.calledRegions)
+ }
+ if pos > 0 {
+ b := sb.calledRegions[pos-1]
+ if isDup(b, target) {
+ sb.t.Fatalf("reading on the previous region is duplicated: %+v and %+v", b, target)
+ }
+ }
+ if pos+1 < len(sb.calledRegions) {
+ a := sb.calledRegions[pos+1]
+ if isDup(a, target) {
+ sb.t.Fatalf("reading on the next region is duplicated: %+v and %+v", a, target)
+ }
+ }
+ sb.calledRegions = append(sb.calledRegions[:pos], append([]region{target}, sb.calledRegions[pos:]...)...)
+ }
+ }
+
+ sb.readCalled = true
+ return sb.r.ReadAt(p, offset)
+}
+func (sb *sampleBlob) Cache(offset int64, size int64, option ...remote.Option) error {
+ sb.calledPrefetchOffset = offset
+ sb.calledPrefetchSize = size
+ return nil
+}
+func (sb *sampleBlob) Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error {
+ return nil
+}
+func (sb *sampleBlob) Close() error { return nil }
+
+const (
+ sampleMiddleOffset = sampleChunkSize / 2
+ lastChunkOffset1 = sampleChunkSize * (int64(len(sampleData1)) / sampleChunkSize)
+)
+
+func testNodeRead(t *testing.T, factory metadata.Store) {
+ sizeCond := map[string]int64{
+ "single_chunk": sampleChunkSize - sampleMiddleOffset,
+ "multi_chunks": sampleChunkSize + sampleMiddleOffset,
+ }
+ innerOffsetCond := map[string]int64{
+ "at_top": 0,
+ "at_middle": sampleMiddleOffset,
+ }
+ baseOffsetCond := map[string]int64{
+ "of_1st_chunk": sampleChunkSize * 0,
+ "of_2nd_chunk": sampleChunkSize * 1,
+ "of_last_chunk": lastChunkOffset1,
+ }
+ fileSizeCond := map[string]int64{
+ "in_1_chunk_file": sampleChunkSize * 1,
+ "in_2_chunks_file": sampleChunkSize * 2,
+ "in_max_size_file": int64(len(sampleData1)),
+ }
+ for sn, size := range sizeCond {
+ for in, innero := range innerOffsetCond {
+ for bo, baseo := range baseOffsetCond {
+ for fn, filesize := range fileSizeCond {
+ for _, srcCompression := range srcCompressions {
+ cl := srcCompression()
+ t.Run(fmt.Sprintf("reading_%s_%s_%s_%s", sn, in, bo, fn), func(t *testing.T) {
+ if filesize > int64(len(sampleData1)) {
+ t.Fatal("sample file size is larger than sample data")
+ }
+
+ wantN := size
+ offset := baseo + innero
+ if remain := filesize - offset; remain < wantN {
+ if wantN = remain; wantN < 0 {
+ wantN = 0
+ }
+ }
+
+ // use constant string value as a data source.
+ want := strings.NewReader(sampleData1)
+
+ // data we want to get.
+ wantData := make([]byte, wantN)
+ _, err := want.ReadAt(wantData, offset)
+ if err != nil && err != io.EOF {
+ t.Fatalf("want.ReadAt (offset=%d,size=%d): %v", offset, wantN, err)
+ }
+
+ // data we get from the file node.
+ f, closeFn := makeNodeReader(t, []byte(sampleData1)[:filesize], sampleChunkSize, factory, cl)
+ defer closeFn()
+ tmpbuf := make([]byte, size) // the FUSE library can request more bytes than remain
+ rr, errno := f.Read(context.Background(), tmpbuf, offset)
+ if errno != 0 {
+ t.Errorf("failed to read off=%d, size=%d, filesize=%d: %v", offset, size, filesize, err)
+ return
+ }
+ if rsize := rr.Size(); int64(rsize) != wantN {
+ t.Errorf("read size: %d; want: %d; passed %d", rsize, wantN, size)
+ return
+ }
+ tmpbuf = make([]byte, len(tmpbuf))
+ respData, fs := rr.Bytes(tmpbuf)
+ if fs != fuse.OK {
+ t.Errorf("failed to read result data for off=%d, size=%d, filesize=%d: %v", offset, size, filesize, err)
+ }
+
+ if !bytes.Equal(wantData, respData) {
+ t.Errorf("off=%d, filesize=%d; read data{size=%d,data=%q}; want (size=%d,data=%q)",
+ offset, filesize, len(respData), string(respData), wantN, string(wantData))
+ return
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+}
+
+func makeNodeReader(t *testing.T, contents []byte, chunkSize int, factory metadata.Store, cl tutil.Compression) (_ *file, closeFn func() error) {
+ testName := "test"
+ sr, tocDgst, err := tutil.BuildEStargz(
+ []tutil.TarEntry{tutil.File(testName, string(contents))},
+ tutil.WithEStargzOptions(estargz.WithChunkSize(chunkSize), estargz.WithCompression(cl)),
+ )
+ if err != nil {
+ t.Fatalf("failed to build sample eStargz: %v", err)
+ }
+ r, err := factory(sr, metadata.WithDecompressors(cl))
+ if err != nil {
+ t.Fatalf("failed to create reader: %v", err)
+ }
+ rootNode := getRootNode(t, r, OverlayOpaqueAll, tocDgst, cache.NewMemoryCache())
+ var eo fuse.EntryOut
+ inode, errno := rootNode.Lookup(context.Background(), testName, &eo)
+ if errno != 0 {
+ r.Close()
+ t.Fatalf("failed to lookup test node; errno: %v", errno)
+ }
+ f, _, errno := inode.Operations().(fusefs.NodeOpener).Open(context.Background(), 0)
+ if errno != 0 {
+ r.Close()
+ t.Fatalf("failed to open test file; errno: %v", errno)
+ }
+ return f.(*file), r.Close
+}
+
+func testNodes(t *testing.T, factory metadata.Store) {
+ for _, o := range []OverlayOpaqueType{OverlayOpaqueAll, OverlayOpaqueTrusted, OverlayOpaqueUser} {
+ testNodesWithOpaque(t, factory, o)
+ }
+}
+
+func testNodesWithOpaque(t *testing.T, factory metadata.Store, opaque OverlayOpaqueType) {
+ data64KB := string(tutil.RandomBytes(t, 64000))
+ hasOpaque := func(entry string) check {
+ return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) {
+ for _, k := range opaqueXattrs[opaque] {
+ hasNodeXattrs(entry, k, opaqueXattrValue)(t, root, cc, cr)
+ }
+ }
+ }
+ tests := []struct {
+ name string
+ chunkSize int
+ minChunkSize int
+ in []tutil.TarEntry
+ want []check
+ }{
+ {
+ name: "1_whiteout_with_sibling",
+ in: []tutil.TarEntry{
+ tutil.Dir("foo/"),
+ tutil.File("foo/bar.txt", ""),
+ tutil.File("foo/.wh.foo.txt", ""),
+ },
+ want: []check{
+ hasValidWhiteout("foo/foo.txt"),
+ fileNotExist("foo/.wh.foo.txt"),
+ },
+ },
+ {
+ name: "1_whiteout_with_duplicated_name",
+ in: []tutil.TarEntry{
+ tutil.Dir("foo/"),
+ tutil.File("foo/bar.txt", "test"),
+ tutil.File("foo/.wh.bar.txt", ""),
+ },
+ want: []check{
+ hasFileDigest("foo/bar.txt", digestFor("test")),
+ fileNotExist("foo/.wh.bar.txt"),
+ },
+ },
+ {
+ name: "1_opaque",
+ in: []tutil.TarEntry{
+ tutil.Dir("foo/"),
+ tutil.File("foo/.wh..wh..opq", ""),
+ },
+ want: []check{
+ hasOpaque("foo/"),
+ fileNotExist("foo/.wh..wh..opq"),
+ },
+ },
+ {
+ name: "1_opaque_with_sibling",
+ in: []tutil.TarEntry{
+ tutil.Dir("foo/"),
+ tutil.File("foo/.wh..wh..opq", ""),
+ tutil.File("foo/bar.txt", "test"),
+ },
+ want: []check{
+ hasOpaque("foo/"),
+ hasFileDigest("foo/bar.txt", digestFor("test")),
+ fileNotExist("foo/.wh..wh..opq"),
+ },
+ },
+ {
+ name: "1_opaque_with_xattr",
+ in: []tutil.TarEntry{
+ tutil.Dir("foo/", tutil.WithDirXattrs(map[string]string{"foo": "bar"})),
+ tutil.File("foo/.wh..wh..opq", ""),
+ },
+ want: []check{
+ hasOpaque("foo/"),
+ hasNodeXattrs("foo/", "foo", "bar"),
+ fileNotExist("foo/.wh..wh..opq"),
+ },
+ },
+ {
+ name: "prefetch_landmark",
+ in: []tutil.TarEntry{
+ tutil.File(estargz.PrefetchLandmark, "test"),
+ tutil.Dir("foo/"),
+ tutil.File(fmt.Sprintf("foo/%s", estargz.PrefetchLandmark), "test"),
+ },
+ want: []check{
+ fileNotExist(estargz.PrefetchLandmark),
+ hasFileDigest(fmt.Sprintf("foo/%s", estargz.PrefetchLandmark), digestFor("test")),
+ },
+ },
+ {
+ name: "no_prefetch_landmark",
+ in: []tutil.TarEntry{
+ tutil.File(estargz.NoPrefetchLandmark, "test"),
+ tutil.Dir("foo/"),
+ tutil.File(fmt.Sprintf("foo/%s", estargz.NoPrefetchLandmark), "test"),
+ },
+ want: []check{
+ fileNotExist(estargz.NoPrefetchLandmark),
+ hasFileDigest(fmt.Sprintf("foo/%s", estargz.NoPrefetchLandmark), digestFor("test")),
+ },
+ },
+ {
+ name: "state_file",
+ in: []tutil.TarEntry{
+ tutil.File("test", "test"),
+ },
+ want: []check{
+ hasFileDigest("test", digestFor("test")),
+ hasStateFile(t, testStateLayerDigest.String()+".json"),
+ },
+ },
+ {
+ name: "file_suid",
+ in: []tutil.TarEntry{
+ tutil.File("test", "test", tutil.WithFileMode(0644|os.ModeSetuid)),
+ },
+ want: []check{
+ hasExtraMode("test", os.ModeSetuid),
+ },
+ },
+ {
+ name: "dir_sgid",
+ in: []tutil.TarEntry{
+ tutil.Dir("test/", tutil.WithDirMode(0755|os.ModeSetgid)),
+ },
+ want: []check{
+ hasExtraMode("test/", os.ModeSetgid),
+ },
+ },
+ {
+ name: "file_sticky",
+ in: []tutil.TarEntry{
+ tutil.File("test", "test", tutil.WithFileMode(0644|os.ModeSticky)),
+ },
+ want: []check{
+ hasExtraMode("test", os.ModeSticky),
+ },
+ },
+ {
+ name: "symlink_size",
+ in: []tutil.TarEntry{
+ tutil.Symlink("test", "target"),
+ },
+ want: []check{
+ hasSize("test", len("target")),
+ },
+ },
+ {
+ name: "several_files_in_chunk",
+ minChunkSize: 8000,
+ in: []tutil.TarEntry{
+ tutil.Dir("foo/"),
+ tutil.File("foo/foo1", data64KB),
+ tutil.File("foo2", "bb"),
+ tutil.File("foo22", "ccc"),
+ tutil.Dir("bar/"),
+ tutil.File("bar/bar.txt", "aaa"),
+ tutil.File("foo3", data64KB),
+ },
+ // NOTE: we assume that the compressed "data64KB" is still larger than 8KB
+ // landmark+dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer
+ want: []check{
+ hasFileContentsWithPreCached("foo22", 0, "ccc", chunkInfo{"foo2", "bb", 0, 2}, chunkInfo{"bar/bar.txt", "aaa", 0, 3}, chunkInfo{"foo3", data64KB, 0, 64000}),
+ hasFileContentsOffset("foo2", 0, "bb", true),
+ hasFileContentsOffset("bar/bar.txt", 0, "aaa", true),
+ hasFileContentsOffset("bar/bar.txt", 1, "aa", true),
+ hasFileContentsOffset("bar/bar.txt", 2, "a", true),
+ hasFileContentsOffset("foo3", 0, data64KB, true),
+ hasFileContentsOffset("foo22", 0, "ccc", true),
+ hasFileContentsOffset("foo/foo1", 0, data64KB, false),
+ hasFileContentsOffset("foo/foo1", 0, data64KB, true),
+ hasFileContentsOffset("foo/foo1", 1, data64KB[1:], true),
+ hasFileContentsOffset("foo/foo1", 2, data64KB[2:], true),
+ hasFileContentsOffset("foo/foo1", 3, data64KB[3:], true),
+ },
+ },
+ {
+ name: "several_files_in_chunk_chunked",
+ minChunkSize: 8000,
+ chunkSize: 32000,
+ in: []tutil.TarEntry{
+ tutil.Dir("foo/"),
+ tutil.File("foo/foo1", data64KB),
+ tutil.File("foo2", "bb"),
+ tutil.Dir("bar/"),
+ tutil.File("foo3", data64KB),
+ },
+ // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB
+ // landmark+dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer
+ want: []check{
+ hasFileContentsWithPreCached("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000], 0, 32000}),
+ hasFileContentsOffset("foo2", 0, "bb", true),
+ hasFileContentsOffset("foo2", 1, "b", true),
+ hasFileContentsOffset("foo3", 0, data64KB[:len(data64KB)/2], true),
+ hasFileContentsOffset("foo3", 1, data64KB[1:len(data64KB)/2], true),
+ hasFileContentsOffset("foo3", 2, data64KB[2:len(data64KB)/2], true),
+ hasFileContentsOffset("foo3", int64(len(data64KB)/2), data64KB[len(data64KB)/2:], false),
+ hasFileContentsOffset("foo3", int64(len(data64KB)-1), data64KB[len(data64KB)-1:], true),
+ hasFileContentsOffset("foo/foo1", 0, data64KB, false),
+ hasFileContentsOffset("foo/foo1", 1, data64KB[1:], true),
+ hasFileContentsOffset("foo/foo1", 2, data64KB[2:], true),
+ hasFileContentsOffset("foo/foo1", int64(len(data64KB)/2), data64KB[len(data64KB)/2:], true),
+ hasFileContentsOffset("foo/foo1", int64(len(data64KB)-1), data64KB[len(data64KB)-1:], true),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ for _, srcCompression := range srcCompressions {
+ cl := srcCompression()
+ t.Run(tt.name, func(t *testing.T) {
+ opts := []tutil.BuildEStargzOption{
+ tutil.WithEStargzOptions(estargz.WithCompression(cl)),
+ }
+ if tt.chunkSize > 0 {
+ opts = append(opts, tutil.WithEStargzOptions(estargz.WithChunkSize(tt.chunkSize)))
+ }
+ if tt.minChunkSize > 0 {
+ opts = append(opts, tutil.WithEStargzOptions(estargz.WithMinChunkSize(tt.minChunkSize)))
+ }
+ sgz, tocDgst, err := tutil.BuildEStargz(tt.in, opts...)
+ if err != nil {
+ t.Fatalf("failed to build sample eStargz: %v", err)
+ }
+
+ testR := &calledReaderAt{sgz, nil}
+ r, err := factory(io.NewSectionReader(testR, 0, sgz.Size()), metadata.WithDecompressors(cl))
+ if err != nil {
+ t.Fatalf("failed to create reader: %v", err)
+ }
+ defer r.Close()
+ mcache := cache.NewMemoryCache()
+ rootNode := getRootNode(t, r, opaque, tocDgst, mcache)
+ for _, want := range tt.want {
+ want(t, rootNode, mcache, testR)
+ }
+ })
+ }
+ }
+}
+
+func getRootNode(t *testing.T, r metadata.Reader, opaque OverlayOpaqueType, tocDgst digest.Digest, cc cache.BlobCache) *node {
+ vr, err := reader.NewReader(r, cc, digest.FromString(""))
+ if err != nil {
+ t.Fatalf("failed to create reader: %v", err)
+ }
+ rr, err := vr.VerifyTOC(tocDgst)
+ if err != nil {
+ t.Fatalf("failed to verify reader: %v", err)
+ }
+ rootNode, err := newNode(testStateLayerDigest, rr, &testBlobState{10, 5}, 100, opaque)
+ if err != nil {
+ t.Fatalf("failed to get root node: %v", err)
+ }
+ fusefs.NewNodeFS(rootNode, &fusefs.Options{}) // initializes root node
+ return rootNode.(*node)
+}
+
+type testBlobState struct {
+ size int64
+ fetchedSize int64
+}
+
+func (tb *testBlobState) Check() error { return nil }
+func (tb *testBlobState) Size() int64 { return tb.size }
+func (tb *testBlobState) FetchedSize() int64 { return tb.fetchedSize }
+func (tb *testBlobState) ReadAt(p []byte, offset int64, opts ...remote.Option) (int, error) {
+ return 0, nil
+}
+func (tb *testBlobState) Cache(offset int64, size int64, opts ...remote.Option) error { return nil }
+func (tb *testBlobState) Refresh(ctx context.Context, host source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error {
+ return nil
+}
+func (tb *testBlobState) Close() error { return nil }
+
+type check func(*testing.T, *node, cache.BlobCache, *calledReaderAt)
+
+func fileNotExist(file string) check {
+ return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) {
+ if _, _, err := getDirentAndNode(t, root, file); err == nil {
+ t.Errorf("Node %q exists", file)
+ }
+ }
+}
+
+func hasFileDigest(filename string, digest string) check {
+ return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) {
+ _, n, err := getDirentAndNode(t, root, filename)
+ if err != nil {
+ t.Fatalf("failed to get node %q: %v", filename, err)
+ }
+ ni := n.Operations().(*node)
+ attr, err := ni.fs.r.Metadata().GetAttr(ni.id)
+ if err != nil {
+ t.Fatalf("failed to get attr %q(%d): %v", filename, ni.id, err)
+ }
+ fh, _, errno := ni.Open(context.Background(), 0)
+ if errno != 0 {
+ t.Fatalf("failed to open node %q: %v", filename, errno)
+ }
+ rr, errno := fh.(*file).Read(context.Background(), make([]byte, attr.Size), 0)
+ if errno != 0 {
+ t.Fatalf("failed to read node %q: %v", filename, errno)
+ }
+ res, status := rr.Bytes(make([]byte, attr.Size))
+ if status != fuse.OK {
+ t.Fatalf("failed to get read result of node %q: %v", filename, status)
+ }
+ if ndgst := digestFor(string(res)); ndgst != digest {
+ t.Fatalf("Digest(%q) = %q, want %q", filename, ndgst, digest)
+ }
+ }
+}
+
+func hasSize(name string, size int) check {
+ return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) {
+ _, n, err := getDirentAndNode(t, root, name)
+ if err != nil {
+ t.Fatalf("failed to get node %q: %v", name, err)
+ }
+ var ao fuse.AttrOut
+ if errno := n.Operations().(fusefs.NodeGetattrer).Getattr(context.Background(), nil, &ao); errno != 0 {
+ t.Fatalf("failed to get attributes of node %q: %v", name, errno)
+ }
+ if ao.Attr.Size != uint64(size) {
+ t.Fatalf("got size = %d, want %d", ao.Attr.Size, size)
+ }
+ }
+}
+
+func hasExtraMode(name string, mode os.FileMode) check {
+ return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) {
+ _, n, err := getDirentAndNode(t, root, name)
+ if err != nil {
+ t.Fatalf("failed to get node %q: %v", name, err)
+ }
+ var ao fuse.AttrOut
+ if errno := n.Operations().(fusefs.NodeGetattrer).Getattr(context.Background(), nil, &ao); errno != 0 {
+ t.Fatalf("failed to get attributes of node %q: %v", name, errno)
+ }
+ a := ao.Attr
+ gotMode := a.Mode & (syscall.S_ISUID | syscall.S_ISGID | syscall.S_ISVTX)
+ wantMode := extraModeToTarMode(mode)
+ if gotMode != uint32(wantMode) {
+ t.Fatalf("got mode = %b, want %b", gotMode, wantMode)
+ }
+ }
+}
+
+func hasValidWhiteout(name string) check {
+ return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) {
+ ent, n, err := getDirentAndNode(t, root, name)
+ if err != nil {
+ t.Fatalf("failed to get node %q: %v", name, err)
+ }
+ var ao fuse.AttrOut
+ if errno := n.Operations().(fusefs.NodeGetattrer).Getattr(context.Background(), nil, &ao); errno != 0 {
+ t.Fatalf("failed to get attributes of file %q: %v", name, errno)
+ }
+ a := ao.Attr
+ if a.Ino != ent.Ino {
+ t.Errorf("inconsistent inodes %d(Node) != %d(Dirent)", a.Ino, ent.Ino)
+ return
+ }
+
+ // validate the direntry
+ if ent.Mode != syscall.S_IFCHR {
+ t.Errorf("whiteout entry %q isn't a char device", name)
+ return
+ }
+
+ // validate the node
+ if a.Mode != syscall.S_IFCHR {
+ t.Errorf("whiteout %q has an invalid mode %o; want %o",
+ name, a.Mode, syscall.S_IFCHR)
+ return
+ }
+ if a.Rdev != uint32(unix.Mkdev(0, 0)) {
+ t.Errorf("whiteout %q has invalid device numbers (%d, %d); want (0, 0)",
+ name, unix.Major(uint64(a.Rdev)), unix.Minor(uint64(a.Rdev)))
+ return
+ }
+ }
+}
+
+func hasNodeXattrs(entry, name, value string) check {
+ return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) {
+ _, n, err := getDirentAndNode(t, root, entry)
+ if err != nil {
+ t.Fatalf("failed to get node %q: %v", entry, err)
+ }
+
+ // check xattr exists in the xattrs list.
+ buf := make([]byte, 1000)
+ nb, errno := n.Operations().(fusefs.NodeListxattrer).Listxattr(context.Background(), buf)
+ if errno != 0 {
+ t.Fatalf("failed to get xattrs list of node %q: %v", entry, err)
+ }
+ attrs := strings.Split(string(buf[:nb]), "\x00")
+ var found bool
+ for _, x := range attrs {
+ if x == name {
+ found = true
+ }
+ }
+ if !found {
+ t.Errorf("node %q doesn't have an opaque xattr %q", entry, value)
+ return
+ }
+
+ // check the xattr has valid value.
+ v := make([]byte, len(value))
+ nv, errno := n.Operations().(fusefs.NodeGetxattrer).Getxattr(context.Background(), name, v)
+ if errno != 0 {
+ t.Fatalf("failed to get xattr %q of node %q: %v", name, entry, err)
+ }
+ if int(nv) != len(value) {
+ t.Fatalf("invalid xattr size for file %q, value %q got %d; want %d",
+ name, value, nv, len(value))
+ }
+ if string(v) != value {
+ t.Errorf("node %q has an invalid xattr %q; want %q", entry, v, value)
+ return
+ }
+ }
+}
+
+func hasEntry(t *testing.T, name string, ents fusefs.DirStream) (fuse.DirEntry, bool) {
+ for ents.HasNext() {
+ de, errno := ents.Next()
+ if errno != 0 {
+ t.Fatalf("faield to read entries for %q", name)
+ }
+ if de.Name == name {
+ return de, true
+ }
+ }
+ return fuse.DirEntry{}, false
+}
+
+func hasStateFile(t *testing.T, id string) check {
+ return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) {
+
+ // Check the state dir is hidden on OpenDir for "/"
+ ents, errno := root.Readdir(context.Background())
+ if errno != 0 {
+ t.Errorf("failed to open root directory: %v", errno)
+ return
+ }
+ if _, ok := hasEntry(t, stateDirName, ents); ok {
+ t.Errorf("state direntry %q should not be listed", stateDirName)
+ return
+ }
+
+ // Check existence of state dir
+ var eo fuse.EntryOut
+ sti, errno := root.Lookup(context.Background(), stateDirName, &eo)
+ if errno != 0 {
+ t.Errorf("failed to lookup directory %q: %v", stateDirName, errno)
+ return
+ }
+ st, ok := sti.Operations().(*state)
+ if !ok {
+ t.Errorf("directory %q isn't a state node", stateDirName)
+ return
+ }
+
+ // Check existence of state file
+ ents, errno = st.Readdir(context.Background())
+ if errno != 0 {
+ t.Errorf("failed to open directory %q: %v", stateDirName, errno)
+ return
+ }
+ if _, ok := hasEntry(t, id, ents); !ok {
+ t.Errorf("direntry %q not found in %q", id, stateDirName)
+ return
+ }
+ inode, errno := st.Lookup(context.Background(), id, &eo)
+ if errno != 0 {
+ t.Errorf("failed to lookup node %q in %q: %v", id, stateDirName, errno)
+ return
+ }
+ n, ok := inode.Operations().(*statFile)
+ if !ok {
+ t.Errorf("entry %q isn't a normal node", id)
+ return
+ }
+
+ // wanted data
+ b, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
+ if err != nil {
+ panic(err)
+ }
+ wantErr := fmt.Errorf("test-%d", b.Int64())
+
+ // report the data
+ root.fs.s.report(wantErr)
+
+ // obtain file size (check later)
+ var ao fuse.AttrOut
+ errno = n.Operations().(fusefs.NodeGetattrer).Getattr(context.Background(), nil, &ao)
+ if errno != 0 {
+ t.Errorf("failed to get attr of state file: %v", errno)
+ return
+ }
+ attr := ao.Attr
+
+ // get data via state file
+ tmp := make([]byte, 4096)
+ res, errno := n.Read(context.Background(), nil, tmp, 0)
+ if errno != 0 {
+ t.Errorf("failed to read state file: %v", errno)
+ return
+ }
+ gotState, status := res.Bytes(nil)
+ if status != fuse.OK {
+ t.Errorf("failed to get result bytes of state file: %v", errno)
+ return
+ }
+ if attr.Size != uint64(len(string(gotState))) {
+ t.Errorf("size %d; want %d", attr.Size, len(string(gotState)))
+ return
+ }
+
+ var j statJSON
+ if err := json.Unmarshal(gotState, &j); err != nil {
+ t.Errorf("failed to unmarshal %q: %v", string(gotState), err)
+ return
+ }
+ if wantErr.Error() != j.Error {
+ t.Errorf("expected error %q, got %q", wantErr.Error(), j.Error)
+ return
+ }
+ }
+}
+
+ // getDirentAndNode gets the dirent and node at the specified path at once and
+ // makes sure that both of them exist.
+func getDirentAndNode(t *testing.T, root *node, path string) (ent fuse.DirEntry, n *fusefs.Inode, err error) {
+ dir, base := filepath.Split(filepath.Clean(path))
+
+ // get the target's parent directory.
+ var eo fuse.EntryOut
+ d := root
+ for _, name := range strings.Split(dir, "/") {
+ if len(name) == 0 {
+ continue
+ }
+ di, errno := d.Lookup(context.Background(), name, &eo)
+ if errno != 0 {
+ err = fmt.Errorf("failed to lookup directory %q: %v", name, errno)
+ return
+ }
+ var ok bool
+ if d, ok = di.Operations().(*node); !ok {
+ err = fmt.Errorf("directory %q isn't a normal node", name)
+ return
+ }
+
+ }
+
+ // get the target's direntry.
+ ents, errno := d.Readdir(context.Background())
+ if errno != 0 {
+ err = fmt.Errorf("failed to open directory %q: %v", path, errno)
+ }
+ ent, ok := hasEntry(t, base, ents)
+ if !ok {
+ err = fmt.Errorf("direntry %q not found in the parent directory of %q", base, path)
+ }
+
+ // get the target's node.
+ n, errno = d.Lookup(context.Background(), base, &eo)
+ if errno != 0 {
+ err = fmt.Errorf("failed to lookup node %q: %v", path, errno)
+ }
+
+ return
+}
+
+func digestFor(content string) string {
+ sum := sha256.Sum256([]byte(content))
+ return fmt.Sprintf("sha256:%x", sum)
+}
+
+ // suid, sgid, sticky bits for archive/tar
+// /~https://github.com/golang/go/blob/release-branch.go1.13/src/archive/tar/common.go#L607-L609
+const (
+ cISUID = 04000 // Set uid
+ cISGID = 02000 // Set gid
+ cISVTX = 01000 // Save text (sticky bit)
+)
+
+func extraModeToTarMode(fm os.FileMode) (tm int64) {
+ if fm&os.ModeSetuid != 0 {
+ tm |= cISUID
+ }
+ if fm&os.ModeSetgid != 0 {
+ tm |= cISGID
+ }
+ if fm&os.ModeSticky != 0 {
+ tm |= cISVTX
+ }
+ return
+}
+
+type chunkInfo struct {
+ name string
+ data string
+ chunkOffset int64
+ chunkSize int64
+}
+
+func hasFileContentsWithPreCached(name string, off int64, contents string, extra ...chunkInfo) check {
+ return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) {
+ buf := readFile(t, root, name, int64(len(contents)), off)
+ if len(buf) != len(contents) {
+ t.Fatalf("failed to read contents %q (off:%d, want:%q) got %q", name, off, longBytesView([]byte(contents)), longBytesView(buf))
+ }
+ if string(buf) != contents {
+ t.Fatalf("unexpected content of %q: %q want %q", name, longBytesView(buf), longBytesView([]byte(contents)))
+ }
+ for _, e := range extra {
+ cr.called = nil // reset test
+ data := readFile(t, root, e.name, e.chunkSize, e.chunkOffset)
+ if string(data) != e.data {
+ t.Fatalf("unexpected contents of %q (%+v): %q; wanted %q", e.name, e, longBytesView(data), longBytesView([]byte(e.data)))
+ }
+ if len(cr.called) != 0 {
+ t.Fatalf("unexpected read on %q: offsets: %v", e.name, cr.called)
+ }
+ }
+ }
+}
+
+func hasFileContentsOffset(name string, off int64, contents string, fromCache bool) check {
+ return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) {
+ cr.called = nil // reset test
+ buf := readFile(t, root, name, int64(len(contents)), off)
+ if len(buf) != len(contents) {
+ t.Fatalf("failed to read contents %q (off:%d, want:%q) got %q", name, off, longBytesView([]byte(contents)), longBytesView(buf))
+ }
+ if string(buf) != contents {
+ t.Fatalf("unexpected content of %q: %q want %q", name, longBytesView(buf), longBytesView([]byte(contents)))
+ }
+ t.Logf("reader calls for %q: offsets: %+v", name, cr.called)
+ if fromCache {
+ if len(cr.called) != 0 {
+ t.Fatalf("unexpected read on %q: offsets: %v", name, cr.called)
+ }
+ } else {
+ if len(cr.called) == 0 {
+ t.Fatalf("no call happened to reader for %q", name)
+ }
+ }
+ }
+}
+
+func readFile(t *testing.T, root *node, filename string, size, off int64) []byte {
+ _, n, err := getDirentAndNode(t, root, filename)
+ if err != nil {
+ t.Fatalf("failed to get node %q: %v", filename, err)
+ }
+ ni := n.Operations().(*node)
+ fh, _, errno := ni.Open(context.Background(), 0)
+ if errno != 0 {
+ t.Fatalf("failed to open node %q: %v", filename, errno)
+ }
+ rr, errno := fh.(*file).Read(context.Background(), make([]byte, size), off)
+ if errno != 0 {
+ t.Fatalf("failed to read node %q: %v", filename, errno)
+ }
+ buf, status := rr.Bytes(make([]byte, size))
+ if status != fuse.OK {
+ t.Fatalf("failed to get read result of node %q: %v", filename, status)
+ }
+ return buf
+}
+
+type calledReaderAt struct {
+ io.ReaderAt
+ called []int64
+}
+
+func (r *calledReaderAt) ReadAt(p []byte, off int64) (int, error) {
+ r.called = append(r.called, off)
+ return r.ReaderAt.ReadAt(p, off)
+}
+
+ // longBytesView is an alias of []byte that prints long data in a truncated form to keep test output readable.
+type longBytesView []byte
+
+func (b longBytesView) String() string {
+ if len(b) < 100 {
+ return string(b)
+ }
+ return string(b[:50]) + "...(omit)..." + string(b[len(b)-50:])
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/metrics/common/metrics.go b/vendor/github.com/containerd/stargz-snapshotter/fs/metrics/common/metrics.go
new file mode 100644
index 000000000000..f2655723f520
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/metrics/common/metrics.go
@@ -0,0 +1,216 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package commonmetrics
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/containerd/log"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ // OperationLatencyKeyMilliseconds is the key for stargz operation latency metrics in milliseconds.
+ OperationLatencyKeyMilliseconds = "operation_duration_milliseconds"
+
+ // OperationLatencyKeyMicroseconds is the key for stargz operation latency metrics in microseconds.
+ OperationLatencyKeyMicroseconds = "operation_duration_microseconds"
+
+ // OperationCountKey is the key for stargz operation count metrics.
+ OperationCountKey = "operation_count"
+
+ // BytesServedKey is the key for any metric related to counting bytes served as part of a specific operation.
+ BytesServedKey = "bytes_served"
+
+ // Keep namespace as stargz and subsystem as fs.
+ namespace = "stargz"
+ subsystem = "fs"
+)
+
+// Lists all metric labels.
+const (
+ // prometheus metrics
+ Mount = "mount"
+ RemoteRegistryGet = "remote_registry_get"
+ NodeReaddir = "node_readdir"
+ StargzHeaderGet = "stargz_header_get"
+ StargzFooterGet = "stargz_footer_get"
+ StargzTocGet = "stargz_toc_get"
+ DeserializeTocJSON = "stargz_toc_json_deserialize"
+ PrefetchesCompleted = "all_prefetches_completed"
+ ReadOnDemand = "read_on_demand"
+ MountLayerToLastOnDemandFetch = "mount_layer_to_last_on_demand_fetch"
+
+ OnDemandReadAccessCount = "on_demand_read_access_count"
+ OnDemandRemoteRegistryFetchCount = "on_demand_remote_registry_fetch_count"
+ OnDemandBytesServed = "on_demand_bytes_served"
+ OnDemandBytesFetched = "on_demand_bytes_fetched"
+
+ // logs metrics
+ PrefetchTotal = "prefetch_total"
+ PrefetchDownload = "prefetch_download"
+ PrefetchDecompress = "prefetch_decompress"
+ BackgroundFetchTotal = "background_fetch_total"
+ BackgroundFetchDownload = "background_fetch_download"
+ BackgroundFetchDecompress = "background_fetch_decompress"
+ PrefetchSize = "prefetch_size"
+)
+
+var (
+ // Buckets for OperationLatency metrics.
+ latencyBucketsMilliseconds = []float64{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384} // in milliseconds
+ latencyBucketsMicroseconds = []float64{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024} // in microseconds
+
+ // operationLatencyMilliseconds collects operation latency numbers in milliseconds grouped by
+ // operation, type and layer digest.
+ operationLatencyMilliseconds = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: OperationLatencyKeyMilliseconds,
+ Help: "Latency in milliseconds of stargz snapshotter operations. Broken down by operation type and layer sha.",
+ Buckets: latencyBucketsMilliseconds,
+ },
+ []string{"operation_type", "layer"},
+ )
+
+ // operationLatencyMicroseconds collects operation latency numbers in microseconds grouped by
+ // operation, type and layer digest.
+ operationLatencyMicroseconds = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: OperationLatencyKeyMicroseconds,
+ Help: "Latency in microseconds of stargz snapshotter operations. Broken down by operation type and layer sha.",
+ Buckets: latencyBucketsMicroseconds,
+ },
+ []string{"operation_type", "layer"},
+ )
+
+ // operationCount collects operation count numbers by operation
+ // type and layer sha.
+ operationCount = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: OperationCountKey,
+ Help: "The count of stargz snapshotter operations. Broken down by operation type and layer sha.",
+ },
+ []string{"operation_type", "layer"},
+ )
+
+ // bytesCount reflects the number of bytes served as part of a specific operation type per layer sha.
+ bytesCount = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: BytesServedKey,
+ Help: "The number of bytes served per stargz snapshotter operations. Broken down by operation type and layer sha.",
+ },
+ []string{"operation_type", "layer"},
+ )
+)
+
+var register sync.Once
+var logLevel = log.DebugLevel
+
+// sinceInMilliseconds returns the time elapsed since the specified start in milliseconds.
+// The division by 1e6 yields the value as a floating-point number, since the native method
+// .Milliseconds() returns an integer and loses precision for sub-millisecond values.
+func sinceInMilliseconds(start time.Time) float64 {
+ return float64(time.Since(start).Nanoseconds()) / 1e6
+}
+
+// sinceInMicroseconds returns the time elapsed since the specified start in microseconds.
+// The division by 1e3 yields the value as a floating-point number, since the native method
+// .Microseconds() returns an integer and loses precision for sub-microsecond values.
+func sinceInMicroseconds(start time.Time) float64 {
+ return float64(time.Since(start).Nanoseconds()) / 1e3
+}
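+
+// Illustrative example (not from the vendored source): for an elapsed time of
+// 850µs, time.Duration.Milliseconds() returns 0, whereas sinceInMilliseconds
+// returns 0.85, so sub-millisecond samples still land in the right histogram
+// bucket.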
+
+// Register registers the metrics collectors. The registration body runs only once, no matter how many times Register is called.
+func Register(l log.Level) {
+ register.Do(func() {
+ logLevel = l
+ prometheus.MustRegister(operationLatencyMilliseconds)
+ prometheus.MustRegister(operationLatencyMicroseconds)
+ prometheus.MustRegister(operationCount)
+ prometheus.MustRegister(bytesCount)
+ })
+}
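+
+// A minimal usage sketch (illustrative, not part of the vendored API surface):
+// register the collectors once at startup, then record a latency sample around
+// an operation. layerDigest here is an assumed variable held by the caller.
+//
+//	commonmetrics.Register(log.DebugLevel)
+//	start := time.Now()
+//	// ... perform the mount ...
+//	commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.Mount, layerDigest, start)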
+
+// MeasureLatencyInMilliseconds wraps attaching the labels and calling Observe into a single method.
+// Right now we attach the operation and layer digest, so it's possible to see the latency breakdown
+// by operation and individual layers.
+// If you want this to be layer agnostic, pass the digest of an empty string, e.g.
+// layerDigest := digest.FromString("")
+func MeasureLatencyInMilliseconds(operation string, layer digest.Digest, start time.Time) {
+ operationLatencyMilliseconds.WithLabelValues(operation, layer.String()).Observe(sinceInMilliseconds(start))
+}
+
+// MeasureLatencyInMicroseconds wraps attaching the labels and calling Observe into a single method.
+// Right now we attach the operation and layer digest, so it's possible to see the latency breakdown
+// by operation and individual layers.
+// If you want this to be layer agnostic, pass the digest of an empty string, e.g.
+// layerDigest := digest.FromString("")
+func MeasureLatencyInMicroseconds(operation string, layer digest.Digest, start time.Time) {
+ operationLatencyMicroseconds.WithLabelValues(operation, layer.String()).Observe(sinceInMicroseconds(start))
+}
+
+// IncOperationCount wraps attaching the labels and calling Inc into a single method.
+func IncOperationCount(operation string, layer digest.Digest) {
+ operationCount.WithLabelValues(operation, layer.String()).Inc()
+}
+
+// AddBytesCount wraps attaching the labels and calling Add into a single method.
+func AddBytesCount(operation string, layer digest.Digest, bytes int64) {
+ bytesCount.WithLabelValues(operation, layer.String()).Add(float64(bytes))
+}
+
+// WriteLatencyLogValue wraps writing the log info record for latency in milliseconds. The log record breaks down by operation and layer digest.
+func WriteLatencyLogValue(ctx context.Context, layer digest.Digest, operation string, start time.Time) {
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("metrics", "latency").WithField("operation", operation).WithField("layer_sha", layer.String()))
+ log.G(ctx).Logf(logLevel, "value=%v milliseconds", sinceInMilliseconds(start))
+}
+
+// WriteLatencyWithBytesLogValue wraps writing the log info record for latency in milliseconds with adding the size in bytes.
+// The log record breaks down by operation, layer digest and byte value.
+func WriteLatencyWithBytesLogValue(ctx context.Context, layer digest.Digest, latencyOperation string, start time.Time, bytesMetricName string, bytesMetricValue int64) {
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("metrics", "latency").WithField("operation", latencyOperation).WithField("layer_sha", layer.String()))
+ log.G(ctx).Logf(logLevel, "value=%v milliseconds; %v=%v bytes", sinceInMilliseconds(start), bytesMetricName, bytesMetricValue)
+}
+
+// LogLatencyForLastOnDemandFetch handles the special case of measuring the latency of the last
+// on-demand fetch, and must be invoked only at the end of the background fetch operation.
+// Since this is expected to happen only once per container launch, it writes a log line
+// instead of directly emitting a metric.
+// We do that in the following way:
+// 1. We record the mount start time
+// 2. We continuously record the timestamp of each on-demand fetch per layer sha
+// 3. Once background fetch completes, we measure the difference between the last on-demand
+// fetch and the mount start time, and log it
+func LogLatencyForLastOnDemandFetch(ctx context.Context, layer digest.Digest, start time.Time, end time.Time) {
+ diffInMilliseconds := float64(end.Sub(start).Milliseconds())
+ // The value can be negative if the zero time.Time value is passed as `end`,
+ // which happens when there were no on-demand fetches for this particular layer.
+ if diffInMilliseconds > 0 {
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("metrics", "latency").WithField("operation", MountLayerToLastOnDemandFetch).WithField("layer_sha", layer.String()))
+ log.G(ctx).Logf(logLevel, "value=%v milliseconds", diffInMilliseconds)
+ }
+}
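+
+// Illustrative call pattern (the reader variable `r` is an assumption): the
+// caller records the mount start time, lets on-demand reads update the
+// reader's last read timestamp, and reports the difference when background
+// fetch completes.
+//
+//	mountStart := time.Now()
+//	// ... container runs; on-demand reads happen ...
+//	commonmetrics.LogLatencyForLastOnDemandFetch(ctx, layerDigest, mountStart, r.LastOnDemandReadTime())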
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/metrics/layer/layer.go b/vendor/github.com/containerd/stargz-snapshotter/fs/metrics/layer/layer.go
new file mode 100644
index 000000000000..329e57dab8b9
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/metrics/layer/layer.go
@@ -0,0 +1,65 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package layermetrics
+
+import (
+ "github.com/containerd/stargz-snapshotter/fs/layer"
+ metrics "github.com/docker/go-metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var layerMetrics = []*metric{
+ {
+ name: "layer_fetched_size",
+ help: "Total fetched size of the layer",
+ unit: metrics.Bytes,
+ vt: prometheus.CounterValue,
+ getValues: func(l layer.Layer) []value {
+ return []value{
+ {
+ v: float64(l.Info().FetchedSize),
+ },
+ }
+ },
+ },
+ {
+ name: "layer_prefetch_size",
+ help: "Total prefetched size of the layer",
+ unit: metrics.Bytes,
+ vt: prometheus.CounterValue,
+ getValues: func(l layer.Layer) []value {
+ return []value{
+ {
+ v: float64(l.Info().PrefetchSize),
+ },
+ }
+ },
+ },
+ {
+ name: "layer_size",
+ help: "Total size of the layer",
+ unit: metrics.Bytes,
+ vt: prometheus.CounterValue,
+ getValues: func(l layer.Layer) []value {
+ return []value{
+ {
+ v: float64(l.Info().Size),
+ },
+ }
+ },
+ },
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/metrics/layer/metrics.go b/vendor/github.com/containerd/stargz-snapshotter/fs/metrics/layer/metrics.go
new file mode 100644
index 000000000000..00d9bbaf43b9
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/metrics/layer/metrics.go
@@ -0,0 +1,113 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package layermetrics
+
+import (
+ "sync"
+
+ "github.com/containerd/stargz-snapshotter/fs/layer"
+ metrics "github.com/docker/go-metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func NewLayerMetrics(ns *metrics.Namespace) *Controller {
+ if ns == nil {
+ return &Controller{}
+ }
+ c := &Controller{
+ ns: ns,
+ layer: make(map[string]layer.Layer),
+ }
+ c.metrics = append(c.metrics, layerMetrics...)
+ ns.Add(c)
+ return c
+}
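+
+// A hedged usage sketch (the namespace arguments are assumptions, not taken
+// from this diff): create a go-metrics namespace, attach the controller, and
+// add/remove layers as they are mounted and unmounted.
+//
+//	ns := metrics.NewNamespace("stargz", "fs", nil)
+//	ctrl := layermetrics.NewLayerMetrics(ns)
+//	metrics.Register(ns)
+//	ctrl.Add("/mnt/layer1", l) // l is a layer.Layer
+//	defer ctrl.Remove("/mnt/layer1")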
+
+type Controller struct {
+ ns *metrics.Namespace
+ metrics []*metric
+
+ layer map[string]layer.Layer
+ layerMu sync.RWMutex
+}
+
+func (c *Controller) Describe(ch chan<- *prometheus.Desc) {
+ for _, e := range c.metrics {
+ ch <- e.desc(c.ns)
+ }
+}
+
+func (c *Controller) Collect(ch chan<- prometheus.Metric) {
+ c.layerMu.RLock()
+ wg := &sync.WaitGroup{}
+ for mp, l := range c.layer {
+ mp, l := mp, l
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for _, e := range c.metrics {
+ e.collect(mp, l, c.ns, ch)
+ }
+ }()
+ }
+ c.layerMu.RUnlock()
+ wg.Wait()
+}
+
+func (c *Controller) Add(key string, l layer.Layer) {
+ if c.ns == nil {
+ return
+ }
+ c.layerMu.Lock()
+ c.layer[key] = l
+ c.layerMu.Unlock()
+}
+
+func (c *Controller) Remove(key string) {
+ if c.ns == nil {
+ return
+ }
+ c.layerMu.Lock()
+ delete(c.layer, key)
+ c.layerMu.Unlock()
+}
+
+type value struct {
+ v float64
+ l []string
+}
+
+type metric struct {
+ name string
+ help string
+ unit metrics.Unit
+ vt prometheus.ValueType
+ labels []string
+ // getValues returns the value and labels for the data
+ getValues func(l layer.Layer) []value
+}
+
+func (m *metric) desc(ns *metrics.Namespace) *prometheus.Desc {
+ return ns.NewDesc(m.name, m.help, m.unit, append([]string{"digest", "mountpoint"}, m.labels...)...)
+}
+
+func (m *metric) collect(mountpoint string, l layer.Layer, ns *metrics.Namespace, ch chan<- prometheus.Metric) {
+ values := m.getValues(l)
+ for _, v := range values {
+ ch <- prometheus.MustNewConstMetric(m.desc(ns), m.vt, v.v, append([]string{l.Info().Digest.String(), mountpoint}, v.l...)...)
+ }
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/reader/reader.go b/vendor/github.com/containerd/stargz-snapshotter/fs/reader/reader.go
new file mode 100644
index 000000000000..c860b87dec4e
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/reader/reader.go
@@ -0,0 +1,579 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the NOTICE.md file.
+*/
+
+package reader
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/containerd/stargz-snapshotter/cache"
+ "github.com/containerd/stargz-snapshotter/estargz"
+ commonmetrics "github.com/containerd/stargz-snapshotter/fs/metrics/common"
+ "github.com/containerd/stargz-snapshotter/metadata"
+ "github.com/hashicorp/go-multierror"
+ digest "github.com/opencontainers/go-digest"
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/sync/semaphore"
+)
+
+const maxWalkDepth = 10000
+
+type Reader interface {
+ OpenFile(id uint32) (io.ReaderAt, error)
+ Metadata() metadata.Reader
+ Close() error
+ LastOnDemandReadTime() time.Time
+}
+
+// VerifiableReader produces a Reader with a given verifier.
+type VerifiableReader struct {
+ r *reader
+
+ lastVerifyErr error
+ lastVerifyErrMu sync.Mutex
+ prohibitVerifyFailure bool
+ prohibitVerifyFailureMu sync.RWMutex
+
+ closed bool
+ closedMu sync.Mutex
+
+ verifier func(uint32, string) (digest.Verifier, error)
+}
+
+func (vr *VerifiableReader) storeLastVerifyErr(err error) {
+ vr.lastVerifyErrMu.Lock()
+ vr.lastVerifyErr = err
+ vr.lastVerifyErrMu.Unlock()
+}
+
+func (vr *VerifiableReader) loadLastVerifyErr() error {
+ vr.lastVerifyErrMu.Lock()
+ err := vr.lastVerifyErr
+ vr.lastVerifyErrMu.Unlock()
+ return err
+}
+
+func (vr *VerifiableReader) SkipVerify() Reader {
+ return vr.r
+}
+
+func (vr *VerifiableReader) VerifyTOC(tocDigest digest.Digest) (Reader, error) {
+ if vr.isClosed() {
+ return nil, fmt.Errorf("reader is already closed")
+ }
+ vr.prohibitVerifyFailureMu.Lock()
+ vr.prohibitVerifyFailure = true
+ lastVerifyErr := vr.loadLastVerifyErr()
+ vr.prohibitVerifyFailureMu.Unlock()
+ if err := lastVerifyErr; err != nil {
+ return nil, fmt.Errorf("content error occurs during caching contents: %w", err)
+ }
+ if actual := vr.r.r.TOCDigest(); actual != tocDigest {
+ return nil, fmt.Errorf("invalid TOC JSON %q; want %q", actual, tocDigest)
+ }
+ vr.r.verify = true
+ return vr.r, nil
+}
+
+func (vr *VerifiableReader) Metadata() metadata.Reader {
+ // TODO: this shouldn't be called before verification completes
+ return vr.r.r
+}
+
+func (vr *VerifiableReader) Cache(opts ...CacheOption) (err error) {
+ if vr.isClosed() {
+ return fmt.Errorf("reader is already closed")
+ }
+
+ var cacheOpts cacheOptions
+ for _, o := range opts {
+ o(&cacheOpts)
+ }
+
+ gr := vr.r
+ r := gr.r
+ if cacheOpts.reader != nil {
+ r, err = r.Clone(cacheOpts.reader)
+ if err != nil {
+ return err
+ }
+ }
+ rootID := r.RootID()
+
+ filter := func(int64) bool {
+ return true
+ }
+ if cacheOpts.filter != nil {
+ filter = cacheOpts.filter
+ }
+
+ eg, egCtx := errgroup.WithContext(context.Background())
+ eg.Go(func() error {
+ return vr.cacheWithReader(egCtx,
+ 0, eg, semaphore.NewWeighted(int64(runtime.GOMAXPROCS(0))),
+ rootID, r, filter, cacheOpts.cacheOpts...)
+ })
+ return eg.Wait()
+}
+
+func (vr *VerifiableReader) cacheWithReader(ctx context.Context, currentDepth int, eg *errgroup.Group, sem *semaphore.Weighted, dirID uint32, r metadata.Reader, filter func(int64) bool, opts ...cache.Option) (rErr error) {
+ if currentDepth > maxWalkDepth {
+ return fmt.Errorf("tree is too deep (depth:%d)", currentDepth)
+ }
+ rootID := r.RootID()
+ r.ForeachChild(dirID, func(name string, id uint32, mode os.FileMode) bool {
+ e, err := r.GetAttr(id)
+ if err != nil {
+ rErr = err
+ return false
+ }
+ if mode.IsDir() {
+ // Walk through all files in this stargz file.
+
+ // Ignore the "./" entry (formatted as "" by the stargz lib) on the root directory
+ // because it points to the root directory itself.
+ if dirID == rootID && name == "" {
+ return true
+ }
+
+ if err := vr.cacheWithReader(ctx, currentDepth+1, eg, sem, id, r, filter, opts...); err != nil {
+ rErr = err
+ return false
+ }
+ return true
+ } else if !mode.IsRegular() {
+ // Only cache regular files
+ return true
+ } else if dirID == rootID && name == estargz.TOCTarName {
+ // We don't need to cache the TOC JSON file
+ return true
+ }
+
+ offset, err := r.GetOffset(id)
+ if err != nil {
+ rErr = err
+ return false
+ }
+ if !filter(offset) {
+ // This entry needs to be filtered out
+ return true
+ }
+
+ fr, err := r.OpenFileWithPreReader(id, func(nid uint32, chunkOffset, chunkSize int64, chunkDigest string, r io.Reader) (retErr error) {
+ return vr.readAndCache(nid, r, chunkOffset, chunkSize, chunkDigest, opts...)
+ })
+ if err != nil {
+ rErr = err
+ return false
+ }
+
+ var nr int64
+ for nr < e.Size {
+ chunkOffset, chunkSize, chunkDigestStr, ok := fr.ChunkEntryForOffset(nr)
+ if !ok {
+ break
+ }
+ nr += chunkSize
+
+ if err := sem.Acquire(ctx, 1); err != nil {
+ rErr = err
+ return false
+ }
+
+ eg.Go(func() error {
+ defer sem.Release(1)
+ err := vr.readAndCache(id, io.NewSectionReader(fr, chunkOffset, chunkSize), chunkOffset, chunkSize, chunkDigestStr, opts...)
+ if err != nil {
+ return fmt.Errorf("failed to read %q (off:%d,size:%d): %w", name, chunkOffset, chunkSize, err)
+ }
+ return nil
+ })
+ }
+
+ return true
+ })
+
+ return
+}
+
+func (vr *VerifiableReader) readAndCache(id uint32, fr io.Reader, chunkOffset, chunkSize int64, chunkDigest string, opts ...cache.Option) (retErr error) {
+ gr := vr.r
+
+ // Record any error on return so that VerifyTOC/Cache can report content
+ // errors encountered while caching; a non-deferred check here would never
+ // fire because named return values start at their zero value.
+ defer func() {
+ if retErr != nil {
+ vr.storeLastVerifyErr(retErr)
+ }
+ }()
+
+ // Check if it already exists in the cache
+ cacheID := genID(id, chunkOffset, chunkSize)
+ if r, err := gr.cache.Get(cacheID); err == nil {
+ r.Close()
+ return nil
+ }
+
+ // Cache miss: fetch the chunk and add it to the cache
+ br := bufio.NewReaderSize(fr, int(chunkSize))
+ if _, err := br.Peek(int(chunkSize)); err != nil {
+ return fmt.Errorf("cacheWithReader.peek: %v", err)
+ }
+ w, err := gr.cache.Add(cacheID, opts...)
+ if err != nil {
+ return err
+ }
+ defer w.Close()
+ v, err := vr.verifier(id, chunkDigest)
+ if err != nil {
+ vr.prohibitVerifyFailureMu.RLock()
+ if vr.prohibitVerifyFailure {
+ vr.prohibitVerifyFailureMu.RUnlock()
+ return fmt.Errorf("verifier not found: %w", err)
+ }
+ vr.storeLastVerifyErr(err)
+ vr.prohibitVerifyFailureMu.RUnlock()
+ }
+ tee := io.Discard
+ if v != nil {
+ tee = io.Writer(v) // verification is required
+ }
+ if _, err := io.CopyN(w, io.TeeReader(br, tee), chunkSize); err != nil {
+ w.Abort()
+ return fmt.Errorf("failed to cache file payload: %w", err)
+ }
+ if v != nil && !v.Verified() {
+ err := fmt.Errorf("invalid chunk")
+ vr.prohibitVerifyFailureMu.RLock()
+ if vr.prohibitVerifyFailure {
+ vr.prohibitVerifyFailureMu.RUnlock()
+ w.Abort()
+ return err
+ }
+ vr.storeLastVerifyErr(err)
+ vr.prohibitVerifyFailureMu.RUnlock()
+ }
+
+ return w.Commit()
+}
+
+func (vr *VerifiableReader) Close() error {
+ vr.closedMu.Lock()
+ defer vr.closedMu.Unlock()
+ if vr.closed {
+ return nil
+ }
+ vr.closed = true
+ return vr.r.Close()
+}
+
+func (vr *VerifiableReader) isClosed() bool {
+ vr.closedMu.Lock()
+ closed := vr.closed
+ vr.closedMu.Unlock()
+ return closed
+}
+
+// NewReader creates a Reader based on the given stargz blob and cache implementation.
+// It returns a VerifiableReader, so the caller must verify the TOC (or explicitly skip
+// verification) before using files or chunks contained in this stargz blob.
+func NewReader(r metadata.Reader, cache cache.BlobCache, layerSha digest.Digest) (*VerifiableReader, error) {
+ vr := &reader{
+ r: r,
+ cache: cache,
+ bufPool: sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ },
+ layerSha: layerSha,
+ verifier: digestVerifier,
+ }
+ return &VerifiableReader{r: vr, verifier: digestVerifier}, nil
+}
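+
+// A minimal sketch of the intended flow (mr, blobCache, layerDigest and
+// tocDigest are assumed to come from the caller's context):
+//
+//	vr, err := reader.NewReader(mr, blobCache, layerDigest)
+//	if err != nil { /* handle */ }
+//	r, err := vr.VerifyTOC(tocDigest) // or vr.SkipVerify() to bypass verification
+//	if err != nil { /* handle */ }
+//	ra, err := r.OpenFile(id) // id obtained via r.Metadata()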
+
+type reader struct {
+ r metadata.Reader
+ cache cache.BlobCache
+ bufPool sync.Pool
+
+ layerSha digest.Digest
+
+ lastReadTime time.Time
+ lastReadTimeMu sync.Mutex
+
+ closed bool
+ closedMu sync.Mutex
+
+ verify bool
+ verifier func(uint32, string) (digest.Verifier, error)
+}
+
+func (gr *reader) Metadata() metadata.Reader {
+ return gr.r
+}
+
+func (gr *reader) setLastReadTime(lastReadTime time.Time) {
+ gr.lastReadTimeMu.Lock()
+ gr.lastReadTime = lastReadTime
+ gr.lastReadTimeMu.Unlock()
+}
+
+func (gr *reader) LastOnDemandReadTime() time.Time {
+ gr.lastReadTimeMu.Lock()
+ t := gr.lastReadTime
+ gr.lastReadTimeMu.Unlock()
+ return t
+}
+
+func (gr *reader) OpenFile(id uint32) (io.ReaderAt, error) {
+ if gr.isClosed() {
+ return nil, fmt.Errorf("reader is already closed")
+ }
+ var fr metadata.File
+ fr, err := gr.r.OpenFileWithPreReader(id, func(nid uint32, chunkOffset, chunkSize int64, chunkDigest string, r io.Reader) error {
+ // Check if it already exists in the cache
+ cacheID := genID(nid, chunkOffset, chunkSize)
+ if r, err := gr.cache.Get(cacheID); err == nil {
+ r.Close()
+ return nil
+ }
+
+ // Read and cache
+ b := gr.bufPool.Get().(*bytes.Buffer)
+ b.Reset()
+ b.Grow(int(chunkSize))
+ ip := b.Bytes()[:chunkSize]
+ if _, err := io.ReadFull(r, ip); err != nil {
+ gr.putBuffer(b)
+ return err
+ }
+ err := gr.verifyAndCache(nid, ip, chunkDigest, cacheID)
+ gr.putBuffer(b)
+ return err
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to open file %d: %w", id, err)
+ }
+ return &file{
+ id: id,
+ fr: fr,
+ gr: gr,
+ }, nil
+}
+
+func (gr *reader) Close() (retErr error) {
+ gr.closedMu.Lock()
+ defer gr.closedMu.Unlock()
+ if gr.closed {
+ return nil
+ }
+ gr.closed = true
+ if err := gr.cache.Close(); err != nil {
+ retErr = multierror.Append(retErr, err)
+ }
+ if err := gr.r.Close(); err != nil {
+ retErr = multierror.Append(retErr, err)
+ }
+ return
+}
+
+func (gr *reader) isClosed() bool {
+ gr.closedMu.Lock()
+ closed := gr.closed
+ gr.closedMu.Unlock()
+ return closed
+}
+
+func (gr *reader) putBuffer(b *bytes.Buffer) {
+ b.Reset()
+ gr.bufPool.Put(b)
+}
+
+type file struct {
+ id uint32
+ fr metadata.File
+ gr *reader
+}
+
+// ReadAt reads chunks from the stargz file, trying to fetch as many chunks
+// as possible from the cache.
+func (sf *file) ReadAt(p []byte, offset int64) (int, error) {
+ nr := 0
+ for nr < len(p) {
+ chunkOffset, chunkSize, chunkDigestStr, ok := sf.fr.ChunkEntryForOffset(offset + int64(nr))
+ if !ok {
+ break
+ }
+ var (
+ id = genID(sf.id, chunkOffset, chunkSize)
+ lowerDiscard = positive(offset - chunkOffset)
+ upperDiscard = positive(chunkOffset + chunkSize - (offset + int64(len(p))))
+ expectedSize = chunkSize - upperDiscard - lowerDiscard
+ )
+
+ // Check if the content exists in the cache
+ if r, err := sf.gr.cache.Get(id); err == nil {
+ n, err := r.ReadAt(p[nr:int64(nr)+expectedSize], lowerDiscard)
+ if (err == nil || err == io.EOF) && int64(n) == expectedSize {
+ nr += n
+ r.Close()
+ continue
+ }
+ r.Close()
+ }
+
+ // Cache miss. Take it from the underlying reader.
+ // We read the whole chunk here and add it to the cache so that subsequent
+ // reads against neighboring chunks can take the data without decompression.
+ if lowerDiscard == 0 && upperDiscard == 0 {
+ // We can directly store the result to the given buffer
+ ip := p[nr : int64(nr)+chunkSize]
+ n, err := sf.fr.ReadAt(ip, chunkOffset)
+ if err != nil && err != io.EOF {
+ return 0, fmt.Errorf("failed to read data: %w", err)
+ }
+ if err := sf.gr.verifyAndCache(sf.id, ip, chunkDigestStr, id); err != nil {
+ return 0, err
+ }
+ nr += n
+ continue
+ }
+
+ // Use a temporary buffer to align this chunk
+ b := sf.gr.bufPool.Get().(*bytes.Buffer)
+ b.Reset()
+ b.Grow(int(chunkSize))
+ ip := b.Bytes()[:chunkSize]
+ if _, err := sf.fr.ReadAt(ip, chunkOffset); err != nil && err != io.EOF {
+ sf.gr.putBuffer(b)
+ return 0, fmt.Errorf("failed to read data: %w", err)
+ }
+ if err := sf.gr.verifyAndCache(sf.id, ip, chunkDigestStr, id); err != nil {
+ sf.gr.putBuffer(b)
+ return 0, err
+ }
+ n := copy(p[nr:], ip[lowerDiscard:chunkSize-upperDiscard])
+ sf.gr.putBuffer(b)
+ if int64(n) != expectedSize {
+ return 0, fmt.Errorf("unexpected final data size %d; want %d", n, expectedSize)
+ }
+ nr += n
+ }
+
+ commonmetrics.AddBytesCount(commonmetrics.OnDemandBytesServed, sf.gr.layerSha, int64(nr)) // measure the number of on demand bytes served
+
+ return nr, nil
+}
+
+func (gr *reader) verifyAndCache(entryID uint32, ip []byte, chunkDigestStr string, cacheID string) error {
+ // We can end up doing on demand registry fetch when aligning the chunk
+ commonmetrics.IncOperationCount(commonmetrics.OnDemandRemoteRegistryFetchCount, gr.layerSha) // increment the number of on demand file fetches from remote registry
+ commonmetrics.AddBytesCount(commonmetrics.OnDemandBytesFetched, gr.layerSha, int64(len(ip))) // record total bytes fetched
+ gr.setLastReadTime(time.Now())
+
+ // Verify this chunk
+ if err := gr.verifyChunk(entryID, ip, chunkDigestStr); err != nil {
+ return fmt.Errorf("invalid chunk: %w", err)
+ }
+
+ // Cache this chunk
+ if w, err := gr.cache.Add(cacheID); err == nil {
+ if cn, err := w.Write(ip); err != nil || cn != len(ip) {
+ w.Abort()
+ } else {
+ w.Commit()
+ }
+ w.Close()
+ }
+
+ return nil
+}
+
+func (gr *reader) verifyChunk(id uint32, p []byte, chunkDigestStr string) error {
+ if !gr.verify {
+ return nil // verification is not required
+ }
+ v, err := gr.verifier(id, chunkDigestStr)
+ if err != nil {
+ return fmt.Errorf("invalid chunk: %w", err)
+ }
+ if _, err := v.Write(p); err != nil {
+ return fmt.Errorf("invalid chunk: failed to write to verifier: %w", err)
+ }
+ if !v.Verified() {
+ return fmt.Errorf("invalid chunk: not verified")
+ }
+
+ return nil
+}
+
+func genID(id uint32, offset, size int64) string {
+ sum := sha256.Sum256([]byte(fmt.Sprintf("%d-%d-%d", id, offset, size)))
+ return fmt.Sprintf("%x", sum)
+}
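+
+// For example (illustrative), genID(7, 0, 4096) hashes the string "7-0-4096"
+// with SHA-256, so a given (id, offset, size) triple always maps to the same
+// cache key.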
+
+func positive(n int64) int64 {
+ if n < 0 {
+ return 0
+ }
+ return n
+}
+
+type CacheOption func(*cacheOptions)
+
+type cacheOptions struct {
+ cacheOpts []cache.Option
+ filter func(int64) bool
+ reader *io.SectionReader
+}
+
+func WithCacheOpts(cacheOpts ...cache.Option) CacheOption {
+ return func(opts *cacheOptions) {
+ opts.cacheOpts = cacheOpts
+ }
+}
+
+func WithFilter(filter func(int64) bool) CacheOption {
+ return func(opts *cacheOptions) {
+ opts.filter = filter
+ }
+}
+
+func WithReader(sr *io.SectionReader) CacheOption {
+ return func(opts *cacheOptions) {
+ opts.reader = sr
+ }
+}
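+
+// Illustrative combination of the options above (the offset predicate and the
+// use of cache.Direct() are assumptions for the sketch, not taken from this
+// diff):
+//
+//	err := vr.Cache(
+//		reader.WithFilter(func(off int64) bool { return off < landmarkOffset }),
+//		reader.WithCacheOpts(cache.Direct()),
+//	)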
+
+func digestVerifier(id uint32, chunkDigestStr string) (digest.Verifier, error) {
+ chunkDigest, err := digest.Parse(chunkDigestStr)
+ if err != nil {
+ return nil, fmt.Errorf("invalid chunk: no digest is recorded(len=%d): %w", len(chunkDigestStr), err)
+ }
+ return chunkDigest.Verifier(), nil
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/reader/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/fs/reader/testutil.go
new file mode 100644
index 000000000000..e564b4f9fb08
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/reader/testutil.go
@@ -0,0 +1,821 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the NOTICE.md file.
+*/
+
+package reader
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/containerd/stargz-snapshotter/cache"
+ "github.com/containerd/stargz-snapshotter/estargz"
+ "github.com/containerd/stargz-snapshotter/metadata"
+ tutil "github.com/containerd/stargz-snapshotter/util/testutil"
+ "github.com/klauspost/compress/zstd"
+ digest "github.com/opencontainers/go-digest"
+ "golang.org/x/sync/errgroup"
+)
+
+type region struct{ b, e int64 }
+
+const (
+ sampleChunkSize = 3
+ sampleMiddleOffset = sampleChunkSize / 2
+ sampleData1 = "0123456789"
+ lastChunkOffset1 = sampleChunkSize * (int64(len(sampleData1)) / sampleChunkSize)
+)
+
+var srcCompressions = map[string]tutil.CompressionFactory{
+ "zstd-fastest": tutil.ZstdCompressionWithLevel(zstd.SpeedFastest),
+ "gzip-bestspeed": tutil.GzipCompressionWithLevel(gzip.BestSpeed),
+ "externaltoc-gzip-bestspeed": tutil.ExternalTOCGzipCompressionWithLevel(gzip.BestSpeed),
+}
+
+func TestSuiteReader(t *testing.T, store metadata.Store) {
+ testFileReadAt(t, store)
+ testCacheVerify(t, store)
+ testFailReader(t, store)
+ testPreReader(t, store)
+}
+
+func testFileReadAt(t *testing.T, factory metadata.Store) {
+ sizeCond := map[string]int64{
+ "single_chunk": sampleChunkSize - sampleMiddleOffset,
+ "multi_chunks": sampleChunkSize + sampleMiddleOffset,
+ }
+ innerOffsetCond := map[string]int64{
+ "at_top": 0,
+ "at_middle": sampleMiddleOffset,
+ }
+ baseOffsetCond := map[string]int64{
+ "of_1st_chunk": sampleChunkSize * 0,
+ "of_2nd_chunk": sampleChunkSize * 1,
+ "of_last_chunk": lastChunkOffset1,
+ }
+ fileSizeCond := map[string]int64{
+ "in_1_chunk_file": sampleChunkSize * 1,
+ "in_2_chunks_file": sampleChunkSize * 2,
+ "in_max_size_file": int64(len(sampleData1)),
+ }
+ cacheCond := map[string][]region{
+ "with_clean_cache": nil,
+ "with_edge_filled_cache": {
+ region{0, sampleChunkSize - 1},
+ region{lastChunkOffset1, int64(len(sampleData1)) - 1},
+ },
+ "with_sparse_cache": {
+ region{0, sampleChunkSize - 1},
+ region{2 * sampleChunkSize, 3*sampleChunkSize - 1},
+ },
+ }
+ for sn, size := range sizeCond {
+ for in, innero := range innerOffsetCond {
+ for bo, baseo := range baseOffsetCond {
+ for fn, filesize := range fileSizeCond {
+ for cc, cacheExcept := range cacheCond {
+ for srcCompressionName, srcCompression := range srcCompressions {
+ srcCompression := srcCompression()
+ t.Run(fmt.Sprintf("reading_%s_%s_%s_%s_%s_%s", sn, in, bo, fn, cc, srcCompressionName), func(t *testing.T) {
+ if filesize > int64(len(sampleData1)) {
+ t.Fatal("sample file size is larger than sample data")
+ }
+
+ wantN := size
+ offset := baseo + innero
+ if remain := filesize - offset; remain < wantN {
+ if wantN = remain; wantN < 0 {
+ wantN = 0
+ }
+ }
+
+ // use constant string value as a data source.
+ want := strings.NewReader(sampleData1)
+
+ // data we want to get.
+ wantData := make([]byte, wantN)
+ _, err := want.ReadAt(wantData, offset)
+ if err != nil && err != io.EOF {
+ t.Fatalf("want.ReadAt (offset=%d,size=%d): %v", offset, wantN, err)
+ }
+
+ // data we get through a file.
+ f, closeFn := makeFile(t, []byte(sampleData1)[:filesize], sampleChunkSize, factory, srcCompression)
+ defer closeFn()
+ f.fr = newExceptFile(t, f.fr, cacheExcept...)
+ for _, reg := range cacheExcept {
+ id := genID(f.id, reg.b, reg.e-reg.b+1)
+ w, err := f.gr.cache.Add(id)
+ if err != nil {
+ w.Close()
+ t.Fatalf("failed to add cache %v: %v", id, err)
+ }
+ if _, err := w.Write([]byte(sampleData1[reg.b : reg.e+1])); err != nil {
+ w.Close()
+ t.Fatalf("failed to write cache %v: %v", id, err)
+ }
+ if err := w.Commit(); err != nil {
+ w.Close()
+ t.Fatalf("failed to commit cache %v: %v", id, err)
+ }
+ w.Close()
+ }
+ respData := make([]byte, size)
+ n, err := f.ReadAt(respData, offset)
+ if err != nil {
+ t.Errorf("failed to read off=%d, size=%d, filesize=%d: %v", offset, size, filesize, err)
+ return
+ }
+ respData = respData[:n]
+
+ if !bytes.Equal(wantData, respData) {
+ t.Errorf("off=%d, filesize=%d; read data{size=%d,data=%q}; want (size=%d,data=%q)",
+ offset, filesize, len(respData), string(respData), wantN, string(wantData))
+ return
+ }
+
+ // check cache has valid contents.
+ cn := 0
+ nr := 0
+ for int64(nr) < wantN {
+ chunkOffset, chunkSize, _, ok := f.fr.ChunkEntryForOffset(offset + int64(nr))
+ if !ok {
+ break
+ }
+ data := make([]byte, chunkSize)
+ id := genID(f.id, chunkOffset, chunkSize)
+ r, err := f.gr.cache.Get(id)
+ if err != nil {
+ t.Errorf("missed cache of offset=%d, size=%d: %v(got size=%d)", chunkOffset, chunkSize, err, n)
+ return
+ }
+ defer r.Close()
+ if n, err := r.ReadAt(data, 0); (err != nil && err != io.EOF) || n != int(chunkSize) {
+ t.Errorf("failed to read cache of offset=%d, size=%d: %v(got size=%d)", chunkOffset, chunkSize, err, n)
+ return
+ }
+ nr += n
+ cn++
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func newExceptFile(t *testing.T, fr metadata.File, except ...region) metadata.File {
+ er := exceptFile{fr: fr, t: t}
+ er.except = map[region]bool{}
+ for _, reg := range except {
+ er.except[reg] = true
+ }
+ return &er
+}
+
+type exceptFile struct {
+ fr metadata.File
+ except map[region]bool
+ t *testing.T
+}
+
+func (er *exceptFile) ReadAt(p []byte, offset int64) (int, error) {
+ if er.except[region{offset, offset + int64(len(p)) - 1}] {
+ er.t.Fatalf("Requested prohibited region of chunk: (%d, %d)", offset, offset+int64(len(p))-1)
+ }
+ return er.fr.ReadAt(p, offset)
+}
+
+func (er *exceptFile) ChunkEntryForOffset(offset int64) (off int64, size int64, dgst string, ok bool) {
+ return er.fr.ChunkEntryForOffset(offset)
+}
+
+func makeFile(t *testing.T, contents []byte, chunkSize int, factory metadata.Store, comp tutil.Compression) (*file, func() error) {
+ testName := "test"
+ sr, dgst, err := tutil.BuildEStargz([]tutil.TarEntry{
+ tutil.File(testName, string(contents)),
+ }, tutil.WithEStargzOptions(estargz.WithChunkSize(chunkSize), estargz.WithCompression(comp)))
+ if err != nil {
+ t.Fatalf("failed to build sample estargz")
+ }
+ mr, err := factory(sr, metadata.WithDecompressors(comp))
+ if err != nil {
+ t.Fatalf("failed to create reader: %v", err)
+ }
+ vr, err := NewReader(mr, cache.NewMemoryCache(), digest.FromString(""))
+ if err != nil {
+ mr.Close()
+ t.Fatalf("failed to make new reader: %v", err)
+ }
+ r, err := vr.VerifyTOC(dgst)
+ if err != nil {
+ vr.Close()
+ t.Fatalf("failed to verify TOC: %v", err)
+ }
+ tid, _, err := r.Metadata().GetChild(r.Metadata().RootID(), testName)
+ if err != nil {
+ vr.Close()
+ t.Fatalf("failed to get %q: %v", testName, err)
+ }
+ ra, err := r.OpenFile(tid)
+ if err != nil {
+ vr.Close()
+ t.Fatalf("Failed to open testing file: %v", err)
+ }
+ f, ok := ra.(*file)
+ if !ok {
+ vr.Close()
+ t.Fatalf("invalid type of file %q", tid)
+ }
+ return f, vr.Close
+}
+
+func testCacheVerify(t *testing.T, factory metadata.Store) {
+ for _, skipVerify := range [2]bool{true, false} {
+ for _, invalidChunkBeforeVerify := range [2]bool{true, false} {
+ for _, invalidChunkAfterVerify := range [2]bool{true, false} {
+ for srcCompressionName, srcCompression := range srcCompressions {
+ srcCompression := srcCompression()
+ name := fmt.Sprintf("test_cache_verify_%v_%v_%v_%v",
+ skipVerify, invalidChunkBeforeVerify, invalidChunkAfterVerify, srcCompressionName)
+ t.Run(name, func(t *testing.T) {
+ sr, tocDgst, err := tutil.BuildEStargz([]tutil.TarEntry{
+ tutil.File("a", sampleData1+"a"),
+ tutil.File("b", sampleData1+"b"),
+ }, tutil.WithEStargzOptions(estargz.WithChunkSize(sampleChunkSize), estargz.WithCompression(srcCompression)))
+ if err != nil {
+ t.Fatalf("failed to build sample estargz")
+ }
+
+ // Determine the expected behaviour
+ var wantVerifyFail, wantCacheFail, wantCacheFail2 bool
+ if skipVerify {
+ // always no error if verification is disabled
+ wantVerifyFail, wantCacheFail, wantCacheFail2 = false, false, false
+ } else if invalidChunkBeforeVerify {
+ // errors that occur before the TOC is verified must be reported via VerifyTOC()
+ wantVerifyFail = true
+ } else if invalidChunkAfterVerify {
+ // errors that occur after the TOC is verified must be reported via Cache()
+ wantVerifyFail, wantCacheFail, wantCacheFail2 = false, true, true
+ } else {
+ // otherwise no verification error
+ wantVerifyFail, wantCacheFail, wantCacheFail2 = false, false, false
+ }
+
+ // Prepare reader
+ verifier := &failIDVerifier{}
+ mr, err := factory(sr, metadata.WithDecompressors(srcCompression))
+ if err != nil {
+ t.Fatalf("failed to prepare reader %v", err)
+ }
+ defer mr.Close()
+ vr, err := NewReader(mr, cache.NewMemoryCache(), digest.FromString(""))
+ if err != nil {
+ t.Fatalf("failed to make new reader: %v", err)
+ }
+ vr.verifier = verifier.verifier
+ vr.r.verifier = verifier.verifier
+
+ off2id, id2path, err := prepareMap(vr.Metadata(), vr.Metadata().RootID(), "")
+ if err != nil || off2id == nil || id2path == nil {
+ t.Fatalf("failed to prepare offset map %v, off2id = %+v, id2path = %+v", err, off2id, id2path)
+ }
+
+ // Perform Cache() before verification
+ // 1. Either of "a" or "b" is read and verified
+ // 2. VerifyTOC/SkipVerify is called
+ // 3. Another entry ("a" or "b") is called
+ verifyDone := make(chan struct{})
+ var firstEntryCalled bool
+ var eg errgroup.Group
+ var mu sync.Mutex
+ eg.Go(func() error {
+ return vr.Cache(WithFilter(func(off int64) bool {
+ id, ok := off2id[off]
+ if !ok {
+ t.Fatalf("no ID is assigned to offset %d", off)
+ }
+ name, ok := id2path[id]
+ if !ok {
+ t.Fatalf("no name is assigned to id %d", id)
+ }
+ if name == "a" || name == "b" {
+ mu.Lock()
+ if !firstEntryCalled {
+ firstEntryCalled = true
+ if invalidChunkBeforeVerify {
+ verifier.registerFails([]uint32{id})
+ }
+ mu.Unlock()
+ return true
+ }
+ mu.Unlock()
+ <-verifyDone
+ if invalidChunkAfterVerify {
+ verifier.registerFails([]uint32{id})
+ }
+ return true
+ }
+ return false
+ }))
+ })
+ if invalidChunkBeforeVerify {
+ // wait until the read error from the first chunk is registered
+ start := time.Now()
+ for {
+ if err := vr.loadLastVerifyErr(); err != nil {
+ break
+ }
+ if time.Since(start) > time.Second {
+ t.Fatalf("timeout(1s): failed to wait for read error is registered")
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+ }
+
+ // Perform verification
+ if skipVerify {
+ vr.SkipVerify()
+ } else {
+ _, err = vr.VerifyTOC(tocDgst)
+ }
+ if checkErr := checkError(wantVerifyFail, err); checkErr != nil {
+ t.Errorf("verify: %v", checkErr)
+ return
+ }
+ if err != nil {
+ return
+ }
+ close(verifyDone)
+
+ // Check the result of Cache()
+ if checkErr := checkError(wantCacheFail, eg.Wait()); checkErr != nil {
+ t.Errorf("cache: %v", checkErr)
+ return
+ }
+
+ // Call Cache() again and check the result
+ if checkErr := checkError(wantCacheFail2, vr.Cache()); checkErr != nil {
+ t.Errorf("cache(2): %v", checkErr)
+ return
+ }
+ })
+ }
+ }
+ }
+ }
+}
+
+type failIDVerifier struct {
+ fails []uint32
+ failsMu sync.Mutex
+}
+
+func (f *failIDVerifier) registerFails(fails []uint32) {
+ f.failsMu.Lock()
+ defer f.failsMu.Unlock()
+ f.fails = fails
+}
+
+func (f *failIDVerifier) verifier(id uint32, chunkDigest string) (digest.Verifier, error) {
+ f.failsMu.Lock()
+ defer f.failsMu.Unlock()
+ success := true
+ for _, n := range f.fails {
+ if n == id {
+ success = false
+ break
+ }
+ }
+ return &testVerifier{success}, nil
+}
+
+type testVerifier struct {
+ success bool
+}
+
+func (bv *testVerifier) Write(p []byte) (n int, err error) {
+ return len(p), nil
+}
+
+func (bv *testVerifier) Verified() bool {
+ return bv.success
+}
+
+func checkError(wantFail bool, err error) error {
+ if wantFail && err == nil {
+ return fmt.Errorf("wanted to fail but succeeded")
+ } else if !wantFail && err != nil {
+ return fmt.Errorf("wanted to succeed verification but failed: %w", err)
+ }
+ return nil
+}
+
+func prepareMap(mr metadata.Reader, id uint32, p string) (off2id map[int64]uint32, id2path map[uint32]string, _ error) {
+ attr, err := mr.GetAttr(id)
+ if err != nil {
+ return nil, nil, err
+ }
+ id2path = map[uint32]string{id: p}
+ off2id = make(map[int64]uint32)
+ if attr.Mode.IsRegular() {
+ off, err := mr.GetOffset(id)
+ if err != nil {
+ return nil, nil, err
+ }
+ off2id[off] = id
+ }
+ var retErr error
+ mr.ForeachChild(id, func(name string, id uint32, mode os.FileMode) bool {
+ o2i, i2p, err := prepareMap(mr, id, path.Join(p, name))
+ if err != nil {
+ retErr = err
+ return false
+ }
+ for k, v := range o2i {
+ off2id[k] = v
+ }
+ for k, v := range i2p {
+ id2path[k] = v
+ }
+ return true
+ })
+ if retErr != nil {
+ return nil, nil, retErr
+ }
+ return off2id, id2path, nil
+}
+
+func testFailReader(t *testing.T, factory metadata.Store) {
+ testFileName := "test"
+ for srcCompressionName, srcCompression := range srcCompressions {
+ srcCompression := srcCompression()
+ t.Run(fmt.Sprintf("%v", srcCompressionName), func(t *testing.T) {
+ for _, rs := range []bool{true, false} {
+ for _, vs := range []bool{true, false} {
+ stargzFile, tocDigest, err := tutil.BuildEStargz([]tutil.TarEntry{
+ tutil.File(testFileName, sampleData1),
+ }, tutil.WithEStargzOptions(estargz.WithChunkSize(sampleChunkSize), estargz.WithCompression(srcCompression)))
+ if err != nil {
+ t.Fatalf("failed to build sample estargz")
+ }
+
+ br := &breakReaderAt{
+ ReaderAt: stargzFile,
+ success: true,
+ }
+ bev := &testChunkVerifier{true}
+ mcache := cache.NewMemoryCache()
+ mr, err := factory(io.NewSectionReader(br, 0, stargzFile.Size()), metadata.WithDecompressors(srcCompression))
+ if err != nil {
+ t.Fatalf("failed to prepare metadata reader")
+ }
+ defer mr.Close()
+ vr, err := NewReader(mr, mcache, digest.FromString(""))
+ if err != nil {
+ t.Fatalf("failed to make new reader: %v", err)
+ }
+ defer vr.Close()
+ vr.verifier = bev.verifier
+ vr.r.verifier = bev.verifier
+ gr, err := vr.VerifyTOC(tocDigest)
+ if err != nil {
+ t.Fatalf("failed to verify TOC: %v", err)
+ }
+
+ notexist := uint32(0)
+ found := false
+ for i := uint32(0); i < 1000000; i++ {
+ if _, err := gr.Metadata().GetAttr(i); err != nil {
+ notexist, found = i, true
+ break
+ }
+ }
+ if !found {
+ t.Fatalf("free ID not found")
+ }
+
+ // tests for opening a non-existent file
+ _, err = gr.OpenFile(notexist)
+ if err == nil {
+ t.Errorf("succeeded to open file but wanted to fail")
+ return
+ }
+
+ // tests failure behaviour of a file read
+ tid, _, err := gr.Metadata().GetChild(gr.Metadata().RootID(), testFileName)
+ if err != nil {
+ t.Errorf("failed to get %q: %v", testFileName, err)
+ return
+ }
+ fr, err := gr.OpenFile(tid)
+ if err != nil {
+ t.Errorf("failed to open file but wanted to succeed: %v", err)
+ return
+ }
+
+ mcache.(*cache.MemoryCache).Membuf = map[string]*bytes.Buffer{}
+ br.success = rs
+ bev.success = vs
+
+ // tests for reading file
+ p := make([]byte, len(sampleData1))
+ n, err := fr.ReadAt(p, 0)
+ if rs && vs {
+ if err != nil || n != len(sampleData1) || !bytes.Equal([]byte(sampleData1), p) {
+ t.Errorf("failed to read data but wanted to succeed: %v", err)
+ return
+ }
+ } else {
+ if err == nil {
+ t.Errorf("succeeded to read data but wanted to fail (reader:%v,verify:%v)", rs, vs)
+ return
+ }
+ }
+ }
+ }
+ })
+ }
+}
+
+type breakReaderAt struct {
+ io.ReaderAt
+ success bool
+}
+
+func (br *breakReaderAt) ReadAt(p []byte, off int64) (int, error) {
+ if br.success {
+ return br.ReaderAt.ReadAt(p, off)
+ }
+ return 0, fmt.Errorf("failed")
+}
+
+type testChunkVerifier struct {
+ success bool
+}
+
+func (bev *testChunkVerifier) verifier(id uint32, chunkDigest string) (digest.Verifier, error) {
+ return &testVerifier{bev.success}, nil
+}
+
+func testPreReader(t *testing.T, factory metadata.Store) {
+ data64KB := string(tutil.RandomBytes(t, 64000))
+ tests := []struct {
+ name string
+ chunkSize int
+ minChunkSize int
+ in []tutil.TarEntry
+ want []check
+ }{
+ {
+ name: "several_files_in_chunk",
+ minChunkSize: 8000,
+ in: []tutil.TarEntry{
+ tutil.Dir("foo/"),
+ tutil.File("foo/foo1", data64KB),
+ tutil.File("foo2", "bb"),
+ tutil.File("foo22", "ccc"),
+ tutil.Dir("bar/"),
+ tutil.File("bar/bar.txt", "aaa"),
+ tutil.File("foo3", data64KB),
+ },
+ // NOTE: we assume that the compressed "data64KB" is still larger than 8KB
+ // landmark+dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer
+ want: []check{
+ hasFileContentsWithPreCached("foo22", 0, "ccc", chunkInfo{"foo2", "bb", 0, 2}, chunkInfo{"bar/bar.txt", "aaa", 0, 3}, chunkInfo{"foo3", data64KB, 0, 64000}),
+ hasFileContentsOffset("foo2", 0, "bb", true),
+ hasFileContentsOffset("bar/bar.txt", 0, "aaa", true),
+ hasFileContentsOffset("bar/bar.txt", 1, "aa", true),
+ hasFileContentsOffset("bar/bar.txt", 2, "a", true),
+ hasFileContentsOffset("foo3", 0, data64KB, true),
+ hasFileContentsOffset("foo22", 0, "ccc", true),
+ hasFileContentsOffset("foo/foo1", 0, data64KB, false),
+ hasFileContentsOffset("foo/foo1", 0, data64KB, true),
+ hasFileContentsOffset("foo/foo1", 1, data64KB[1:], true),
+ hasFileContentsOffset("foo/foo1", 2, data64KB[2:], true),
+ hasFileContentsOffset("foo/foo1", 3, data64KB[3:], true),
+ },
+ },
+ {
+ name: "several_files_in_chunk_chunked",
+ minChunkSize: 8000,
+ chunkSize: 32000,
+ in: []tutil.TarEntry{
+ tutil.Dir("foo/"),
+ tutil.File("foo/foo1", data64KB),
+ tutil.File("foo2", "bb"),
+ tutil.Dir("bar/"),
+ tutil.File("foo3", data64KB),
+ },
+ // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB
+ // landmark+dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer
+ want: []check{
+ hasFileContentsWithPreCached("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000], 0, 32000}),
+ hasFileContentsOffset("foo2", 0, "bb", true),
+ hasFileContentsOffset("foo2", 1, "b", true),
+ hasFileContentsOffset("foo3", 0, data64KB[:len(data64KB)/2], true),
+ hasFileContentsOffset("foo3", 1, data64KB[1:len(data64KB)/2], true),
+ hasFileContentsOffset("foo3", 2, data64KB[2:len(data64KB)/2], true),
+ hasFileContentsOffset("foo3", int64(len(data64KB)/2), data64KB[len(data64KB)/2:], false),
+ hasFileContentsOffset("foo3", int64(len(data64KB)-1), data64KB[len(data64KB)-1:], true),
+ hasFileContentsOffset("foo/foo1", 0, data64KB, false),
+ hasFileContentsOffset("foo/foo1", 1, data64KB[1:], true),
+ hasFileContentsOffset("foo/foo1", 2, data64KB[2:], true),
+ hasFileContentsOffset("foo/foo1", int64(len(data64KB)/2), data64KB[len(data64KB)/2:], true),
+ hasFileContentsOffset("foo/foo1", int64(len(data64KB)-1), data64KB[len(data64KB)-1:], true),
+ },
+ },
+ }
+ for _, tt := range tests {
+ for srcCompresionName, srcCompression := range srcCompressions {
+ srcCompression := srcCompression()
+ t.Run(tt.name+"-"+srcCompresionName, func(t *testing.T) {
+ opts := []tutil.BuildEStargzOption{
+ tutil.WithEStargzOptions(estargz.WithCompression(srcCompression)),
+ }
+ if tt.chunkSize > 0 {
+ opts = append(opts, tutil.WithEStargzOptions(estargz.WithChunkSize(tt.chunkSize)))
+ }
+ if tt.minChunkSize > 0 {
+ t.Logf("minChunkSize = %d", tt.minChunkSize)
+ opts = append(opts, tutil.WithEStargzOptions(estargz.WithMinChunkSize(tt.minChunkSize)))
+ }
+ esgz, tocDgst, err := tutil.BuildEStargz(tt.in, opts...)
+ if err != nil {
+ t.Fatalf("failed to build sample eStargz: %v", err)
+ }
+ testR := &calledReaderAt{esgz, nil}
+ mr, err := factory(io.NewSectionReader(testR, 0, esgz.Size()), metadata.WithDecompressors(srcCompression))
+ if err != nil {
+ t.Fatalf("failed to create new reader: %v", err)
+ }
+ defer mr.Close()
+ memcache := cache.NewMemoryCache()
+ vr, err := NewReader(mr, memcache, digest.FromString(""))
+ if err != nil {
+ t.Fatalf("failed to make new reader: %v", err)
+ }
+ rr, err := vr.VerifyTOC(tocDgst)
+ if err != nil {
+ t.Fatalf("failed to verify TOC: %v", err)
+ }
+ r := rr.(*reader)
+ for _, want := range tt.want {
+ want(t, r, testR)
+ }
+ })
+ }
+ }
+}
+
+type check func(*testing.T, *reader, *calledReaderAt)
+
+type chunkInfo struct {
+ name string
+ data string
+ chunkOffset int64
+ chunkSize int64
+}
+
+func hasFileContentsOffset(name string, off int64, contents string, fromCache bool) check {
+ return func(t *testing.T, r *reader, cr *calledReaderAt) {
+ tid, err := lookup(r, name)
+ if err != nil {
+ t.Fatalf("failed to lookup %q", name)
+ }
+ ra, err := r.OpenFile(tid)
+ if err != nil {
+ t.Fatalf("Failed to open testing file: %v", err)
+ }
+ cr.called = nil // reset test
+ buf := make([]byte, len(contents))
+ n, err := ra.ReadAt(buf, off)
+ if err != nil {
+ t.Fatalf("failed to readat %q: %v", name, err)
+ }
+ if n != len(contents) {
+ t.Fatalf("failed to read contents %q (off:%d, want:%q) got %q", name, off, longBytesView([]byte(contents)), longBytesView(buf))
+ }
+ if string(buf) != contents {
+ t.Fatalf("unexpected content of %q: %q want %q", name, longBytesView(buf), longBytesView([]byte(contents)))
+ }
+ t.Logf("reader calls for %q: offsets: %+v", name, cr.called)
+ if fromCache {
+ if len(cr.called) != 0 {
+ t.Fatalf("unexpected read on %q: offsets: %v", name, cr.called)
+ }
+ } else {
+ if len(cr.called) == 0 {
+ t.Fatalf("no call happened to reader for %q", name)
+ }
+ }
+ }
+}
+
+func hasFileContentsWithPreCached(name string, off int64, contents string, extra ...chunkInfo) check {
+ return func(t *testing.T, r *reader, cr *calledReaderAt) {
+ tid, err := lookup(r, name)
+ if err != nil {
+ t.Fatalf("failed to lookup %q", name)
+ }
+ ra, err := r.OpenFile(tid)
+ if err != nil {
+ t.Fatalf("Failed to open testing file: %v", err)
+ }
+ buf := make([]byte, len(contents))
+ n, err := ra.ReadAt(buf, off)
+ if err != nil {
+ t.Fatalf("failed to readat %q: %v", name, err)
+ }
+ if n != len(contents) {
+ t.Fatalf("failed to read contents %q (off:%d, want:%q) got %q", name, off, longBytesView([]byte(contents)), longBytesView(buf))
+ }
+ if string(buf) != contents {
+ t.Fatalf("unexpected content of %q: %q want %q", name, longBytesView(buf), longBytesView([]byte(contents)))
+ }
+ for _, e := range extra {
+ eid, err := lookup(r, e.name)
+ if err != nil {
+ t.Fatalf("failed to lookup %q", e.name)
+ }
+ cacheID := genID(eid, e.chunkOffset, e.chunkSize)
+ er, err := r.cache.Get(cacheID)
+ if err != nil {
+ t.Fatalf("failed to get cache %q: %+v", cacheID, e)
+ }
+ data, err := io.ReadAll(io.NewSectionReader(er, 0, e.chunkSize))
+ er.Close()
+ if err != nil {
+ t.Fatalf("failed to read cache %q: %+v", cacheID, e)
+ }
+ if string(data) != e.data {
+ t.Fatalf("unexpected contents of cache %q (%+v): %q; wanted %q", cacheID, e, longBytesView(data), longBytesView([]byte(e.data)))
+ }
+ }
+ }
+}
+
+func lookup(r *reader, name string) (uint32, error) {
+ name = strings.TrimPrefix(path.Clean("/"+name), "/")
+ if name == "" {
+ return r.Metadata().RootID(), nil
+ }
+ dir, base := filepath.Split(name)
+ pid, err := lookup(r, dir)
+ if err != nil {
+ return 0, err
+ }
+ id, _, err := r.Metadata().GetChild(pid, base)
+ return id, err
+}
+
+type calledReaderAt struct {
+ io.ReaderAt
+ called []int64
+}
+
+func (r *calledReaderAt) ReadAt(p []byte, off int64) (int, error) {
+ r.called = append(r.called, off)
+ return r.ReaderAt.ReadAt(p, off)
+}
+
+// longBytesView is an alias of []byte that prints long data in an abbreviated form.
+type longBytesView []byte
+
+func (b longBytesView) String() string {
+ if len(b) < 100 {
+ return string(b)
+ }
+ return string(b[:50]) + "...(omit)..." + string(b[len(b)-50:])
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/remote/blob.go b/vendor/github.com/containerd/stargz-snapshotter/fs/remote/blob.go
new file mode 100644
index 000000000000..fcf6796b61d6
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/remote/blob.go
@@ -0,0 +1,535 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the NOTICE.md file.
+*/
+
+package remote
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/stargz-snapshotter/cache"
+ "github.com/containerd/stargz-snapshotter/fs/source"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/sync/singleflight"
+)
+
+var contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
+
+type Blob interface {
+ Check() error
+ Size() int64
+ FetchedSize() int64
+ ReadAt(p []byte, offset int64, opts ...Option) (int, error)
+ Cache(offset int64, size int64, opts ...Option) error
+ Refresh(ctx context.Context, host source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error
+ Close() error
+}
+
+type blob struct {
+ fetcher fetcher
+ fetcherMu sync.Mutex
+
+ size int64
+ chunkSize int64
+ prefetchChunkSize int64
+ cache cache.BlobCache
+ lastCheck time.Time
+ lastCheckMu sync.Mutex
+ checkInterval time.Duration
+ fetchTimeout time.Duration
+
+ fetchedRegionSet regionSet
+ fetchedRegionSetMu sync.Mutex
+ fetchedRegionGroup singleflight.Group
+ fetchedRegionCopyMu sync.Mutex
+
+ resolver *Resolver
+
+ closed bool
+ closedMu sync.Mutex
+}
+
+func makeBlob(fetcher fetcher, size int64, chunkSize int64, prefetchChunkSize int64,
+ blobCache cache.BlobCache, lastCheck time.Time, checkInterval time.Duration,
+ r *Resolver, fetchTimeout time.Duration) *blob {
+ return &blob{
+ fetcher: fetcher,
+ size: size,
+ chunkSize: chunkSize,
+ prefetchChunkSize: prefetchChunkSize,
+ cache: blobCache,
+ lastCheck: lastCheck,
+ checkInterval: checkInterval,
+ resolver: r,
+ fetchTimeout: fetchTimeout,
+ }
+}
+
+func (b *blob) Close() error {
+ b.closedMu.Lock()
+ defer b.closedMu.Unlock()
+ if b.closed {
+ return nil
+ }
+ b.closed = true
+ return b.cache.Close()
+}
+
+func (b *blob) isClosed() bool {
+ b.closedMu.Lock()
+ closed := b.closed
+ b.closedMu.Unlock()
+ return closed
+}
+
+func (b *blob) Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error {
+ if b.isClosed() {
+ return fmt.Errorf("blob is already closed")
+ }
+
+ // refresh the fetcher
+ f, newSize, err := b.resolver.resolveFetcher(ctx, hosts, refspec, desc)
+ if err != nil {
+ return err
+ }
+ if newSize != b.size {
+ return fmt.Errorf("Invalid size of new blob %d; want %d", newSize, b.size)
+ }
+
+ // update the blob's fetcher with new one
+ b.fetcherMu.Lock()
+ b.fetcher = f
+ b.fetcherMu.Unlock()
+ b.lastCheckMu.Lock()
+ b.lastCheck = time.Now()
+ b.lastCheckMu.Unlock()
+
+ return nil
+}
+
+func (b *blob) Check() error {
+ if b.isClosed() {
+ return fmt.Errorf("blob is already closed")
+ }
+
+ now := time.Now()
+ b.lastCheckMu.Lock()
+ lastCheck := b.lastCheck
+ b.lastCheckMu.Unlock()
+ if now.Sub(lastCheck) < b.checkInterval {
+ // do nothing if not expired
+ return nil
+ }
+ b.fetcherMu.Lock()
+ fr := b.fetcher
+ b.fetcherMu.Unlock()
+ err := fr.check()
+ if err == nil {
+ // Update lastCheck only if the check succeeded.
+ // On failure, we should check this layer again next time.
+ b.lastCheckMu.Lock()
+ b.lastCheck = now
+ b.lastCheckMu.Unlock()
+ }
+
+ return err
+}
+
+func (b *blob) Size() int64 {
+ return b.size
+}
+
+func (b *blob) FetchedSize() int64 {
+ b.fetchedRegionSetMu.Lock()
+ sz := b.fetchedRegionSet.totalSize()
+ b.fetchedRegionSetMu.Unlock()
+ return sz
+}
+
+func makeSyncKey(allData map[region]io.Writer) string {
+ keys := make([]string, len(allData))
+ keysIndex := 0
+ for key := range allData {
+ keys[keysIndex] = fmt.Sprintf("[%d,%d]", key.b, key.e)
+ keysIndex++
+ }
+ sort.Strings(keys)
+ return strings.Join(keys, ",")
+}
+
+func (b *blob) cacheAt(offset int64, size int64, fr fetcher, cacheOpts *options) error {
+ fetchReg := region{floor(offset, b.chunkSize), ceil(offset+size-1, b.chunkSize) - 1}
+ discard := make(map[region]io.Writer)
+
+ err := b.walkChunks(fetchReg, func(reg region) error {
+ if r, err := b.cache.Get(fr.genID(reg), cacheOpts.cacheOpts...); err == nil {
+ return r.Close() // no-op if the cache hits
+ }
+ discard[reg] = io.Discard
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ return b.fetchRange(discard, cacheOpts)
+}
+
+func (b *blob) Cache(offset int64, size int64, opts ...Option) error {
+ if b.isClosed() {
+ return fmt.Errorf("blob is already closed")
+ }
+
+ var cacheOpts options
+ for _, o := range opts {
+ o(&cacheOpts)
+ }
+
+ b.fetcherMu.Lock()
+ fr := b.fetcher
+ b.fetcherMu.Unlock()
+
+ if b.prefetchChunkSize <= b.chunkSize {
+ return b.cacheAt(offset, size, fr, &cacheOpts)
+ }
+
+ eg, _ := errgroup.WithContext(context.Background())
+
+ fetchSize := b.chunkSize * (b.prefetchChunkSize / b.chunkSize)
+
+ end := offset + size
+ for i := offset; i < end; i += fetchSize {
+ i, l := i, fetchSize
+ if i+l > end {
+ l = end - i
+ }
+ eg.Go(func() error {
+ return b.cacheAt(i, l, fr, &cacheOpts)
+ })
+ }
+
+ return eg.Wait()
+}
+
+// ReadAt reads remote chunks covering the buffer size from the specified offset.
+// It tries to fetch as many chunks as possible from the local cache.
+// The behaviour is configurable through options.
+func (b *blob) ReadAt(p []byte, offset int64, opts ...Option) (int, error) {
+ if b.isClosed() {
+ return 0, fmt.Errorf("blob is already closed")
+ }
+
+ if len(p) == 0 || offset > b.size {
+ return 0, nil
+ }
+
+ // Make the buffer chunk aligned
+ allRegion := region{floor(offset, b.chunkSize), ceil(offset+int64(len(p))-1, b.chunkSize) - 1}
+ allData := make(map[region]io.Writer)
+
+ var readAtOpts options
+ for _, o := range opts {
+ o(&readAtOpts)
+ }
+
+ // The fetcher can be swapped at any time (e.g. by Refresh), so take a
+ // snapshot of it for consistency.
+ b.fetcherMu.Lock()
+ fr := b.fetcher
+ b.fetcherMu.Unlock()
+
+ b.walkChunks(allRegion, func(chunk region) error {
+ var (
+ base = positive(chunk.b - offset)
+ lowerUnread = positive(offset - chunk.b)
+ upperUnread = positive(chunk.e + 1 - (offset + int64(len(p))))
+ expectedSize = chunk.size() - upperUnread - lowerUnread
+ )
+
+ // Check if the content exists in the cache
+ r, err := b.cache.Get(fr.genID(chunk), readAtOpts.cacheOpts...)
+ if err == nil {
+ defer r.Close()
+ n, err := r.ReadAt(p[base:base+expectedSize], lowerUnread)
+ if (err == nil || err == io.EOF) && int64(n) == expectedSize {
+ return nil
+ }
+ }
+
+ // Cache miss; take it from the remote registry.
+ // We fetch the whole chunk and add it to the cache so that subsequent reads
+ // of neighboring chunks can be served without additional HTTP requests.
+ allData[chunk] = newBytesWriter(p[base:base+expectedSize], lowerUnread)
+ return nil
+ })
+
+ // Read required data
+ if err := b.fetchRange(allData, &readAtOpts); err != nil {
+ return 0, err
+ }
+
+ // Adjust the buffer size according to the blob size
+ if remain := b.size - offset; int64(len(p)) >= remain {
+ if remain < 0 {
+ remain = 0
+ }
+ p = p[:remain]
+ }
+
+ return len(p), nil
+}
+
+// fetchRegions fetches all specified chunks from the remote blob and puts them in the local cache.
+// It must be called from within fetchRange and must run inside the singleflight `Do` operation.
+func (b *blob) fetchRegions(allData map[region]io.Writer, fetched map[region]bool, opts *options) error {
+ if len(allData) == 0 {
+ return nil
+ }
+
+ // The fetcher can be swapped at any time (e.g. by Refresh), so take a
+ // snapshot of it for consistency.
+ b.fetcherMu.Lock()
+ fr := b.fetcher
+ b.fetcherMu.Unlock()
+
+ // request missed regions
+ var req []region
+ for reg := range allData {
+ req = append(req, reg)
+ fetched[reg] = false
+ }
+
+ fetchCtx, cancel := context.WithTimeout(context.Background(), b.fetchTimeout)
+ defer cancel()
+ if opts.ctx != nil {
+ fetchCtx = opts.ctx
+ }
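+ // Note that a caller-supplied context replaces the timeout context entirely,
+ // so fetchTimeout does not apply in that case.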
+ mr, err := fr.fetch(fetchCtx, req, true)
+ if err != nil {
+ return err
+ }
+ defer mr.Close()
+
+ // Update the check timer because we succeeded in accessing the blob
+ b.lastCheckMu.Lock()
+ b.lastCheck = time.Now()
+ b.lastCheckMu.Unlock()
+
+ // Chunk and cache the response data. Regions must be aligned to the chunk size.
+ // TODO: Reorganize remoteData to make it aligned to the chunk size
+ for {
+ reg, p, err := mr.Next()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return fmt.Errorf("failed to read multipart resp: %w", err)
+ }
+ if err := b.walkChunks(reg, func(chunk region) (retErr error) {
+ id := fr.genID(chunk)
+ cw, err := b.cache.Add(id, opts.cacheOpts...)
+ if err != nil {
+ return err
+ }
+ defer cw.Close()
+ w := io.Writer(cw)
+
+ // If this chunk is one of the targets, write the content to the
+ // passed writer too.
+ if _, ok := fetched[chunk]; ok {
+ w = io.MultiWriter(w, allData[chunk])
+ }
+
+ // Copy the target chunk
+ if _, err := io.CopyN(w, p, chunk.size()); err != nil {
+ cw.Abort()
+ return err
+ }
+
+ // Add the target chunk to the cache
+ if err := cw.Commit(); err != nil {
+ return err
+ }
+
+ b.fetchedRegionSetMu.Lock()
+ b.fetchedRegionSet.add(chunk)
+ b.fetchedRegionSetMu.Unlock()
+ fetched[chunk] = true
+ return nil
+ }); err != nil {
+ return fmt.Errorf("failed to get chunks: %w", err)
+ }
+ }
+
+ // Check that all chunks have been fetched
+ var unfetched []region
+ for reg, done := range fetched {
+ if !done {
+ unfetched = append(unfetched, reg)
+ }
+ }
+ if len(unfetched) > 0 {
+ return fmt.Errorf("failed to fetch region %v", unfetched)
+ }
+
+ return nil
+}
+
+// fetchRange fetches all specified chunks from local cache and remote blob.
+func (b *blob) fetchRange(allData map[region]io.Writer, opts *options) error {
+ if len(allData) == 0 {
+ return nil
+ }
+
+ // We build a key from the regions we need to fetch and pass it to singleflightGroup.Do(...)
+ // to deduplicate simultaneous identical requests. Once the request finishes and the data
+ // is ready, all blocked callers are unblocked and receive that same data.
+ key := makeSyncKey(allData)
+ fetched := make(map[region]bool)
+ _, err, shared := b.fetchedRegionGroup.Do(key, func() (interface{}, error) {
+ return nil, b.fetchRegions(allData, fetched, opts)
+ })
+
+ // Once unblocked, try to read from the cache if there were no errors.
+ // If reading from the cache fails, fetch from the remote registry again.
+ if err == nil && shared {
+ for reg := range allData {
+ if _, ok := fetched[reg]; ok {
+ continue
+ }
+ err = b.walkChunks(reg, func(chunk region) error {
+ b.fetcherMu.Lock()
+ fr := b.fetcher
+ b.fetcherMu.Unlock()
+
+ // Check if the content exists in the cache;
+ // if it does, read it from the cache
+ r, err := b.cache.Get(fr.genID(chunk), opts.cacheOpts...)
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+ rr := io.NewSectionReader(r, 0, chunk.size())
+
+ // Copy the target chunk
+ b.fetchedRegionCopyMu.Lock()
+ defer b.fetchedRegionCopyMu.Unlock()
+ if _, err := io.CopyN(allData[chunk], rr, chunk.size()); err != nil {
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ break
+ }
+ }
+
+ // if we cannot read the data from the cache, fetch it again
+ if err != nil {
+ return b.fetchRange(allData, opts)
+ }
+ }
+
+ return err
+}
+
+type walkFunc func(reg region) error
+
+// walkChunks walks chunks in order from the beginning to the end of the specified region.
+// The specified region must be aligned to the chunk size.
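+// For example, with chunkSize=50000 and a blob of 120000 bytes, walking
+// region{0, 149999} visits {0,49999}, {50000,99999} and {100000,119999}.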
+func (b *blob) walkChunks(allRegion region, walkFn walkFunc) error {
+ if allRegion.b%b.chunkSize != 0 {
+ return fmt.Errorf("region (%d, %d) must be aligned by chunk size",
+ allRegion.b, allRegion.e)
+ }
+ for i := allRegion.b; i <= allRegion.e && i < b.size; i += b.chunkSize {
+ reg := region{i, i + b.chunkSize - 1}
+ if reg.e >= b.size {
+ reg.e = b.size - 1
+ }
+ if err := walkFn(reg); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func newBytesWriter(dest []byte, destOff int64) io.Writer {
+ return &bytesWriter{
+ dest: dest,
+ destOff: destOff,
+ current: 0,
+ }
+}
+
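+// bytesWriter writes a stream into dest as if dest were located at offset
+// destOff within that stream: stream bytes before destOff or beyond the end
+// of dest are discarded, and Write always reports the full input as consumed.
+// For example, with destOff=10 and len(dest)=5, only stream bytes [10,15)
+// are copied into dest.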
+type bytesWriter struct {
+ dest []byte
+ destOff int64
+ current int64
+}
+
+func (bw *bytesWriter) Write(p []byte) (int, error) {
+ defer func() { bw.current = bw.current + int64(len(p)) }()
+
+ var (
+ destBase = positive(bw.current - bw.destOff)
+ pBegin = positive(bw.destOff - bw.current)
+ pEnd = positive(bw.destOff + int64(len(bw.dest)) - bw.current)
+ )
+
+ if destBase > int64(len(bw.dest)) {
+ return len(p), nil
+ }
+ if pBegin >= int64(len(p)) {
+ return len(p), nil
+ }
+ if pEnd > int64(len(p)) {
+ pEnd = int64(len(p))
+ }
+
+ copy(bw.dest[destBase:], p[pBegin:pEnd])
+
+ return len(p), nil
+}
+
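+// floor rounds n down to a multiple of unit, while ceil always advances to the
+// next boundary: floor(130, 50) == 100 and ceil(130, 50) == 150, but also
+// ceil(100, 50) == 150. This is why callers write ceil(offset+size-1, unit) - 1
+// to compute inclusive, chunk-aligned upper bounds.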
+func floor(n int64, unit int64) int64 {
+ return (n / unit) * unit
+}
+
+func ceil(n int64, unit int64) int64 {
+ return (n/unit + 1) * unit
+}
+
+func positive(n int64) int64 {
+ if n < 0 {
+ return 0
+ }
+ return n
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/remote/resolver.go b/vendor/github.com/containerd/stargz-snapshotter/fs/remote/resolver.go
new file mode 100644
index 000000000000..46c28d374429
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/remote/resolver.go
@@ -0,0 +1,728 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the NOTICE.md file.
+*/
+
+package remote
+
+import (
+ "context"
+ "crypto/rand"
+ "crypto/sha256"
+ "fmt"
+ "io"
+ "math/big"
+ "mime"
+ "mime/multipart"
+ "net/http"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containerd/containerd/v2/core/remotes/docker"
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+ "github.com/containerd/stargz-snapshotter/cache"
+ "github.com/containerd/stargz-snapshotter/fs/config"
+ commonmetrics "github.com/containerd/stargz-snapshotter/fs/metrics/common"
+ "github.com/containerd/stargz-snapshotter/fs/source"
+ "github.com/hashicorp/go-multierror"
+ rhttp "github.com/hashicorp/go-retryablehttp"
+ digest "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+const (
+ defaultChunkSize = 50000
+ defaultValidIntervalSec = 60
+ defaultFetchTimeoutSec = 300
+
+ defaultMaxRetries = 5
+ defaultMinWaitMSec = 30
+ defaultMaxWaitMSec = 300000
+)
+
+func NewResolver(cfg config.BlobConfig, handlers map[string]Handler) *Resolver {
+ if cfg.ChunkSize == 0 { // zero means "use default chunk size"
+ cfg.ChunkSize = defaultChunkSize
+ }
+ if cfg.ValidInterval == 0 { // zero means "use default interval"
+ cfg.ValidInterval = defaultValidIntervalSec
+ }
+ if cfg.CheckAlways {
+ cfg.ValidInterval = 0
+ }
+ if cfg.FetchTimeoutSec == 0 {
+ cfg.FetchTimeoutSec = defaultFetchTimeoutSec
+ }
+ if cfg.MaxRetries == 0 {
+ cfg.MaxRetries = defaultMaxRetries
+ }
+ if cfg.MinWaitMSec == 0 {
+ cfg.MinWaitMSec = defaultMinWaitMSec
+ }
+ if cfg.MaxWaitMSec == 0 {
+ cfg.MaxWaitMSec = defaultMaxWaitMSec
+ }
+
+ return &Resolver{
+ blobConfig: cfg,
+ handlers: handlers,
+ }
+}
+
+type Resolver struct {
+ blobConfig config.BlobConfig
+ handlers map[string]Handler
+}
+
+type fetcher interface {
+ fetch(ctx context.Context, rs []region, retry bool) (multipartReadCloser, error)
+ check() error
+ genID(reg region) string
+}
+
+func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor, blobCache cache.BlobCache) (Blob, error) {
+ f, size, err := r.resolveFetcher(ctx, hosts, refspec, desc)
+ if err != nil {
+ return nil, err
+ }
+ blobConfig := &r.blobConfig
+ return makeBlob(f,
+ size,
+ blobConfig.ChunkSize,
+ blobConfig.PrefetchChunkSize,
+ blobCache,
+ time.Now(),
+ time.Duration(blobConfig.ValidInterval)*time.Second,
+ r,
+ time.Duration(blobConfig.FetchTimeoutSec)*time.Second), nil
+}
+
+func (r *Resolver) resolveFetcher(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) (f fetcher, size int64, err error) {
+ blobConfig := &r.blobConfig
+ fc := &fetcherConfig{
+ hosts: hosts,
+ refspec: refspec,
+ desc: desc,
+ maxRetries: blobConfig.MaxRetries,
+ minWaitMSec: time.Duration(blobConfig.MinWaitMSec) * time.Millisecond,
+ maxWaitMSec: time.Duration(blobConfig.MaxWaitMSec) * time.Millisecond,
+ }
+ var handlersErr error
+ for name, p := range r.handlers {
+ // TODO: allow configuring the selection of readers based on the hostname in refspec
+ r, size, err := p.Handle(ctx, desc)
+ if err != nil {
+ handlersErr = multierror.Append(handlersErr, err)
+ continue
+ }
+ log.G(ctx).WithField("handler name", name).WithField("ref", refspec.String()).WithField("digest", desc.Digest).
+ Debugf("contents is provided by a handler")
+ return &remoteFetcher{r}, size, nil
+ }
+
+ log.G(ctx).WithError(handlersErr).WithField("ref", refspec.String()).WithField("digest", desc.Digest).Debugf("using default handler")
+ hf, size, err := newHTTPFetcher(ctx, fc)
+ if err != nil {
+ return nil, 0, err
+ }
+ if blobConfig.ForceSingleRangeMode {
+ hf.singleRangeMode()
+ }
+ return hf, size, err
+}
+
+type fetcherConfig struct {
+ hosts source.RegistryHosts
+ refspec reference.Spec
+ desc ocispec.Descriptor
+ maxRetries int
+ minWaitMSec time.Duration
+ maxWaitMSec time.Duration
+}
+
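+// jitter returns a random duration in [duration, 2*duration), preserving the
+// backoff's lower bound while spreading retries out in time.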
+func jitter(duration time.Duration) time.Duration {
+ if duration <= 0 {
+ return duration
+ }
+ b, err := rand.Int(rand.Reader, big.NewInt(int64(duration)))
+ if err != nil {
+ panic(err)
+ }
+ return time.Duration(b.Int64() + int64(duration))
+}
+
+// backoffStrategy extends retryablehttp's DefaultBackoff with a random jitter to avoid
+// overwhelming the repository when it comes back online.
+// DefaultBackoff either honors the 'Retry-After' header of the response or uses an
+// exponential backoff of 2^attemptNum, limited by max.
+func backoffStrategy(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
+ delayTime := rhttp.DefaultBackoff(min, max, attemptNum, resp)
+ return jitter(delayTime)
+}
+
+// retryStrategy extends retryablehttp's DefaultRetryPolicy to debug-log the error when
+// retrying. DefaultRetryPolicy retries whenever err is non-nil (except for some URL
+// errors) or when the returned status code is 429 or 5xx (except 501).
+func retryStrategy(ctx context.Context, resp *http.Response, err error) (bool, error) {
+ retry, err2 := rhttp.DefaultRetryPolicy(ctx, resp, err)
+ if retry {
+ log.G(ctx).WithError(err).Debugf("Retrying request")
+ }
+ return retry, err2
+}
+
+func newHTTPFetcher(ctx context.Context, fc *fetcherConfig) (*httpFetcher, int64, error) {
+ reghosts, err := fc.hosts(fc.refspec)
+ if err != nil {
+ return nil, 0, err
+ }
+ desc := fc.desc
+ if desc.Digest.String() == "" {
+ return nil, 0, fmt.Errorf("digest is mandatory in layer descriptor")
+ }
+ digest := desc.Digest
+ pullScope, err := docker.RepositoryScope(fc.refspec, false)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ // Try each host until a fetcher is created successfully
+ rErr := fmt.Errorf("failed to resolve")
+ for _, host := range reghosts {
+ if host.Host == "" || strings.Contains(host.Host, "/") {
+ rErr = fmt.Errorf("invalid destination (host %q, ref:%q, digest:%q): %w", host.Host, fc.refspec, digest, rErr)
+ continue // Try another
+ }
+
+ // Prepare transport with authorization functionality
+ tr := host.Client.Transport
+
+ timeout := host.Client.Timeout
+ if rt, ok := tr.(*rhttp.RoundTripper); ok {
+ rt.Client.RetryMax = fc.maxRetries
+ rt.Client.RetryWaitMin = fc.minWaitMSec
+ rt.Client.RetryWaitMax = fc.maxWaitMSec
+ rt.Client.Backoff = backoffStrategy
+ rt.Client.CheckRetry = retryStrategy
+ timeout = rt.Client.HTTPClient.Timeout
+ }
+
+ if host.Authorizer != nil {
+ tr = &transport{
+ inner: tr,
+ auth: host.Authorizer,
+ scope: pullScope,
+ }
+ }
+
+ // Resolve redirection and get blob URL
+ blobURL := fmt.Sprintf("%s://%s/%s/blobs/%s",
+ host.Scheme,
+ path.Join(host.Host, host.Path),
+ strings.TrimPrefix(fc.refspec.Locator, fc.refspec.Hostname()+"/"),
+ digest)
+ url, header, err := redirect(ctx, blobURL, tr, timeout, host.Header)
+ if err != nil {
+ rErr = fmt.Errorf("failed to redirect (host %q, ref:%q, digest:%q): %v: %w", host.Host, fc.refspec, digest, err, rErr)
+ continue // Try another
+ }
+
+ // Get size information
+ // TODO: we should try to use the Size field in the descriptor here.
+ start := time.Now() // start time before getting layer header
+ size, err := getSize(ctx, url, tr, timeout, header)
+ commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzHeaderGet, digest, start) // time to get layer header
+ if err != nil {
+ rErr = fmt.Errorf("failed to get size (host %q, ref:%q, digest:%q): %v: %w", host.Host, fc.refspec, digest, err, rErr)
+ continue // Try another
+ }
+
+ // Hit one destination
+ return &httpFetcher{
+ url: url,
+ tr: tr,
+ blobURL: blobURL,
+ digest: digest,
+ timeout: timeout,
+ header: header,
+ orgHeader: host.Header,
+ }, size, nil
+ }
+
+ return nil, 0, fmt.Errorf("cannot resolve layer: %w", rErr)
+}
+
+type transport struct {
+ inner http.RoundTripper
+ auth docker.Authorizer
+ scope string
+}
+
+func (tr *transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ ctx := docker.WithScope(req.Context(), tr.scope)
+ roundTrip := func(req *http.Request) (*http.Response, error) {
+ // authorize the request using docker.Authorizer
+ if err := tr.auth.Authorize(ctx, req); err != nil {
+ return nil, err
+ }
+
+ // send the request
+ return tr.inner.RoundTrip(req)
+ }
+
+ resp, err := roundTrip(req)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: support more status codes and retries
+ if resp.StatusCode == http.StatusUnauthorized {
+ log.G(ctx).Infof("Received status code: %v. Refreshing creds...", resp.Status)
+
+ // prepare authorization for the target host using docker.Authorizer
+ if err := tr.auth.AddResponses(ctx, []*http.Response{resp}); err != nil {
+ if errdefs.IsNotImplemented(err) {
+ return resp, nil
+ }
+ return nil, err
+ }
+
+ // re-authorize and send the request
+ return roundTrip(req.Clone(ctx))
+ }
+
+ return resp, nil
+}
+
+func redirect(ctx context.Context, blobURL string, tr http.RoundTripper, timeout time.Duration, header http.Header) (url string, withHeader http.Header, err error) {
+ if timeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, timeout)
+ defer cancel()
+ }
+ // We use GET request for redirect.
+ // gcr.io returns 200 on HEAD without Location header (2020).
+ // ghcr.io returns 200 on HEAD without Location header (2020).
+ req, err := http.NewRequestWithContext(ctx, "GET", blobURL, nil)
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to make request to the registry: %w", err)
+ }
+ req.Header = http.Header{}
+ for k, v := range header {
+ req.Header[k] = v
+ }
+ req.Close = false
+ req.Header.Set("Range", "bytes=0-1")
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to request: %w", err)
+ }
+ defer func() {
+ io.Copy(io.Discard, res.Body)
+ res.Body.Close()
+ }()
+
+ if res.StatusCode/100 == 2 {
+ url = blobURL
+ withHeader = header
+ } else if redir := res.Header.Get("Location"); redir != "" && res.StatusCode/100 == 3 {
+ // TODO: Support nested redirection
+ url = redir
+ // Do not pass headers to the redirected location.
+ } else {
+ return "", nil, fmt.Errorf("failed to access to the registry with code %v", res.StatusCode)
+ }
+
+ return
+}
+
+func getSize(ctx context.Context, url string, tr http.RoundTripper, timeout time.Duration, header http.Header) (int64, error) {
+ if timeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, timeout)
+ defer cancel()
+ }
+ req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
+ if err != nil {
+ return 0, err
+ }
+ req.Header = http.Header{}
+ for k, v := range header {
+ req.Header[k] = v
+ }
+ req.Close = false
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ return 0, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode == http.StatusOK {
+ return strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64)
+ }
+ headStatusCode := res.StatusCode
+
+ // Failed to do HEAD request. Fall back to GET.
+ // ghcr.io (https://github-production-container-registry.s3.amazonaws.com) doesn't allow
+ // HEAD request (2020).
+ req, err = http.NewRequestWithContext(ctx, "GET", url, nil)
+ if err != nil {
+ return 0, fmt.Errorf("failed to make request to the registry: %w", err)
+ }
+ req.Header = http.Header{}
+ for k, v := range header {
+ req.Header[k] = v
+ }
+ req.Close = false
+ req.Header.Set("Range", "bytes=0-1")
+ res, err = tr.RoundTrip(req)
+ if err != nil {
+ return 0, fmt.Errorf("failed to request: %w", err)
+ }
+ defer func() {
+ io.Copy(io.Discard, res.Body)
+ res.Body.Close()
+ }()
+
+ if res.StatusCode == http.StatusOK {
+ return strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64)
+ } else if res.StatusCode == http.StatusPartialContent {
+ _, size, err := parseRange(res.Header.Get("Content-Range"))
+ return size, err
+ }
+
+ return 0, fmt.Errorf("failed to get size with code (HEAD=%v, GET=%v)",
+ headStatusCode, res.StatusCode)
+}
+
+type httpFetcher struct {
+ url string
+ urlMu sync.Mutex
+ tr http.RoundTripper
+ blobURL string
+ digest digest.Digest
+ singleRange bool
+ singleRangeMu sync.Mutex
+ timeout time.Duration
+ header http.Header
+ orgHeader http.Header
+}
+
+type multipartReadCloser interface {
+ Next() (region, io.Reader, error)
+ Close() error
+}
+
+func (f *httpFetcher) fetch(ctx context.Context, rs []region, retry bool) (multipartReadCloser, error) {
+ if len(rs) == 0 {
+ return nil, fmt.Errorf("no request queried")
+ }
+
+ var (
+ tr = f.tr
+ singleRangeMode = f.isSingleRangeMode()
+ )
+
+ // Squash the requested chunks to reduce the total size of the request header
+ // (servers generally limit the size of headers).
+ // TODO: when a request has too many ranges, we need to divide it into
+ // multiple requests to avoid huge headers.
+ var s regionSet
+ for _, reg := range rs {
+ s.add(reg)
+ }
+ requests := s.rs
+ if singleRangeMode {
+ // Squash requests if the layer doesn't support multi range.
+ requests = []region{superRegion(requests)}
+ }
+
+ // Request to the registry
+ f.urlMu.Lock()
+ url := f.url
+ f.urlMu.Unlock()
+ req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = http.Header{}
+ for k, v := range f.header {
+ req.Header[k] = v
+ }
+ var ranges string
+ for _, reg := range requests {
+ ranges += fmt.Sprintf("%d-%d,", reg.b, reg.e)
+ }
+ req.Header.Add("Range", fmt.Sprintf("bytes=%s", ranges[:len(ranges)-1]))
+ req.Header.Add("Accept-Encoding", "identity")
+ req.Close = false
+
+ // Recording the roundtrip latency for remote registry GET operation.
+ start := time.Now()
+ res, err := tr.RoundTrip(req) // NOT DefaultClient; don't want redirects
+ commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.RemoteRegistryGet, f.digest, start)
+ if err != nil {
+ return nil, err
+ }
+ if res.StatusCode == http.StatusOK {
+ // We are getting the whole blob in one part (= status 200)
+ size, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse Content-Length: %w", err)
+ }
+ return newSinglePartReader(region{0, size - 1}, res.Body), nil
+ } else if res.StatusCode == http.StatusPartialContent {
+ mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, fmt.Errorf("invalid media type %q: %w", mediaType, err)
+ }
+ if strings.HasPrefix(mediaType, "multipart/") {
+ // We are getting a set of chunks as a multipart body.
+ return newMultiPartReader(res.Body, params["boundary"]), nil
+ }
+
+ // We are getting single range
+ reg, _, err := parseRange(res.Header.Get("Content-Range"))
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse Content-Range: %w", err)
+ }
+ return newSinglePartReader(reg, res.Body), nil
+ } else if retry && res.StatusCode == http.StatusForbidden {
+ log.G(ctx).Infof("Received status code: %v. Refreshing URL and retrying...", res.Status)
+
+ // re-redirect and retry this once.
+ if err := f.refreshURL(ctx); err != nil {
+ return nil, fmt.Errorf("failed to refresh URL on %v: %w", res.Status, err)
+ }
+ return f.fetch(ctx, rs, false)
+ } else if retry && res.StatusCode == http.StatusBadRequest && !singleRangeMode {
+ log.G(ctx).Infof("Received status code: %v. Setting single range mode and retrying...", res.Status)
+
+ // gcr.io (https://storage.googleapis.com) returns 400 on multi-range request (2020 #81)
+ f.singleRangeMode() // fall back to single range request mode
+ return f.fetch(ctx, rs, false) // retries with the single range mode
+ }
+
+ return nil, fmt.Errorf("unexpected status code: %v", res.Status)
+}
+
+func (f *httpFetcher) check() error {
+ ctx := context.Background()
+ if f.timeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, f.timeout)
+ defer cancel()
+ }
+ f.urlMu.Lock()
+ url := f.url
+ f.urlMu.Unlock()
+ req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+ if err != nil {
+ return fmt.Errorf("check failed: failed to make request: %w", err)
+ }
+ req.Header = http.Header{}
+ for k, v := range f.header {
+ req.Header[k] = v
+ }
+ req.Close = false
+ req.Header.Set("Range", "bytes=0-1")
+ res, err := f.tr.RoundTrip(req)
+ if err != nil {
+ return fmt.Errorf("check failed: failed to request to registry: %w", err)
+ }
+ defer func() {
+ io.Copy(io.Discard, res.Body)
+ res.Body.Close()
+ }()
+ if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusPartialContent {
+ return nil
+ } else if res.StatusCode == http.StatusForbidden {
+ // Try to re-redirect this blob
+ rCtx := context.Background()
+ if f.timeout > 0 {
+ var rCancel context.CancelFunc
+ rCtx, rCancel = context.WithTimeout(rCtx, f.timeout)
+ defer rCancel()
+ }
+ if err := f.refreshURL(rCtx); err == nil {
+ return nil
+ }
+ return fmt.Errorf("failed to refresh URL on status %v", res.Status)
+ }
+
+ return fmt.Errorf("unexpected status code %v", res.StatusCode)
+}
+
+func (f *httpFetcher) refreshURL(ctx context.Context) error {
+ newURL, headers, err := redirect(ctx, f.blobURL, f.tr, f.timeout, f.orgHeader)
+ if err != nil {
+ return err
+ }
+ f.urlMu.Lock()
+ f.url = newURL
+ f.header = headers
+ f.urlMu.Unlock()
+ return nil
+}
+
+func (f *httpFetcher) genID(reg region) string {
+ sum := sha256.Sum256([]byte(fmt.Sprintf("%s-%d-%d", f.blobURL, reg.b, reg.e)))
+ return fmt.Sprintf("%x", sum)
+}
+
+func (f *httpFetcher) singleRangeMode() {
+ f.singleRangeMu.Lock()
+ f.singleRange = true
+ f.singleRangeMu.Unlock()
+}
+
+func (f *httpFetcher) isSingleRangeMode() bool {
+ f.singleRangeMu.Lock()
+ r := f.singleRange
+ f.singleRangeMu.Unlock()
+ return r
+}
+
+func newSinglePartReader(reg region, rc io.ReadCloser) multipartReadCloser {
+ return &singlepartReader{
+ r: rc,
+ Closer: rc,
+ reg: reg,
+ }
+}
+
+type singlepartReader struct {
+ io.Closer
+ r io.Reader
+ reg region
+ called bool
+}
+
+func (sr *singlepartReader) Next() (region, io.Reader, error) {
+ if !sr.called {
+ sr.called = true
+ return sr.reg, sr.r, nil
+ }
+ return region{}, nil, io.EOF
+}
+
+func newMultiPartReader(rc io.ReadCloser, boundary string) multipartReadCloser {
+ return &multipartReader{
+ m: multipart.NewReader(rc, boundary),
+ Closer: rc,
+ }
+}
+
+type multipartReader struct {
+ io.Closer
+ m *multipart.Reader
+}
+
+func (sr *multipartReader) Next() (region, io.Reader, error) {
+ p, err := sr.m.NextPart()
+ if err != nil {
+ return region{}, nil, err
+ }
+ reg, _, err := parseRange(p.Header.Get("Content-Range"))
+ if err != nil {
+ return region{}, nil, fmt.Errorf("failed to parse Content-Range: %w", err)
+ }
+ return reg, p, nil
+}
+
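+// parseRange parses a Content-Range header value such as "bytes 100-199/2000"
+// into region{100, 199} and the total blob size 2000. contentRangeRegexp is
+// defined elsewhere in this package.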
+func parseRange(header string) (region, int64, error) {
+ submatches := contentRangeRegexp.FindStringSubmatch(header)
+ if len(submatches) < 4 {
+ return region{}, 0, fmt.Errorf("Content-Range %q doesn't have enough information", header)
+ }
+ begin, err := strconv.ParseInt(submatches[1], 10, 64)
+ if err != nil {
+ return region{}, 0, fmt.Errorf("failed to parse beginning offset %q: %w", submatches[1], err)
+ }
+ end, err := strconv.ParseInt(submatches[2], 10, 64)
+ if err != nil {
+ return region{}, 0, fmt.Errorf("failed to parse end offset %q: %w", submatches[2], err)
+ }
+ blobSize, err := strconv.ParseInt(submatches[3], 10, 64)
+ if err != nil {
+ return region{}, 0, fmt.Errorf("failed to parse blob size %q: %w", submatches[3], err)
+ }
+
+ return region{begin, end}, blobSize, nil
+}
+
+type Option func(*options)
+
+type options struct {
+ ctx context.Context
+ cacheOpts []cache.Option
+}
+
+func WithContext(ctx context.Context) Option {
+ return func(opts *options) {
+ opts.ctx = ctx
+ }
+}
+
+func WithCacheOpts(cacheOpts ...cache.Option) Option {
+ return func(opts *options) {
+ opts.cacheOpts = cacheOpts
+ }
+}
+
+type remoteFetcher struct {
+ r Fetcher
+}
+
+func (r *remoteFetcher) fetch(ctx context.Context, rs []region, retry bool) (multipartReadCloser, error) {
+ var s regionSet
+ for _, reg := range rs {
+ s.add(reg)
+ }
+ reg := superRegion(s.rs)
+ rc, err := r.r.Fetch(ctx, reg.b, reg.size())
+ if err != nil {
+ return nil, err
+ }
+ return newSinglePartReader(reg, rc), nil
+}
+
+func (r *remoteFetcher) check() error {
+ return r.r.Check()
+}
+
+func (r *remoteFetcher) genID(reg region) string {
+ return r.r.GenID(reg.b, reg.size())
+}
+
+type Handler interface {
+ Handle(ctx context.Context, desc ocispec.Descriptor) (fetcher Fetcher, size int64, err error)
+}
+
+type Fetcher interface {
+ Fetch(ctx context.Context, off int64, size int64) (io.ReadCloser, error)
+ Check() error
+ GenID(off int64, size int64) string
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/remote/util.go b/vendor/github.com/containerd/stargz-snapshotter/fs/remote/util.go
new file mode 100644
index 000000000000..1fd3899bfbb4
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/remote/util.go
@@ -0,0 +1,109 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the NOTICE.md file.
+*/
+
+package remote
+
+// region is an HTTP-range-request-compliant byte range.
+// "b" is the beginning byte of the range and "e" is the end.
+// "e" is inclusive, matching HTTP's range expression.
+type region struct{ b, e int64 }
+
+func (c region) size() int64 {
+ return c.e - c.b + 1
+}
+
+func superRegion(regs []region) region {
+ s := regs[0]
+ for _, reg := range regs {
+ if reg.b < s.b {
+ s.b = reg.b
+ }
+ if reg.e > s.e {
+ s.e = reg.e
+ }
+ }
+ return s
+}
+
+// regionSet is a set of regions
+type regionSet struct {
+ rs []region // must be kept sorted
+}
+
+// add attempts to merge r into rs.rs, squashing the regions into as few as
+// possible. This operation takes O(n).
+// TODO: find a more efficient way to do this.
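+//
+// For example, adding region{0, 99} to [{100, 199}, {300, 399}] yields
+// [{0, 199}, {300, 399}]: {0, 99} is adjacent to {100, 199} (l.b <= r.e+1),
+// so the two are merged before insertion.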
+func (rs *regionSet) add(r region) {
+ // Iterate over the sorted region slice from the tail.
+ // a) When an overlap occurs, adjust `r` to fully contain the region under
+ // inspection `l`, and remove `l` from the region slice.
+ // b) Once l.e becomes less than r.b, no further overlap can occur, so
+ // immediately insert `r`, which now fully contains all overlapping regions,
+ // into the region slice. `r` is inserted so that the slice stays sorted and
+ // free of overlapping regions.
+ // *) If any `l` contains `r`, there is nothing to do, so return immediately.
+ for i := len(rs.rs) - 1; i >= 0; i-- {
+ l := &rs.rs[i]
+
+ // *) l contains r
+ if l.b <= r.b && r.e <= l.e {
+ return
+ }
+
+ // a) r overlaps l, so adjust r to fully contain l and remove l
+ // from the region slice.
+ if l.b <= r.b && r.b <= l.e+1 && l.e <= r.e {
+ r.b = l.b
+ rs.rs = append(rs.rs[:i], rs.rs[i+1:]...)
+ continue
+ }
+ if r.b <= l.b && l.b <= r.e+1 && r.e <= l.e {
+ r.e = l.e
+ rs.rs = append(rs.rs[:i], rs.rs[i+1:]...)
+ continue
+ }
+ if r.b <= l.b && l.e <= r.e {
+ rs.rs = append(rs.rs[:i], rs.rs[i+1:]...)
+ continue
+ }
+
+ // b) No overlap can occur after this iteration. Insert r into the
+ // region slice immediately.
+ if l.e < r.b {
+ rs.rs = append(rs.rs[:i+1], append([]region{r}, rs.rs[i+1:]...)...)
+ return
+ }
+
+ // No overlap yet. See the next region.
+ }
+
+ // r starts before every region remaining in the slice, so prepend it.
+ rs.rs = append([]region{r}, rs.rs...)
+}
+
+func (rs *regionSet) totalSize() int64 {
+ var sz int64
+ for _, f := range rs.rs {
+ sz += f.size()
+ }
+ return sz
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/source/source.go b/vendor/github.com/containerd/stargz-snapshotter/fs/source/source.go
new file mode 100644
index 000000000000..5b0190180294
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/fs/source/source.go
@@ -0,0 +1,271 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package source
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/core/remotes/docker"
+ "github.com/containerd/containerd/v2/pkg/labels"
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/stargz-snapshotter/fs/config"
+ digest "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// GetSources is a function for converting snapshot labels into typed blob source
+// information. This package defines a default converter which derives source
+// information from certain labels, but implementations aren't required to use labels.
+// Implementations are allowed to return several sources (registry config + image refs)
+// for the blob.
+type GetSources func(labels map[string]string) (source []Source, err error)
+
+// RegistryHosts returns a list of registries that provides the specified image.
+type RegistryHosts func(reference.Spec) ([]docker.RegistryHost, error)
+
+// Source is a typed blob source information. This contains information about
+// a blob stored in registries and some contexts of the blob.
+type Source struct {
+ // Hosts is a registry configuration where this blob is stored.
+ Hosts RegistryHosts
+
+ // Name is an image reference which contains this blob.
+ Name reference.Spec
+
+ // Target is a descriptor of this blob.
+ Target ocispec.Descriptor
+
+ // Manifest is an image manifest which contains the blob. This will
+ // be used by the filesystem to pre-resolve some layers contained in
+ // the manifest.
+ // Currently, only layer digests (Manifest.Layers.Digest) will be used.
+ Manifest ocispec.Manifest
+}
+
+const (
+ // targetRefLabel is a label which contains image reference.
+ targetRefLabel = "containerd.io/snapshot/remote/stargz.reference"
+
+ // targetDigestLabel is a label which contains layer digest.
+ targetDigestLabel = "containerd.io/snapshot/remote/stargz.digest"
+
+ // targetImageLayersLabel is a label which contains layer digests contained in
+ // the target image.
+ targetImageLayersLabel = "containerd.io/snapshot/remote/stargz.layers"
+
+ // targetImageURLsLabelPrefix is a label prefix which constructs a map from the layer index to
+ // urls of the layer descriptor.
+ targetImageURLsLabelPrefix = "containerd.io/snapshot/remote/urls."
+
+ // targetURLsLabel is a label which contains the layer URL. This is only used to pass
+ // the URL from containerd to the snapshotter.
+ targetURLsLabel = "containerd.io/snapshot/remote/urls"
+)
+
+// FromDefaultLabels returns a function for converting snapshot labels to
+// source information based on labels.
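+// For example (illustrative values), labels such as
+// "containerd.io/snapshot/remote/stargz.reference": "docker.io/library/ubuntu:22.04" and
+// "containerd.io/snapshot/remote/stargz.digest": "sha256:<layer digest>" resolve
+// to a single Source describing that layer.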
+func FromDefaultLabels(hosts RegistryHosts) GetSources {
+ return func(labels map[string]string) ([]Source, error) {
+ refStr, ok := labels[targetRefLabel]
+ if !ok {
+ return nil, fmt.Errorf("reference hasn't been passed")
+ }
+ refspec, err := reference.Parse(refStr)
+ if err != nil {
+ return nil, err
+ }
+
+ digestStr, ok := labels[targetDigestLabel]
+ if !ok {
+ return nil, fmt.Errorf("digest hasn't been passed")
+ }
+ target, err := digest.Parse(digestStr)
+ if err != nil {
+ return nil, err
+ }
+
+ var neighboringLayers []ocispec.Descriptor
+ if l, ok := labels[targetImageLayersLabel]; ok {
+ layersStr := strings.Split(l, ",")
+ for i, l := range layersStr {
+ d, err := digest.Parse(l)
+ if err != nil {
+ return nil, err
+ }
+ if d.String() != target.String() {
+ desc := ocispec.Descriptor{Digest: d}
+ if urls, ok := labels[targetImageURLsLabelPrefix+fmt.Sprintf("%d", i)]; ok {
+ desc.URLs = strings.Split(urls, ",")
+ }
+ neighboringLayers = append(neighboringLayers, desc)
+ }
+ }
+ }
+
+ targetDesc := ocispec.Descriptor{
+ Digest: target,
+ Annotations: labels,
+ }
+ if targetURLs, ok := labels[targetURLsLabel]; ok {
+ targetDesc.URLs = append(targetDesc.URLs, strings.Split(targetURLs, ",")...)
+ }
+
+ return []Source{
+ {
+ Hosts: hosts,
+ Name: refspec,
+ Target: targetDesc,
+ Manifest: ocispec.Manifest{Layers: append([]ocispec.Descriptor{targetDesc}, neighboringLayers...)},
+ },
+ }, nil
+ }
+}
+
+// AppendDefaultLabelsHandlerWrapper makes a handler which appends image's basic
+// information to each layer descriptor as annotations during unpack. These
+// annotations will be passed to this remote snapshotter as labels and used to
+// construct source information.
+func AppendDefaultLabelsHandlerWrapper(ref string, prefetchSize int64) func(f images.Handler) images.Handler {
+ return func(f images.Handler) images.Handler {
+ return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ children, err := f.Handle(ctx, desc)
+ if err != nil {
+ return nil, err
+ }
+ switch desc.MediaType {
+ case ocispec.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest:
+ for i := range children {
+ c := &children[i]
+ if images.IsLayerType(c.MediaType) {
+ if c.Annotations == nil {
+ c.Annotations = make(map[string]string)
+ }
+ c.Annotations[targetRefLabel] = ref
+ c.Annotations[targetDigestLabel] = c.Digest.String()
+ var layers string
+ for i, l := range children[i:] {
+ if images.IsLayerType(l.MediaType) {
+ ls := fmt.Sprintf("%s,", l.Digest.String())
+ // This prevents the label from exceeding the size limit.
+ // Skipping layers is allowed here and only affects performance.
+ if err := labels.Validate(targetImageLayersLabel, layers+ls); err != nil {
+ break
+ }
+ layers += ls
+
+ // Store URLs of the neighbouring layer as well.
+ urlsKey := targetImageURLsLabelPrefix + fmt.Sprintf("%d", i)
+ c.Annotations[urlsKey] = appendWithValidation(urlsKey, l.URLs)
+ }
+ }
+ c.Annotations[targetImageLayersLabel] = strings.TrimSuffix(layers, ",")
+ c.Annotations[config.TargetPrefetchSizeLabel] = fmt.Sprintf("%d", prefetchSize)
+
+ // store the URL in an annotation so that containerd passes it to the snapshotter
+ c.Annotations[targetURLsLabel] = appendWithValidation(targetURLsLabel, c.URLs)
+ }
+ }
+ }
+ return children, nil
+ })
+ }
+}
+
+func appendWithValidation(key string, values []string) string {
+ var v string
+ for _, u := range values {
+ s := fmt.Sprintf("%s,", u)
+ if err := labels.Validate(key, v+s); err != nil {
+ break
+ }
+ v += s
+ }
+ return strings.TrimSuffix(v, ",")
+}
+
+// TODO: switch to "github.com/containerd/containerd/pkg/snapshotters" once all tools using
+//
+// stargz-snapshotter (e.g. k3s) move to containerd version where that pkg is available.
+const (
+ // targetImageLayersLabelContainerd is a label which contains layer digests contained in
+ // the target image and will be passed to snapshotters for preparing layers in
+ // parallel. Skipping some layers is allowed and only affects performance.
+ targetImageLayersLabelContainerd = "containerd.io/snapshot/cri.image-layers"
+)
+
+// AppendExtraLabelsHandler adds optional labels that aren't provided by
+// "github.com/containerd/containerd/pkg/snapshotters" but can be used for stargz snapshotter's extra functionalities.
+func AppendExtraLabelsHandler(prefetchSize int64, wrapper func(images.Handler) images.Handler) func(images.Handler) images.Handler {
+ return func(f images.Handler) images.Handler {
+ return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ children, err := wrapper(f).Handle(ctx, desc)
+ if err != nil {
+ return nil, err
+ }
+ switch desc.MediaType {
+ case ocispec.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest:
+ for i := range children {
+ c := &children[i]
+ if !images.IsLayerType(c.MediaType) {
+ continue
+ }
+ if _, ok := c.Annotations[targetURLsLabel]; !ok { // nop if this key is already set
+ c.Annotations[targetURLsLabel] = appendWithValidation(targetURLsLabel, c.URLs)
+ }
+
+ if _, ok := c.Annotations[config.TargetPrefetchSizeLabel]; !ok { // nop if this key is already set
+ c.Annotations[config.TargetPrefetchSizeLabel] = fmt.Sprintf("%d", prefetchSize)
+ }
+
+ // Store URLs of the neighbouring layer as well.
+ nlayers, ok := c.Annotations[targetImageLayersLabelContainerd]
+ if !ok {
+ continue
+ }
+ for j, dstr := range strings.Split(nlayers, ",") {
+ d, err := digest.Parse(dstr)
+ if err != nil {
+ return nil, err
+ }
+ l, ok := layerFromDigest(children, d)
+ if !ok {
+ continue
+ }
+ urlsKey := targetImageURLsLabelPrefix + fmt.Sprintf("%d", j)
+ if _, ok := c.Annotations[urlsKey]; !ok { // nop if this key is already set
+ c.Annotations[urlsKey] = appendWithValidation(urlsKey, l.URLs)
+ }
+ }
+ }
+ }
+ return children, nil
+ })
+ }
+}
+
+func layerFromDigest(layers []ocispec.Descriptor, target digest.Digest) (ocispec.Descriptor, bool) {
+ for _, l := range layers {
+ if l.Digest == target {
+ return l, images.IsLayerType(l.MediaType)
+ }
+ }
+ return ocispec.Descriptor{}, false
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/metadata/memory/reader.go b/vendor/github.com/containerd/stargz-snapshotter/metadata/memory/reader.go
new file mode 100644
index 000000000000..6854be34ee91
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/metadata/memory/reader.go
@@ -0,0 +1,283 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package memory
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "time"
+
+ "github.com/containerd/stargz-snapshotter/estargz"
+ "github.com/containerd/stargz-snapshotter/metadata"
+ digest "github.com/opencontainers/go-digest"
+)
+
+type reader struct {
+ r *estargz.Reader
+ rootID uint32
+
+ idMap map[uint32]*estargz.TOCEntry
+ idOfEntry map[string]uint32
+
+ estargzOpts []estargz.OpenOption
+}
+
+func newReader(er *estargz.Reader, rootID uint32, idMap map[uint32]*estargz.TOCEntry, idOfEntry map[string]uint32, estargzOpts []estargz.OpenOption) *reader {
+ return &reader{r: er, rootID: rootID, idMap: idMap, idOfEntry: idOfEntry, estargzOpts: estargzOpts}
+}
+
+func NewReader(sr *io.SectionReader, opts ...metadata.Option) (metadata.Reader, error) {
+ var rOpts metadata.Options
+ for _, o := range opts {
+ if err := o(&rOpts); err != nil {
+ return nil, fmt.Errorf("failed to apply option: %w", err)
+ }
+ }
+
+ telemetry := &estargz.Telemetry{}
+ if rOpts.Telemetry != nil {
+ telemetry.GetFooterLatency = estargz.MeasureLatencyHook(rOpts.Telemetry.GetFooterLatency)
+ telemetry.GetTocLatency = estargz.MeasureLatencyHook(rOpts.Telemetry.GetTocLatency)
+ telemetry.DeserializeTocLatency = estargz.MeasureLatencyHook(rOpts.Telemetry.DeserializeTocLatency)
+ }
+ var decompressors []estargz.Decompressor
+ for _, d := range rOpts.Decompressors {
+ decompressors = append(decompressors, d)
+ }
+
+ erOpts := []estargz.OpenOption{
+ estargz.WithTOCOffset(rOpts.TOCOffset),
+ estargz.WithTelemetry(telemetry),
+ estargz.WithDecompressors(decompressors...),
+ }
+ er, err := estargz.Open(sr, erOpts...)
+ if err != nil {
+ return nil, err
+ }
+ root, ok := er.Lookup("")
+ if !ok {
+ return nil, fmt.Errorf("failed to get root node")
+ }
+ rootID, idMap, idOfEntry, err := assignIDs(er, root)
+ if err != nil {
+ return nil, err
+ }
+ r := newReader(er, rootID, idMap, idOfEntry, erOpts)
+ return r, nil
+}
+
+// assignIDs assigns an ID to each TOC entry and returns mappings from ID to entry and vice versa.
+func assignIDs(er *estargz.Reader, e *estargz.TOCEntry) (rootID uint32, idMap map[uint32]*estargz.TOCEntry, idOfEntry map[string]uint32, err error) {
+ idMap = make(map[uint32]*estargz.TOCEntry)
+ idOfEntry = make(map[string]uint32)
+ curID := uint32(0)
+
+ nextID := func() (uint32, error) {
+ if curID == math.MaxUint32 {
+ return 0, fmt.Errorf("sequence id too large")
+ }
+ curID++
+ return curID, nil
+ }
+
+ var mapChildren func(e *estargz.TOCEntry) (uint32, error)
+ mapChildren = func(e *estargz.TOCEntry) (uint32, error) {
+ if e.Type == "hardlink" {
+ return 0, fmt.Errorf("unexpected type \"hardlink\": this should be replaced to the destination entry")
+ }
+
+ id, ok := idOfEntry[e.Name]
+ if !ok {
+ id, err = nextID()
+ if err != nil {
+ return 0, err
+ }
+ idMap[id] = e
+ idOfEntry[e.Name] = id
+ }
+
+ e.ForeachChild(func(_ string, ent *estargz.TOCEntry) bool {
+ _, err = mapChildren(ent)
+ return err == nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ return id, nil
+ }
+
+ rootID, err = mapChildren(e)
+ if err != nil {
+ return 0, nil, nil, err
+ }
+
+ return rootID, idMap, idOfEntry, nil
+}
+
+func (r *reader) RootID() uint32 {
+ return r.rootID
+}
+
+func (r *reader) TOCDigest() digest.Digest {
+ return r.r.TOCDigest()
+}
+
+func (r *reader) GetOffset(id uint32) (offset int64, err error) {
+ e, ok := r.idMap[id]
+ if !ok {
+ return 0, fmt.Errorf("entry %d not found", id)
+ }
+ return e.Offset, nil
+}
+
+func (r *reader) GetAttr(id uint32) (attr metadata.Attr, err error) {
+ e, ok := r.idMap[id]
+ if !ok {
+ err = fmt.Errorf("entry %d not found", id)
+ return
+ }
+ // TODO: zero copy
+ attrFromTOCEntry(e, &attr)
+ return
+}
+
+func (r *reader) GetChild(pid uint32, base string) (id uint32, attr metadata.Attr, err error) {
+ e, ok := r.idMap[pid]
+ if !ok {
+ err = fmt.Errorf("parent entry %d not found", pid)
+ return
+ }
+ child, ok := e.LookupChild(base)
+ if !ok {
+ err = fmt.Errorf("child %q of entry %d not found", base, pid)
+ return
+ }
+ cid, ok := r.idOfEntry[child.Name]
+ if !ok {
+ err = fmt.Errorf("id of entry %q not found", base)
+ return
+ }
+ // TODO: zero copy
+ attrFromTOCEntry(child, &attr)
+ return cid, attr, nil
+}
+
+func (r *reader) ForeachChild(id uint32, f func(name string, id uint32, mode os.FileMode) bool) error {
+ e, ok := r.idMap[id]
+ if !ok {
+ return fmt.Errorf("parent entry %d not found", id)
+ }
+ var err error
+ e.ForeachChild(func(baseName string, ent *estargz.TOCEntry) bool {
+ id, ok := r.idOfEntry[ent.Name]
+ if !ok {
+ err = fmt.Errorf("id of child entry %q not found", baseName)
+ return false
+ }
+ return f(baseName, id, ent.Stat().Mode())
+ })
+ return err
+}
+
+func (r *reader) OpenFile(id uint32) (metadata.File, error) {
+ e, ok := r.idMap[id]
+ if !ok {
+ return nil, fmt.Errorf("entry %d not found", id)
+ }
+ sr, err := r.r.OpenFile(e.Name)
+ if err != nil {
+ return nil, err
+ }
+ return &file{r, e, sr}, nil
+}
+
+func (r *reader) OpenFileWithPreReader(id uint32, preRead func(id uint32, chunkOffset, chunkSize int64, chunkDigest string, r io.Reader) error) (metadata.File, error) {
+ e, ok := r.idMap[id]
+ if !ok {
+ return nil, fmt.Errorf("entry %d not found", id)
+ }
+ sr, err := r.r.OpenFileWithPreReader(e.Name, func(e *estargz.TOCEntry, chunkR io.Reader) error {
+ cid, ok := r.idOfEntry[e.Name]
+ if !ok {
+ return fmt.Errorf("id of entry %q not found", e.Name)
+ }
+ return preRead(cid, e.ChunkOffset, e.ChunkSize, e.ChunkDigest, chunkR)
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &file{r, e, sr}, nil
+}
+
+func (r *reader) Clone(sr *io.SectionReader) (metadata.Reader, error) {
+ er, err := estargz.Open(sr, r.estargzOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return newReader(er, r.rootID, r.idMap, r.idOfEntry, r.estargzOpts), nil
+}
+
+func (r *reader) Close() error {
+ return nil
+}
+
+type file struct {
+ r *reader
+ e *estargz.TOCEntry
+ sr *io.SectionReader
+}
+
+func (r *file) ChunkEntryForOffset(offset int64) (off int64, size int64, dgst string, ok bool) {
+ e, ok := r.r.r.ChunkEntryForOffset(r.e.Name, offset)
+ if !ok {
+ return 0, 0, "", false
+ }
+ dgst = e.Digest
+ if e.ChunkDigest != "" {
+ // NOTE* "reg" also can contain ChunkDigest (e.g. when "reg" is the first entry of
+ // chunked file)
+ dgst = e.ChunkDigest
+ }
+ return e.ChunkOffset, e.ChunkSize, dgst, true
+}
+
+func (r *file) ReadAt(p []byte, off int64) (n int, err error) {
+ return r.sr.ReadAt(p, off)
+}
+
+func (r *reader) NumOfNodes() (i int, _ error) {
+ return len(r.idMap), nil
+}
+
+// TODO: share it with db pkg
+func attrFromTOCEntry(src *estargz.TOCEntry, dst *metadata.Attr) *metadata.Attr {
+ dst.Size = src.Size
+ dst.ModTime, _ = time.Parse(time.RFC3339, src.ModTime3339)
+ dst.LinkName = src.LinkName
+ dst.Mode = src.Stat().Mode()
+ dst.UID = src.UID
+ dst.GID = src.GID
+ dst.DevMajor = src.DevMajor
+ dst.DevMinor = src.DevMinor
+ dst.Xattrs = src.Xattrs
+ dst.NumLink = src.NumLink
+ return dst
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/metadata/metadata.go b/vendor/github.com/containerd/stargz-snapshotter/metadata/metadata.go
new file mode 100644
index 000000000000..ee25f27d0016
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/metadata/metadata.go
@@ -0,0 +1,139 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package metadata
+
+import (
+ "io"
+ "os"
+ "time"
+
+ "github.com/containerd/stargz-snapshotter/estargz"
+ digest "github.com/opencontainers/go-digest"
+)
+
+// Attr represents the attributes of a node.
+type Attr struct {
+ // Size, for regular files, is the logical size of the file.
+ Size int64
+
+ // ModTime is the modification time of the node.
+ ModTime time.Time
+
+ // LinkName, for symlinks, is the link target.
+ LinkName string
+
+ // Mode is the permission and mode bits.
+ Mode os.FileMode
+
+ // UID is the user ID of the owner.
+ UID int
+
+ // GID is the group ID of the owner.
+ GID int
+
+ // DevMajor is the major device number for the device.
+ DevMajor int
+
+ // DevMinor is the minor device number for the device.
+ DevMinor int
+
+ // Xattrs are the extended attributes for the node.
+ Xattrs map[string][]byte
+
+ // NumLink is the number of names pointing to this node.
+ NumLink int
+}
+
+// Store reads the provided eStargz blob and creates a metadata reader.
+type Store func(sr *io.SectionReader, opts ...Option) (Reader, error)
+
+// Reader provides access to file metadata of a blob.
+type Reader interface {
+ RootID() uint32
+ TOCDigest() digest.Digest
+
+ GetOffset(id uint32) (offset int64, err error)
+ GetAttr(id uint32) (attr Attr, err error)
+ GetChild(pid uint32, base string) (id uint32, attr Attr, err error)
+ ForeachChild(id uint32, f func(name string, id uint32, mode os.FileMode) bool) error
+ OpenFile(id uint32) (File, error)
+ OpenFileWithPreReader(id uint32, preRead func(id uint32, chunkOffset, chunkSize int64, chunkDigest string, r io.Reader) error) (File, error)
+
+ Clone(sr *io.SectionReader) (Reader, error)
+ Close() error
+}
+
+type File interface {
+ ChunkEntryForOffset(offset int64) (off int64, size int64, dgst string, ok bool)
+ ReadAt(p []byte, off int64) (n int, err error)
+}
+
+type Decompressor interface {
+ estargz.Decompressor
+
+ // DecompressTOC decompresses the passed blob and returns a reader of the TOC JSON.
+ //
+ // If the tocOffset returned by ParseFooter is < 0, we assume the TOC isn't contained
+ // in the blob. In that case, a nil reader is passed to DecompressTOC, which is then
+ // expected to acquire the TOC from an external location and return it.
+ DecompressTOC(io.Reader) (tocJSON io.ReadCloser, err error)
+}
+
+type Options struct {
+ TOCOffset int64
+ Telemetry *Telemetry
+ Decompressors []Decompressor
+}
+
+// Option is an option to configure the behaviour of the reader.
+type Option func(o *Options) error
+
+// WithTOCOffset option specifies the offset of TOC
+func WithTOCOffset(tocOffset int64) Option {
+ return func(o *Options) error {
+ o.TOCOffset = tocOffset
+ return nil
+ }
+}
+
+// WithTelemetry option specifies the telemetry hooks
+func WithTelemetry(telemetry *Telemetry) Option {
+ return func(o *Options) error {
+ o.Telemetry = telemetry
+ return nil
+ }
+}
+
+// WithDecompressors option specifies decompressors to use.
+// Default is gzip-based decompressor.
+func WithDecompressors(decompressors ...Decompressor) Option {
+ return func(o *Options) error {
+ o.Decompressors = decompressors
+ return nil
+ }
+}
+
+// MeasureLatencyHook is a func which takes a start time and records the elapsed duration.
+type MeasureLatencyHook func(time.Time)
+
+// Telemetry defines telemetry hooks. By implementing these hooks you can record
+// latency metrics for the respective steps of the estargz open operation.
+type Telemetry struct {
+ GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds)
+ GetTocLatency MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds)
+ DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds)
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/nativeconverter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/nativeconverter/estargz/estargz.go
new file mode 100644
index 000000000000..57d54011f5ce
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/nativeconverter/estargz/estargz.go
@@ -0,0 +1,154 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package estargz
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/core/images/converter"
+ "github.com/containerd/containerd/v2/core/images/converter/uncompress"
+ "github.com/containerd/containerd/v2/pkg/archive/compression"
+ "github.com/containerd/containerd/v2/pkg/labels"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/stargz-snapshotter/estargz"
+ "github.com/containerd/stargz-snapshotter/util/ioutils"
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// LayerConvertWithLayerAndCommonOptsFunc converts legacy tar.gz layers into eStargz tar.gz
+// layers. The media type is unchanged. Should be used in conjunction with WithDockerToOCI().
+// See LayerConvertFunc for more details. The difference from LayerConvertFunc is that this
+// function allows specifying additional eStargz options per layer.
+func LayerConvertWithLayerAndCommonOptsFunc(opts map[digest.Digest][]estargz.Option, commonOpts ...estargz.Option) converter.ConvertFunc {
+ if opts == nil {
+ return LayerConvertFunc(commonOpts...)
+ }
+ return func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
+ // TODO: enable specifying options per layer "index" because it's possible that
+ // two layers in an image have the same digest (though this should be a rare case)
+ return LayerConvertFunc(append(commonOpts, opts[desc.Digest]...)...)(ctx, cs, desc)
+ }
+}
+
+// LayerConvertFunc converts legacy tar.gz layers into eStargz tar.gz layers.
+// Media type is unchanged.
+//
+// Should be used in conjunction with WithDockerToOCI().
+//
+// Otherwise "containerd.io/snapshot/stargz/toc.digest" annotation will be lost,
+// because the Docker media type does not support layer annotations.
+func LayerConvertFunc(opts ...estargz.Option) converter.ConvertFunc {
+ return func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
+ if !images.IsLayerType(desc.MediaType) {
+ // No conversion. No need to return an error here.
+ return nil, nil
+ }
+ info, err := cs.Info(ctx, desc.Digest)
+ if err != nil {
+ return nil, err
+ }
+ labelz := info.Labels
+ if labelz == nil {
+ labelz = make(map[string]string)
+ }
+
+ ra, err := cs.ReaderAt(ctx, desc)
+ if err != nil {
+ return nil, err
+ }
+ defer ra.Close()
+ sr := io.NewSectionReader(ra, 0, desc.Size)
+ blob, err := estargz.Build(sr, append(opts, estargz.WithContext(ctx))...)
+ if err != nil {
+ return nil, err
+ }
+ defer blob.Close()
+ ref := fmt.Sprintf("convert-estargz-from-%s", desc.Digest)
+ w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
+ if err != nil {
+ return nil, err
+ }
+ defer w.Close()
+
+ // Reset the writing position: an old writer may remain without having
+ // been aborted (e.g. when a conversion was interrupted by a signal)
+ if err := w.Truncate(0); err != nil {
+ return nil, err
+ }
+
+ // Copy and count the contents
+ pr, pw := io.Pipe()
+ c := new(ioutils.CountWriter)
+ doneCount := make(chan struct{})
+ go func() {
+ defer close(doneCount)
+ defer pr.Close()
+ decompressR, err := compression.DecompressStream(pr)
+ if err != nil {
+ pr.CloseWithError(err)
+ return
+ }
+ defer decompressR.Close()
+ if _, err := io.Copy(c, decompressR); err != nil {
+ pr.CloseWithError(err)
+ return
+ }
+ }()
+ n, err := io.Copy(w, io.TeeReader(blob, pw))
+ if err != nil {
+ return nil, err
+ }
+ if err := blob.Close(); err != nil {
+ return nil, err
+ }
+ if err := pw.Close(); err != nil {
+ return nil, err
+ }
+ <-doneCount
+
+ // update diffID label
+ labelz[labels.LabelUncompressed] = blob.DiffID().String()
+ if err = w.Commit(ctx, n, "", content.WithLabels(labelz)); err != nil && !errdefs.IsAlreadyExists(err) {
+ return nil, err
+ }
+ if err := w.Close(); err != nil {
+ return nil, err
+ }
+ newDesc := desc
+ if uncompress.IsUncompressedType(newDesc.MediaType) {
+ if images.IsDockerType(newDesc.MediaType) {
+ newDesc.MediaType += ".gzip"
+ } else {
+ newDesc.MediaType += "+gzip"
+ }
+ }
+ newDesc.Digest = w.Digest()
+ newDesc.Size = n
+ if newDesc.Annotations == nil {
+ newDesc.Annotations = make(map[string]string, 1)
+ }
+ newDesc.Annotations[estargz.TOCJSONDigestAnnotation] = blob.TOCDigest().String()
+ newDesc.Annotations[estargz.StoreUncompressedSizeAnnotation] = fmt.Sprintf("%d", c.Size())
+ return &newDesc, nil
+ }
+}
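A sketch of driving this converter through containerd's image converter; `cli` is assumed to be a connected containerd client, and both image references are placeholders. `WithDockerToOCI(true)` matters because, as the doc comment above notes, Docker media types cannot carry the TOC digest annotation:

```go
import (
	"context"

	"github.com/containerd/containerd/v2/client"
	"github.com/containerd/containerd/v2/core/images/converter"
	estargzconvert "github.com/containerd/stargz-snapshotter/nativeconverter/estargz"
)

func convertToEStargz(ctx context.Context, cli *client.Client) error {
	// Convert each tar.gz layer to eStargz and switch to OCI media types
	// so the TOC digest annotation survives.
	_, err := converter.Convert(ctx, cli, "example.com/app:esgz", "example.com/app:latest",
		converter.WithDockerToOCI(true),
		converter.WithLayerConvertFunc(estargzconvert.LayerConvertFunc()),
	)
	return err
}
```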
diff --git a/vendor/github.com/containerd/stargz-snapshotter/nativeconverter/estargz/externaltoc/converter.go b/vendor/github.com/containerd/stargz-snapshotter/nativeconverter/estargz/externaltoc/converter.go
new file mode 100644
index 000000000000..3443f99baae3
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/nativeconverter/estargz/externaltoc/converter.go
@@ -0,0 +1,398 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package externaltoc
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+ "time"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/core/images/converter"
+ "github.com/containerd/containerd/v2/core/images/converter/uncompress"
+ "github.com/containerd/containerd/v2/pkg/archive/compression"
+ "github.com/containerd/containerd/v2/pkg/labels"
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/stargz-snapshotter/estargz"
+ esgzexternaltoc "github.com/containerd/stargz-snapshotter/estargz/externaltoc"
+ estargzconvert "github.com/containerd/stargz-snapshotter/nativeconverter/estargz"
+ "github.com/containerd/stargz-snapshotter/util/ioutils"
+ "github.com/opencontainers/go-digest"
+ ocispecspec "github.com/opencontainers/image-spec/specs-go"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// LayerConvertFunc converts legacy tar.gz layers into eStargz tar.gz layers.
+//
+// The finalize() callback returned by this function returns the image that contains
+// the external TOC of each layer. Note that the returned image isn't stored in the
+// containerd image store, so the caller needs to do that.
+//
+// Media type is unchanged.
+//
+// Should be used in conjunction with WithDockerToOCI().
+//
+// Otherwise "containerd.io/snapshot/stargz/toc.digest" annotation will be lost,
+// because the Docker media type does not support layer annotations.
+//
+// WithCompression() in esgzOpts will be ignored; the compression configured for the external TOC is used instead.
+func LayerConvertFunc(esgzOpts []estargz.Option, compressionLevel int) (convertFunc converter.ConvertFunc, finalize func(ctx context.Context, cs content.Store, ref string, desc *ocispec.Descriptor) (*images.Image, error)) {
+ return layerConvert(func(c estargz.Compression) converter.ConvertFunc {
+ return estargzconvert.LayerConvertFunc(append(esgzOpts, estargz.WithCompression(c))...)
+ }, compressionLevel)
+}
+
+// LayerConvertWithLayerAndCommonOptsFunc converts legacy tar.gz layers into eStargz.
+// Media type is unchanged. Should be used in conjunction with WithDockerToOCI(). See
+// LayerConvertFunc for more details. The difference between this function and
+// LayerConvertFunc is that this allows specifying additional eStargz options per layer.
+func LayerConvertWithLayerAndCommonOptsFunc(opts map[digest.Digest][]estargz.Option, commonOpts []estargz.Option, compressionLevel int) (convertFunc converter.ConvertFunc, finalize func(ctx context.Context, cs content.Store, ref string, desc *ocispec.Descriptor) (*images.Image, error)) {
+ return layerConvert(func(c estargz.Compression) converter.ConvertFunc {
+ return estargzconvert.LayerConvertWithLayerAndCommonOptsFunc(opts, append(commonOpts,
+ estargz.WithCompression(c),
+ )...)
+ }, compressionLevel)
+}
+
+// LayerConvertLossLessConfig is configuration for LayerConvertLossLessFunc.
+type LayerConvertLossLessConfig struct {
+ CompressionLevel int
+ ChunkSize int
+ MinChunkSize int
+}
+
+// LayerConvertLossLessFunc converts legacy tar.gz layers into eStargz tar.gz layers without changing
+// the diffIDs (i.e. uncompressed digest).
+//
+// The finalize() callback returned by this function returns the image that contains
+// the external TOC of each layer. Note that the returned image isn't stored in the
+// containerd image store, so the caller needs to do that.
+//
+// Media type is unchanged.
+//
+// Should be used in conjunction with WithDockerToOCI().
+//
+// Otherwise "containerd.io/snapshot/stargz/toc.digest" annotation will be lost,
+// because the Docker media type does not support layer annotations.
+//
+// WithCompression() in esgzOpts will be ignored; the compression configured for the external TOC is used instead.
+func LayerConvertLossLessFunc(cfg LayerConvertLossLessConfig) (convertFunc converter.ConvertFunc, finalize func(ctx context.Context, cs content.Store, ref string, desc *ocispec.Descriptor) (*images.Image, error)) {
+ return layerConvert(func(c estargz.Compression) converter.ConvertFunc {
+ return layerLossLessConvertFunc(c, cfg.ChunkSize, cfg.MinChunkSize)
+ }, cfg.CompressionLevel)
+}
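A sketch of the two-step flow these functions imply: run the conversion, then call `finalize()` and store the resulting TOC image yourself, since (per the doc comments above) it isn't stored automatically. `cli`, `dstRef`, and `srcRef` are placeholders:

```go
import (
	"compress/gzip"
	"context"

	"github.com/containerd/containerd/v2/client"
	"github.com/containerd/containerd/v2/core/images/converter"
	"github.com/containerd/stargz-snapshotter/nativeconverter/estargz/externaltoc"
)

func convertWithExternalTOC(ctx context.Context, cli *client.Client, dstRef, srcRef string) error {
	convertFunc, finalize := externaltoc.LayerConvertFunc(nil, gzip.BestCompression)
	img, err := converter.Convert(ctx, cli, dstRef, srcRef,
		converter.WithDockerToOCI(true),
		converter.WithLayerConvertFunc(convertFunc))
	if err != nil {
		return err
	}
	// finalize() builds the TOC manifest image (named "<dstRef>-esgztoc",
	// per getTOCReference below) but does not store it; create it in the
	// image store explicitly.
	tocImg, err := finalize(ctx, cli.ContentStore(), dstRef, &img.Target)
	if err != nil {
		return err
	}
	_, err = cli.ImageService().Create(ctx, *tocImg)
	return err
}
```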
+
+func layerConvert(layerConvertFunc func(estargz.Compression) converter.ConvertFunc, compressionLevel int) (convertFunc converter.ConvertFunc, finalize func(ctx context.Context, cs content.Store, ref string, desc *ocispec.Descriptor) (*images.Image, error)) {
+ type tocInfo struct {
+ digest digest.Digest
+ size int64
+ }
+ esgzDigest2TOC := make(map[digest.Digest]tocInfo)
+ // TODO: currently, all layers of all platforms are combined into one TOC manifest. Maybe we can consider
+ // having a separate TOC manifest per platform.
+ converterFunc := func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
+ cm := esgzexternaltoc.NewGzipCompressionWithLevel(nil, compressionLevel)
+ c := cm.(*esgzexternaltoc.GzipCompression)
+ cf := layerConvertFunc(c)
+ desc2, err := cf(ctx, cs, desc)
+ if err != nil {
+ return desc2, err
+ }
+ var layerDgst digest.Digest
+ if desc2 != nil {
+ layerDgst = desc2.Digest
+ } else {
+ layerDgst = desc.Digest // no conversion happened
+ }
+ dgst, size, err := writeTOCTo(ctx, c, cs)
+ if err != nil {
+ return nil, err
+ }
+ esgzDigest2TOC[layerDgst] = tocInfo{dgst, size}
+ return desc2, nil
+ }
+ finalizeFunc := func(ctx context.Context, cs content.Store, ref string, desc *ocispec.Descriptor) (*images.Image, error) {
+ var layers []ocispec.Descriptor
+ for esgzDigest, toc := range esgzDigest2TOC {
+ layers = append(layers, ocispec.Descriptor{
+ MediaType: ocispec.MediaTypeImageLayerGzip,
+ Digest: toc.digest,
+ Size: toc.size,
+ Annotations: map[string]string{
+ "containerd.io/snapshot/stargz/layer.digest": esgzDigest.String(),
+ },
+ })
+ }
+ sort.Slice(layers, func(i, j int) bool {
+ return layers[i].Digest.String() < layers[j].Digest.String()
+ })
+ mfst, err := createManifest(ctx, cs, ocispec.ImageConfig{}, layers)
+ if err != nil {
+ return nil, err
+ }
+ tocImgRef, err := getTOCReference(ref)
+ if err != nil {
+ return nil, err
+ }
+ return &images.Image{
+ Name: tocImgRef,
+ Target: *mfst,
+ }, nil
+ }
+ return converterFunc, finalizeFunc
+}
+
+func getTOCReference(ref string) (string, error) {
+ refspec, err := reference.Parse(ref)
+ if err != nil {
+ return "", err
+ }
+ refspec.Object = refspec.Object + "-esgztoc" // TODO: support custom location
+ return refspec.String(), nil
+}
+
+func layerLossLessConvertFunc(compressor estargz.Compressor, chunkSize int, minChunkSize int) converter.ConvertFunc {
+ return func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
+ if !images.IsLayerType(desc.MediaType) {
+ // No conversion. No need to return an error here.
+ return nil, nil
+ }
+ info, err := cs.Info(ctx, desc.Digest)
+ if err != nil {
+ return nil, err
+ }
+ labelz := info.Labels
+ if labelz == nil {
+ labelz = make(map[string]string)
+ }
+
+ ra, err := cs.ReaderAt(ctx, desc)
+ if err != nil {
+ return nil, err
+ }
+ defer ra.Close()
+ sr := io.NewSectionReader(ra, 0, desc.Size)
+ ref := fmt.Sprintf("convert-estargz-from-%s", desc.Digest)
+ w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
+ if err != nil {
+ return nil, err
+ }
+ defer w.Close()
+
+ // Reset the writing position: an old writer may remain without having
+ // been aborted (e.g. when a conversion was interrupted by a signal)
+ if err := w.Truncate(0); err != nil {
+ return nil, err
+ }
+
+ // Copy and count the contents
+ esgzUW, esgzUncompressedInfoCh := calcUncompression()
+ orgUW, orgUncompressedInfoCh := calcUncompression()
+ countW := new(ioutils.CountWriter)
+ mw := io.MultiWriter(io.MultiWriter(w, countW), esgzUW)
+ var ew *estargz.Writer
+ if compressor != nil {
+ ew = estargz.NewWriterWithCompressor(mw, compressor)
+ } else {
+ ew = estargz.NewWriter(mw)
+ }
+ if chunkSize > 0 {
+ ew.ChunkSize = chunkSize
+ }
+ ew.MinChunkSize = minChunkSize
+ if err := ew.AppendTarLossLess(io.TeeReader(sr, orgUW)); err != nil {
+ return nil, fmt.Errorf("cannot perform compression in lossless way: %w", err)
+ }
+ tocDgst, err := ew.Close()
+ if err != nil {
+ return nil, err
+ }
+ n := countW.Size()
+ if err := esgzUW.Close(); err != nil {
+ return nil, err
+ }
+ if err := orgUW.Close(); err != nil {
+ return nil, err
+ }
+ esgzUncompressedInfo := <-esgzUncompressedInfoCh
+ orgUncompressedInfo := <-orgUncompressedInfoCh
+
+ // check the lossless conversion
+ if esgzUncompressedInfo.diffID.String() != orgUncompressedInfo.diffID.String() {
+ return nil, fmt.Errorf("unexpected diffID %q; want %q",
+ esgzUncompressedInfo.diffID.String(), orgUncompressedInfo.diffID.String())
+ }
+ if esgzUncompressedInfo.size != orgUncompressedInfo.size {
+ return nil, fmt.Errorf("unexpected uncompressed size %q; want %q",
+ esgzUncompressedInfo.size, orgUncompressedInfo.size)
+ }
+
+ // write diffID label
+ labelz[labels.LabelUncompressed] = esgzUncompressedInfo.diffID.String()
+ if err = w.Commit(ctx, n, "", content.WithLabels(labelz)); err != nil && !errdefs.IsAlreadyExists(err) {
+ return nil, err
+ }
+ if err := w.Close(); err != nil {
+ return nil, err
+ }
+ newDesc := desc
+ if uncompress.IsUncompressedType(newDesc.MediaType) {
+ if images.IsDockerType(newDesc.MediaType) {
+ newDesc.MediaType += ".gzip"
+ } else {
+ newDesc.MediaType += "+gzip"
+ }
+ }
+ newDesc.Digest = w.Digest()
+ newDesc.Size = n
+ if newDesc.Annotations == nil {
+ newDesc.Annotations = make(map[string]string, 1)
+ }
+ newDesc.Annotations[estargz.TOCJSONDigestAnnotation] = tocDgst.String()
+ newDesc.Annotations[estargz.StoreUncompressedSizeAnnotation] = fmt.Sprintf("%d", esgzUncompressedInfo.size)
+ return &newDesc, nil
+ }
+}
+
+type uncompressedInfo struct {
+ diffID digest.Digest
+ size int64
+}
+
+func calcUncompression() (*io.PipeWriter, chan uncompressedInfo) {
+ pr, pw := io.Pipe()
+ infoCh := make(chan uncompressedInfo)
+ go func() {
+ defer pr.Close()
+
+ c := new(ioutils.CountWriter)
+ diffID := digest.Canonical.Digester()
+ decompressR, err := compression.DecompressStream(pr)
+ if err != nil {
+ pr.CloseWithError(err)
+ close(infoCh)
+ return
+ }
+ defer decompressR.Close()
+ if _, err := io.Copy(io.MultiWriter(c, diffID.Hash()), decompressR); err != nil {
+ pr.CloseWithError(err)
+ close(infoCh)
+ return
+ }
+ infoCh <- uncompressedInfo{
+ diffID: diffID.Digest(),
+ size: c.Size(),
+ }
+ }()
+ return pw, infoCh
+}
+
+func writeTOCTo(ctx context.Context, gc *esgzexternaltoc.GzipCompression, cs content.Store) (digest.Digest, int64, error) {
+ ref := "external-toc" + time.Now().String()
+ w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
+ if err != nil {
+ return "", 0, err
+ }
+ defer w.Close()
+ if err := w.Truncate(0); err != nil {
+ return "", 0, err
+ }
+ c := new(ioutils.CountWriter)
+ dgstr := digest.Canonical.Digester()
+ n, err := gc.WriteTOCTo(io.MultiWriter(io.MultiWriter(w, dgstr.Hash()), c))
+ if err != nil {
+ return "", 0, err
+ }
+ if err := w.Commit(ctx, int64(n), ""); err != nil && !errdefs.IsAlreadyExists(err) {
+ return "", 0, err
+ }
+ if err := w.Close(); err != nil {
+ return "", 0, err
+ }
+
+ return dgstr.Digest(), c.Size(), nil
+}
+
+func createManifest(ctx context.Context, cs content.Store, config ocispec.ImageConfig, layers []ocispec.Descriptor) (*ocispec.Descriptor, error) {
+ // Create config
+ configDgst, configSize, err := writeJSON(ctx, cs, &config, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create manifest
+ mfst := ocispec.Manifest{
+ Versioned: ocispecspec.Versioned{
+ SchemaVersion: 2,
+ },
+ MediaType: ocispec.MediaTypeImageManifest,
+ Config: ocispec.Descriptor{
+ MediaType: ocispec.MediaTypeImageConfig,
+ Digest: configDgst,
+ Size: configSize,
+ },
+ Layers: layers,
+ }
+ mfstLabels := make(map[string]string)
+ for i, ld := range mfst.Layers {
+ mfstLabels[fmt.Sprintf("containerd.io/gc.ref.content.l.%d", i)] = ld.Digest.String()
+ }
+ mfstLabels["containerd.io/gc.ref.content.c.0"] = configDgst.String()
+ mfstDgst, mfstSize, err := writeJSON(ctx, cs, &mfst, mfstLabels)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ocispec.Descriptor{
+ MediaType: ocispec.MediaTypeImageManifest,
+ Digest: mfstDgst,
+ Size: mfstSize,
+ }, nil
+}
+func writeJSON(ctx context.Context, cs content.Store, data interface{}, labels map[string]string) (digest.Digest, int64, error) {
+ raw, err := json.Marshal(data)
+ if err != nil {
+ return "", 0, err
+ }
+ size := len(raw)
+ ref := "write-json-ref" + digest.FromBytes(raw).String()
+ w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
+ if err != nil {
+ return "", 0, err
+ }
+ defer w.Close()
+ if err := w.Truncate(0); err != nil {
+ return "", 0, err
+ }
+ if _, err := w.Write(raw); err != nil {
+ return "", 0, err
+ }
+ if err = w.Commit(ctx, int64(size), "", content.WithLabels(labels)); err != nil && !errdefs.IsAlreadyExists(err) {
+ return "", 0, err
+ }
+ dgst := w.Digest()
+ if err := w.Close(); err != nil {
+ return "", 0, err
+ }
+ return dgst, int64(size), nil
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/nativeconverter/estargz/externaltoc/fetcher.go b/vendor/github.com/containerd/stargz-snapshotter/nativeconverter/estargz/externaltoc/fetcher.go
new file mode 100644
index 000000000000..01fc6fa5c5d5
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/nativeconverter/estargz/externaltoc/fetcher.go
@@ -0,0 +1,90 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package externaltoc
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/containerd/containerd/v2/core/remotes"
+ "github.com/containerd/containerd/v2/core/remotes/docker"
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/platforms"
+ esgzexternaltoc "github.com/containerd/stargz-snapshotter/estargz/externaltoc"
+ "github.com/containerd/stargz-snapshotter/fs/source"
+ "github.com/containerd/stargz-snapshotter/util/containerdutil"
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// NewRemoteDecompressor returns a gzip decompressor for an eStargz layer whose
+// TOC is stored externally; the TOC blob is fetched lazily from the registry.
+func NewRemoteDecompressor(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) *esgzexternaltoc.GzipDecompressor {
+ return esgzexternaltoc.NewGzipDecompressor(func() ([]byte, error) {
+ resolver := docker.NewResolver(docker.ResolverOptions{
+ Hosts: func(host string) ([]docker.RegistryHost, error) {
+ if host != refspec.Hostname() {
+ return nil, fmt.Errorf("unexpected host %q for image ref %q", host, refspec.String())
+ }
+ return hosts(refspec)
+ },
+ })
+ return fetchTOCBlob(ctx, resolver, refspec, desc.Digest)
+ })
+}
+
+func fetchTOCBlob(ctx context.Context, resolver remotes.Resolver, refspec reference.Spec, dgst digest.Digest) ([]byte, error) {
+ // TODO: support custom location of TOC manifest and TOCs using annotations, etc.
+ tocImgRef, err := getTOCReference(refspec.String())
+ if err != nil {
+ return nil, err
+ }
+ _, img, err := resolver.Resolve(ctx, tocImgRef)
+ if err != nil {
+ return nil, err
+ }
+ fetcher, err := resolver.Fetcher(ctx, tocImgRef)
+ if err != nil {
+ return nil, err
+ }
+ // TODO: cache this manifest
+ manifest, err := containerdutil.FetchManifestPlatform(ctx, fetcher, img, platforms.DefaultSpec())
+ if err != nil {
+ return nil, err
+ }
+ return fetchTOCBlobFromManifest(ctx, fetcher, manifest, dgst)
+}
+
+func fetchTOCBlobFromManifest(ctx context.Context, fetcher remotes.Fetcher, manifest ocispec.Manifest, layerDigest digest.Digest) ([]byte, error) {
+ for _, l := range manifest.Layers {
+ if len(l.Annotations) == 0 {
+ continue
+ }
+ ldgst, ok := l.Annotations["containerd.io/snapshot/stargz/layer.digest"]
+ if !ok {
+ continue
+ }
+ if ldgst == layerDigest.String() {
+ r, err := fetcher.Fetch(ctx, l)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Close()
+ return io.ReadAll(r)
+ }
+ }
+ return nil, fmt.Errorf("TOC not found")
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/service/config.go b/vendor/github.com/containerd/stargz-snapshotter/service/config.go
new file mode 100644
index 000000000000..95bb6da5c3a9
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/service/config.go
@@ -0,0 +1,70 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package service
+
+import (
+ "github.com/containerd/stargz-snapshotter/fs/config"
+ "github.com/containerd/stargz-snapshotter/service/resolver"
+)
+
+// Config is configuration for stargz snapshotter service.
+type Config struct {
+ config.Config
+
+ // KubeconfigKeychainConfig is config for kubeconfig-based keychain.
+ KubeconfigKeychainConfig `toml:"kubeconfig_keychain"`
+
+ // CRIKeychainConfig is config for CRI-based keychain.
+ CRIKeychainConfig `toml:"cri_keychain"`
+
+ // ResolverConfig is config for resolving registries.
+ ResolverConfig `toml:"resolver"`
+
+ // SnapshotterConfig is snapshotter-related config.
+ SnapshotterConfig `toml:"snapshotter"`
+}
+
+// KubeconfigKeychainConfig is config for kubeconfig-based keychain.
+type KubeconfigKeychainConfig struct {
+ // EnableKeychain enables kubeconfig-based keychain
+ EnableKeychain bool `toml:"enable_keychain"`
+
+ // KubeconfigPath is the path to kubeconfig which can be used to sync
+ // secrets on the cluster into this snapshotter.
+ KubeconfigPath string `toml:"kubeconfig_path"`
+}
+
+// CRIKeychainConfig is config for CRI-based keychain.
+type CRIKeychainConfig struct {
+ // EnableKeychain enables CRI-based keychain
+ EnableKeychain bool `toml:"enable_keychain"`
+
+ // ImageServicePath is the path to the unix socket of backing CRI Image Service (e.g. containerd CRI plugin)
+ ImageServicePath string `toml:"image_service_path"`
+}
+
+// ResolverConfig is config for resolving registries.
+type ResolverConfig resolver.Config
+
+// SnapshotterConfig is snapshotter-related config.
+type SnapshotterConfig struct {
+ // AllowInvalidMountsOnRestart allows snapshot mounts that cannot access their
+ // data source to remain when restarting the snapshotter.
+ // NOTE: The user needs to manually remove such snapshots from containerd's metadata store
+ // using ctr (e.g. `ctr snapshot rm`).
+ AllowInvalidMountsOnRestart bool `toml:"allow_invalid_mounts_on_restart"`
+}
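For illustration, a hypothetical fragment of the snapshotter's TOML configuration matching the struct tags above; the section and key names come from the tags, while the values are placeholders:

```toml
[kubeconfig_keychain]
enable_keychain = true
kubeconfig_path = "/etc/kubernetes/snapshotter.kubeconfig"

[cri_keychain]
enable_keychain = true
image_service_path = "/run/containerd/containerd.sock"

[snapshotter]
allow_invalid_mounts_on_restart = false
```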
diff --git a/vendor/github.com/containerd/stargz-snapshotter/service/cri.go b/vendor/github.com/containerd/stargz-snapshotter/service/cri.go
new file mode 100644
index 000000000000..5028450b9f45
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/service/cri.go
@@ -0,0 +1,112 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package service
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/stargz-snapshotter/fs/source"
+ digest "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// TODO: switch to "github.com/containerd/containerd/pkg/snapshotters" once all tools using
+// stargz-snapshotter (e.g. k3s) move to a containerd version where that pkg is available.
+const (
+ // targetRefLabel is a label which contains image reference and will be passed
+ // to snapshotters.
+ targetRefLabel = "containerd.io/snapshot/cri.image-ref"
+ // targetLayerDigestLabel is a label which contains layer digest and will be passed
+ // to snapshotters.
+ targetLayerDigestLabel = "containerd.io/snapshot/cri.layer-digest"
+ // targetImageLayersLabel is a label which contains layer digests contained in
+ // the target image and will be passed to snapshotters for preparing layers in
+ // parallel. Skipping some layers is allowed and only affects performance.
+ targetImageLayersLabel = "containerd.io/snapshot/cri.image-layers"
+)
+
+const (
+ // targetImageURLsLabelPrefix is a label prefix which constructs a map from the layer index to
+ // the URLs of the layer descriptor. This isn't contained in the set of labels passed from the CRI
+ // plugin, but some clients (e.g. nerdctl) pass it to preserve the url field in the OCI descriptor.
+ targetImageURLsLabelPrefix = "containerd.io/snapshot/remote/urls."
+
+ // targetURLsLabel is a label which contains layer URLs. It is only used to pass URLs from
+ // containerd to the snapshotter. This isn't contained in the set of labels passed from the CRI
+ // plugin, but some clients (e.g. nerdctl) pass it to preserve the url field in the OCI descriptor.
+ targetURLsLabel = "containerd.io/snapshot/remote/urls"
+)
+
+func sourceFromCRILabels(hosts source.RegistryHosts) source.GetSources {
+ return func(labels map[string]string) ([]source.Source, error) {
+ refStr, ok := labels[targetRefLabel]
+ if !ok {
+ return nil, fmt.Errorf("reference hasn't been passed")
+ }
+ refspec, err := reference.Parse(refStr)
+ if err != nil {
+ return nil, err
+ }
+
+ digestStr, ok := labels[targetLayerDigestLabel]
+ if !ok {
+ return nil, fmt.Errorf("digest hasn't been passed")
+ }
+ target, err := digest.Parse(digestStr)
+ if err != nil {
+ return nil, err
+ }
+
+ var neighboringLayers []ocispec.Descriptor
+ if l, ok := labels[targetImageLayersLabel]; ok {
+ layersStr := strings.Split(l, ",")
+ for i, l := range layersStr {
+ d, err := digest.Parse(l)
+ if err != nil {
+ return nil, err
+ }
+ if d.String() != target.String() {
+ desc := ocispec.Descriptor{Digest: d}
+ if urls, ok := labels[targetImageURLsLabelPrefix+fmt.Sprintf("%d", i)]; ok {
+ desc.URLs = strings.Split(urls, ",")
+ }
+ neighboringLayers = append(neighboringLayers, desc)
+ }
+ }
+ }
+
+ targetDesc := ocispec.Descriptor{
+ Digest: target,
+ Annotations: labels,
+ }
+ if targetURLs, ok := labels[targetURLsLabel]; ok {
+ targetDesc.URLs = append(targetDesc.URLs, strings.Split(targetURLs, ",")...)
+ }
+
+ return []source.Source{
+ {
+ Hosts: hosts,
+ Name: refspec,
+ Target: targetDesc,
+ Manifest: ocispec.Manifest{Layers: append([]ocispec.Descriptor{targetDesc}, neighboringLayers...)},
+ },
+ }, nil
+ }
+}
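An illustrative, package-internal sketch of the label set this function consumes; the digest value is a dummy, `strings` is already imported in this file, and `hosts` is assumed to be a configured source.RegistryHosts:

```go
// Hypothetical labels as a CRI-aware client would attach them when
// preparing a remote snapshot (dummy digest for illustration only).
layerDigest := "sha256:" + strings.Repeat("a", 64)
labels := map[string]string{
	targetRefLabel:         "docker.io/library/ubuntu:24.04",
	targetLayerDigestLabel: layerDigest,
	targetImageLayersLabel: layerDigest, // comma-separated digests of the image's layers
}
// srcs holds one source describing where and how to fetch the target layer.
srcs, err := sourceFromCRILabels(hosts)(labels)
```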
diff --git a/vendor/github.com/containerd/stargz-snapshotter/service/keychain/cri/cri.go b/vendor/github.com/containerd/stargz-snapshotter/service/keychain/cri/cri.go
new file mode 100644
index 000000000000..ad2e6910427e
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/service/keychain/cri/cri.go
@@ -0,0 +1,145 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package cri
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/log"
+ "github.com/containerd/stargz-snapshotter/service/resolver"
+ distribution "github.com/distribution/reference"
+ runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
+)
+
+// NewCRIKeychain provides creds passed through the CRI PullImage API.
+// It also returns a CRI image service server that works as a proxy backed by the specified CRI service.
+// This server reads every PullImageRequest and uses PullImageRequest.AuthConfig for authenticating snapshots.
+func NewCRIKeychain(ctx context.Context, connectCRI func() (runtime.ImageServiceClient, error)) (resolver.Credential, runtime.ImageServiceServer) {
+ server := &instrumentedService{config: make(map[string]*runtime.AuthConfig)}
+ go func() {
+ log.G(ctx).Debugf("Waiting for CRI service is started...")
+ for i := 0; i < 100; i++ {
+ client, err := connectCRI()
+ if err == nil {
+ server.criMu.Lock()
+ server.cri = client
+ server.criMu.Unlock()
+ log.G(ctx).Info("connected to backend CRI service")
+ return
+ }
+ log.G(ctx).WithError(err).Warnf("failed to connect to CRI")
+ time.Sleep(10 * time.Second)
+ }
+ log.G(ctx).Warnf("no connection is available to CRI")
+ }()
+ return server.credentials, server
+}
+
+type instrumentedService struct {
+ cri runtime.ImageServiceClient
+ criMu sync.Mutex
+
+ config map[string]*runtime.AuthConfig
+ configMu sync.Mutex
+}
+
+func (in *instrumentedService) credentials(host string, refspec reference.Spec) (string, string, error) {
+ if host == "docker.io" || host == "registry-1.docker.io" {
+ // Creds of "docker.io" is stored keyed by "https://index.docker.io/v1/".
+ host = "index.docker.io"
+ }
+ in.configMu.Lock()
+ defer in.configMu.Unlock()
+ if cfg, ok := in.config[refspec.String()]; ok {
+ return resolver.ParseAuth(cfg, host)
+ }
+ return "", "", nil
+}
+
+func (in *instrumentedService) getCRI() (c runtime.ImageServiceClient) {
+ in.criMu.Lock()
+ c = in.cri
+ in.criMu.Unlock()
+ return
+}
+
+func (in *instrumentedService) ListImages(ctx context.Context, r *runtime.ListImagesRequest) (res *runtime.ListImagesResponse, err error) {
+ cri := in.getCRI()
+ if cri == nil {
+ return nil, errors.New("server is not initialized yet")
+ }
+ return cri.ListImages(ctx, r)
+}
+
+func (in *instrumentedService) ImageStatus(ctx context.Context, r *runtime.ImageStatusRequest) (res *runtime.ImageStatusResponse, err error) {
+ cri := in.getCRI()
+ if cri == nil {
+ return nil, errors.New("server is not initialized yet")
+ }
+ return cri.ImageStatus(ctx, r)
+}
+
+func (in *instrumentedService) PullImage(ctx context.Context, r *runtime.PullImageRequest) (res *runtime.PullImageResponse, err error) {
+ cri := in.getCRI()
+ if cri == nil {
+ return nil, errors.New("server is not initialized yet")
+ }
+ refspec, err := parseReference(r.GetImage().GetImage())
+ if err != nil {
+ return nil, err
+ }
+ in.configMu.Lock()
+ in.config[refspec.String()] = r.GetAuth()
+ in.configMu.Unlock()
+ return cri.PullImage(ctx, r)
+}
+
+func (in *instrumentedService) RemoveImage(ctx context.Context, r *runtime.RemoveImageRequest) (_ *runtime.RemoveImageResponse, err error) {
+ cri := in.getCRI()
+ if cri == nil {
+ return nil, errors.New("server is not initialized yet")
+ }
+ refspec, err := parseReference(r.GetImage().GetImage())
+ if err != nil {
+ return nil, err
+ }
+ in.configMu.Lock()
+ delete(in.config, refspec.String())
+ in.configMu.Unlock()
+ return cri.RemoveImage(ctx, r)
+}
+
+func (in *instrumentedService) ImageFsInfo(ctx context.Context, r *runtime.ImageFsInfoRequest) (res *runtime.ImageFsInfoResponse, err error) {
+ cri := in.getCRI()
+ if cri == nil {
+ return nil, errors.New("server is not initialized yet")
+ }
+ return cri.ImageFsInfo(ctx, r)
+}
+
+func parseReference(ref string) (reference.Spec, error) {
+ namedRef, err := distribution.ParseDockerRef(ref)
+ if err != nil {
+ return reference.Spec{}, fmt.Errorf("failed to parse image reference %q: %w", ref, err)
+ }
+ return reference.Parse(namedRef.String())
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/service/keychain/dockerconfig/dockerconfig.go b/vendor/github.com/containerd/stargz-snapshotter/service/keychain/dockerconfig/dockerconfig.go
new file mode 100644
index 000000000000..5658b114f147
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/service/keychain/dockerconfig/dockerconfig.go
@@ -0,0 +1,49 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package dockerconfig
+
+import (
+ "context"
+
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/log"
+ "github.com/containerd/stargz-snapshotter/service/resolver"
+ "github.com/docker/cli/cli/config"
+)
+
+// NewDockerconfigKeychain provides a keychain that resolves credentials from
+// the local docker config file (e.g. ~/.docker/config.json).
+func NewDockerconfigKeychain(ctx context.Context) resolver.Credential {
+ return func(host string, refspec reference.Spec) (string, string, error) {
+ cf, err := config.Load("")
+ if err != nil {
+ log.G(ctx).WithError(err).Warnf("failed to load docker config file")
+ return "", "", nil
+ }
+
+ if host == "docker.io" || host == "registry-1.docker.io" {
+ // Creds for docker.io are stored keyed by "https://index.docker.io/v1/".
+ host = "https://index.docker.io/v1/"
+ }
+ ac, err := cf.GetAuthConfig(host)
+ if err != nil {
+ return "", "", err
+ }
+ if ac.IdentityToken != "" {
+ return "", ac.IdentityToken, nil
+ }
+ return ac.Username, ac.Password, nil
+ }
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/service/keychain/kubeconfig/kubeconfig.go b/vendor/github.com/containerd/stargz-snapshotter/service/keychain/kubeconfig/kubeconfig.go
new file mode 100644
index 000000000000..8921ccb2843b
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/service/keychain/kubeconfig/kubeconfig.go
@@ -0,0 +1,262 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package kubeconfig
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/log"
+ "github.com/containerd/stargz-snapshotter/service/resolver"
+ dcfile "github.com/docker/cli/cli/config/configfile"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/tools/clientcmd"
+ "k8s.io/client-go/util/workqueue"
+)
+
+const dockerconfigSelector = "type=" + string(corev1.SecretTypeDockerConfigJson)
+
+type options struct {
+ kubeconfigPath string
+}
+
+// Option configures the kubeconfig-based keychain.
+type Option func(*options)
+
+// WithKubeconfigPath sets the path of the kubeconfig file used for syncing secrets.
+func WithKubeconfigPath(path string) Option {
+ return func(opts *options) {
+ opts.kubeconfigPath = path
+ }
+}
+
+// NewKubeconfigKeychain provides a keychain which can sync its contents with the
+// kubernetes API server by fetching all `kubernetes.io/dockerconfigjson`
+// secrets in the cluster using the provided kubeconfig. It's OK if the config
+// provides a kubeconfig path but the file doesn't exist yet. In this case, the
+// keychain keeps trying to read the specified path periodically, and once the
+// file is actually provided, the keychain uses it to access the API server.
+// This is useful for some environments (e.g. a single-node cluster with a
+// containerized apiserver) where the stargz snapshotter needs to start before
+// everything else, including booting containerd/kubelet/apiserver and
+// configuring users/roles.
+// TODO: support updating the kubeconfig file
+func NewKubeconfigKeychain(ctx context.Context, opts ...Option) resolver.Credential {
+ var kcOpts options
+ for _, o := range opts {
+ o(&kcOpts)
+ }
+ kc := newKeychain(ctx, kcOpts.kubeconfigPath)
+ return kc.credentials
+}
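A usage sketch; the kubeconfig path is a placeholder and `refspec` is assumed to be an already-parsed image reference:

```go
creds := kubeconfig.NewKubeconfigKeychain(ctx,
	kubeconfig.WithKubeconfigPath("/etc/kubernetes/admin.conf"))

// resolver.Credential is func(host string, refspec reference.Spec) (string, string, error).
// The username is empty when an identity token is returned instead of a password.
user, secret, err := creds("registry.example.com", refspec)
```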
+
+func newKeychain(ctx context.Context, kubeconfigPath string) *keychain {
+ kc := &keychain{
+ config: make(map[string]*dcfile.ConfigFile),
+ }
+ ctx = log.WithLogger(ctx, log.G(ctx).WithField("kubeconfig", kubeconfigPath))
+ go func() {
+ if kubeconfigPath != "" {
+ log.G(ctx).Debugf("Waiting for kubeconfig being installed...")
+ for {
+ if _, err := os.Stat(kubeconfigPath); err == nil {
+ break
+ } else if !os.IsNotExist(err) {
+ log.G(ctx).WithError(err).
+ Warnf("failed to read; Disabling syncing")
+ return
+ }
+ time.Sleep(10 * time.Second)
+ }
+ }
+
+ // default loader for KUBECONFIG or `~/.kube/config`:
+ // if no explicit path is provided, KUBECONFIG will be used;
+ // if KUBECONFIG doesn't contain paths, `~/.kube/config` will be used.
+ loadingRule := clientcmd.NewDefaultClientConfigLoadingRules()
+
+ // explicitly provide the path to the kubeconfig;
+ // if the path isn't "", it will be respected.
+ loadingRule.ExplicitPath = kubeconfigPath
+
+ // load and merge config files
+ clientcfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+ loadingRule, // loader for config files
+ &clientcmd.ConfigOverrides{}, // no overrides for config
+ ).ClientConfig()
+ if err != nil {
+ log.G(ctx).WithError(err).Warnf("failed to load config; Disabling syncing")
+ return
+ }
+
+ client, err := kubernetes.NewForConfig(clientcfg)
+ if err != nil {
+ log.G(ctx).WithError(err).Warnf("failed to prepare client; Disabling syncing")
+ return
+ }
+ if err := kc.startSyncSecrets(ctx, client); err != nil {
+ log.G(ctx).WithError(err).Warnf("failed to sync secrets")
+ }
+ }()
+ return kc
+}
+
+type keychain struct {
+ config map[string]*dcfile.ConfigFile
+ configMu sync.Mutex
+
+ // the following entries are used for syncing secrets with the API server.
+ // these fields are lazily filled after the kubeconfig file is provided.
+ queue *workqueue.Type
+ informer cache.SharedIndexInformer
+}
+
+func (kc *keychain) credentials(host string, refspec reference.Spec) (string, string, error) {
+ if host == "docker.io" || host == "registry-1.docker.io" {
+ // Creds of "docker.io" is stored keyed by "https://index.docker.io/v1/".
+ host = "https://index.docker.io/v1/"
+ }
+ kc.configMu.Lock()
+ defer kc.configMu.Unlock()
+ for _, cfg := range kc.config {
+ if acfg, err := cfg.GetAuthConfig(host); err == nil {
+ if acfg.IdentityToken != "" {
+ return "", acfg.IdentityToken, nil
+ } else if !(acfg.Username == "" && acfg.Password == "") {
+ return acfg.Username, acfg.Password, nil
+ }
+ }
+ }
+ return "", "", nil
+}
+
+func (kc *keychain) startSyncSecrets(ctx context.Context, client kubernetes.Interface) error {
+
+ // don't let panics crash the process
+ defer utilruntime.HandleCrash()
+
+ // get informed on `kubernetes.io/dockerconfigjson` secrets in all namespaces
+ informer := cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ // TODO: support legacy image secret `kubernetes.io/dockercfg`
+ options.FieldSelector = dockerconfigSelector
+ return client.CoreV1().Secrets(metav1.NamespaceAll).List(ctx, options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ // TODO: support legacy image secret `kubernetes.io/dockercfg`
+ options.FieldSelector = dockerconfigSelector
+ return client.CoreV1().Secrets(metav1.NamespaceAll).Watch(ctx, options)
+ },
+ },
+ &corev1.Secret{},
+ 0,
+ cache.Indexers{},
+ )
+
+ // use a workqueue because each task can take a long time (parsing config,
+ // waiting for the lock, etc.)
+ queue := workqueue.New()
+ defer queue.ShutDown()
+ informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) {
+ key, err := cache.MetaNamespaceKeyFunc(obj)
+ if err == nil {
+ queue.Add(key)
+ }
+ },
+ UpdateFunc: func(old, new interface{}) {
+ key, err := cache.MetaNamespaceKeyFunc(new)
+ if err == nil {
+ queue.Add(key)
+ }
+ },
+ DeleteFunc: func(obj interface{}) {
+ key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
+ if err == nil {
+ queue.Add(key)
+ }
+ },
+ })
+ go informer.Run(ctx.Done())
+ if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
+ return fmt.Errorf("Timed out for syncing cache")
+ }
+
+ // get informer and queue
+ kc.informer = informer
+ kc.queue = queue
+
+ // keep on syncing secrets
+ wait.Until(kc.runWorker, time.Second, ctx.Done())
+
+ return nil
+}
+
+func (kc *keychain) runWorker() {
+ for kc.processNextItem() {
+ // continue looping
+ }
+}
+
+// TODO: consider retrying?
+func (kc *keychain) processNextItem() bool {
+ key, quit := kc.queue.Get()
+ if quit {
+ return false
+ }
+ defer kc.queue.Done(key)
+
+ obj, exists, err := kc.informer.GetIndexer().GetByKey(key.(string))
+ if err != nil {
+ utilruntime.HandleError(fmt.Errorf("failed to get object; don't sync %q: %v", key, err))
+ return true
+ }
+ if !exists {
+ kc.configMu.Lock()
+ delete(kc.config, key.(string))
+ kc.configMu.Unlock()
+ return true
+ }
+
+ // TODO: support legacy image secret `kubernetes.io/dockercfg`
+ data, ok := obj.(*corev1.Secret).Data[corev1.DockerConfigJsonKey]
+ if !ok {
+ utilruntime.HandleError(fmt.Errorf("no secret is provided; don't sync %q", key))
+ return true
+ }
+ configFile := dcfile.New("")
+ if err := configFile.LoadFromReader(bytes.NewReader(data)); err != nil {
+ utilruntime.HandleError(fmt.Errorf("broken data; don't sync %q: %v", key, err))
+ return true
+ }
+ kc.configMu.Lock()
+ kc.config[key.(string)] = configFile
+ kc.configMu.Unlock()
+
+ return true
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/service/plugin/plugin.go b/vendor/github.com/containerd/stargz-snapshotter/service/plugin/plugin.go
new file mode 100644
index 000000000000..b82bdd63a6de
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/service/plugin/plugin.go
@@ -0,0 +1,23 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package plugin
+
+import "github.com/containerd/stargz-snapshotter/service/plugincore"
+
+func init() {
+ plugincore.RegisterPlugin()
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/service/plugincore/plugin.go b/vendor/github.com/containerd/stargz-snapshotter/service/plugincore/plugin.go
new file mode 100644
index 000000000000..69125d622953
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/service/plugincore/plugin.go
@@ -0,0 +1,152 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package plugincore
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/containerd/containerd/v2/defaults"
+ "github.com/containerd/containerd/v2/pkg/dialer"
+ ctdplugins "github.com/containerd/containerd/v2/plugins"
+ "github.com/containerd/log"
+ "github.com/containerd/platforms"
+ "github.com/containerd/plugin"
+ "github.com/containerd/plugin/registry"
+ "github.com/containerd/stargz-snapshotter/service"
+ "github.com/containerd/stargz-snapshotter/service/keychain/cri"
+ "github.com/containerd/stargz-snapshotter/service/keychain/dockerconfig"
+ "github.com/containerd/stargz-snapshotter/service/keychain/kubeconfig"
+ "github.com/containerd/stargz-snapshotter/service/resolver"
+ grpc "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/credentials/insecure"
+ runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
+)
+
+// Config represents configuration for the stargz snapshotter plugin.
+type Config struct {
+ service.Config
+
+ // RootPath is the directory for the plugin
+ RootPath string `toml:"root_path"`
+
+ // CRIKeychainImageServicePath is the path to expose CRI service wrapped by CRI keychain
+ CRIKeychainImageServicePath string `toml:"cri_keychain_image_service_path"`
+
+ // Registry is CRI-plugin-compatible registry configuration
+ Registry resolver.Registry `toml:"registry"`
+}
+
+// RegisterPlugin registers the stargz snapshotter as a containerd snapshot plugin.
+func RegisterPlugin() {
+ registry.Register(&plugin.Registration{
+ Type: ctdplugins.SnapshotPlugin,
+ ID: "stargz",
+ Config: &Config{},
+ InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+ ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec())
+ ctx := ic.Context
+
+ config, ok := ic.Config.(*Config)
+ if !ok {
+ return nil, errors.New("invalid stargz snapshotter configuration")
+ }
+
+ root := ic.Properties[ctdplugins.PropertyRootDir]
+ if config.RootPath != "" {
+ root = config.RootPath
+ }
+ ic.Meta.Exports["root"] = root
+
+ // Configure keychain
+ credsFuncs := []resolver.Credential{dockerconfig.NewDockerconfigKeychain(ctx)}
+ if config.Config.KubeconfigKeychainConfig.EnableKeychain {
+ var opts []kubeconfig.Option
+ if kcp := config.Config.KubeconfigKeychainConfig.KubeconfigPath; kcp != "" {
+ opts = append(opts, kubeconfig.WithKubeconfigPath(kcp))
+ }
+ credsFuncs = append(credsFuncs, kubeconfig.NewKubeconfigKeychain(ctx, opts...))
+ }
+ if addr := config.CRIKeychainImageServicePath; config.Config.CRIKeychainConfig.EnableKeychain && addr != "" {
+ // connects to the backend CRI service (defaults to containerd socket)
+ criAddr := ic.Properties[ctdplugins.PropertyGRPCAddress]
+ if cp := config.Config.CRIKeychainConfig.ImageServicePath; cp != "" {
+ criAddr = cp
+ }
+ if criAddr == "" {
+ return nil, errors.New("backend CRI service address is not specified")
+ }
+ connectCRI := func() (runtime.ImageServiceClient, error) {
+ conn, err := newCRIConn(criAddr)
+ if err != nil {
+ return nil, err
+ }
+ return runtime.NewImageServiceClient(conn), nil
+ }
+ criCreds, criServer := cri.NewCRIKeychain(ctx, connectCRI)
+ // Create a gRPC server
+ rpc := grpc.NewServer()
+ runtime.RegisterImageServiceServer(rpc, criServer)
+ // Prepare the directory for the socket
+ if err := os.MkdirAll(filepath.Dir(addr), 0700); err != nil {
+ return nil, fmt.Errorf("failed to create directory %q: %w", filepath.Dir(addr), err)
+ }
+ // Try to remove the socket file to avoid EADDRINUSE
+ if err := os.RemoveAll(addr); err != nil {
+ return nil, fmt.Errorf("failed to remove %q: %w", addr, err)
+ }
+ // Listen and serve
+ l, err := net.Listen("unix", addr)
+ if err != nil {
+ return nil, fmt.Errorf("error on listen socket %q: %w", addr, err)
+ }
+ go func() {
+ if err := rpc.Serve(l); err != nil {
+ log.G(ctx).WithError(err).Warnf("error on serving via socket %q", addr)
+ }
+ }()
+ credsFuncs = append(credsFuncs, criCreds)
+ }
+
+ // TODO(ktock): print warn if old configuration is specified.
+ // TODO(ktock): should we respect old configuration?
+ return service.NewStargzSnapshotterService(ctx, root, &config.Config,
+ service.WithCustomRegistryHosts(resolver.RegistryHostsFromCRIConfig(ctx, config.Registry, credsFuncs...)))
+ },
+ })
+}
+
+func newCRIConn(criAddr string) (*grpc.ClientConn, error) {
+ // TODO: make gRPC options configurable from config.toml
+ backoffConfig := backoff.DefaultConfig
+ backoffConfig.MaxDelay = 3 * time.Second
+ connParams := grpc.ConnectParams{
+ Backoff: backoffConfig,
+ }
+ gopts := []grpc.DialOption{
+ grpc.WithTransportCredentials(insecure.NewCredentials()),
+ grpc.WithConnectParams(connParams),
+ grpc.WithContextDialer(dialer.ContextDialer),
+ grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
+ grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
+ }
+ return grpc.NewClient(dialer.DialAddress(criAddr), gopts...)
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/service/resolver/cri.go b/vendor/github.com/containerd/stargz-snapshotter/service/resolver/cri.go
new file mode 100644
index 000000000000..43f1dda9b9d6
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/service/resolver/cri.go
@@ -0,0 +1,349 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package resolver
+
+// =====
+// This is CRI-plugin-compatible registry hosts configuration.
+// Some functions are ported from /~https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri as noted on each one.
+// TODO: import them from the CRI package once we drop support for containerd v1.5.x (cri v1alpha2)
+// =====
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containerd/containerd/v2/core/remotes/docker"
+ dconfig "github.com/containerd/containerd/v2/core/remotes/docker/config"
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/stargz-snapshotter/fs/source"
+ rhttp "github.com/hashicorp/go-retryablehttp"
+ runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
+)
+
+// Registry is the CRI-plugin-compatible registry configuration.
+type Registry struct {
+ // ConfigPath is a path to the root directory containing registry-specific
+ // configurations.
+ // If ConfigPath is set, the rest of the registry specific options are ignored.
+ ConfigPath string `toml:"config_path" json:"configPath"`
+ // Mirrors are namespace to mirror mapping for all namespaces.
+ // This option will not be used when ConfigPath is provided.
+ // DEPRECATED: Use ConfigPath instead. Remove in containerd 1.7.
+ Mirrors map[string]Mirror `toml:"mirrors" json:"mirrors"`
+ // Configs are configs for each registry.
+ // The key is the domain name or IP of the registry.
+ // This option will be fully deprecated for ConfigPath in the future.
+ Configs map[string]RegistryConfig `toml:"configs" json:"configs"`
+}
+
+// Mirror contains the config related to the registry mirror
+type Mirror struct {
+ // Endpoints are endpoints for a namespace. CRI plugin will try the endpoints
+ // one by one until a working one is found. The endpoint must be a valid url
+ // with host specified.
+ // The scheme, host and path from the endpoint URL will be used.
+ Endpoints []string `toml:"endpoint" json:"endpoint"`
+}
+
+// RegistryConfig contains configuration used to communicate with the registry.
+type RegistryConfig struct {
+ // Auth contains information to authenticate to the registry.
+ Auth *AuthConfig `toml:"auth" json:"auth"`
+ // TLS is a pair of CA/Cert/Key which then are used when creating the transport
+ // that communicates with the registry.
+ // This field will not be used when ConfigPath is provided.
+ // DEPRECATED: Use ConfigPath instead. Remove in containerd 1.7.
+ TLS *TLSConfig `toml:"tls" json:"tls"`
+}
+
+// AuthConfig contains the config related to authentication to a specific registry
+type AuthConfig struct {
+ // Username is the username used to log in to the registry.
+ Username string `toml:"username" json:"username"`
+ // Password is the password used to log in to the registry.
+ Password string `toml:"password" json:"password"`
+ // Auth is a base64 encoded string from the concatenation of the username,
+ // a colon, and the password.
+ Auth string `toml:"auth" json:"auth"`
+ // IdentityToken is used to authenticate the user and get
+ // an access token for the registry.
+ IdentityToken string `toml:"identitytoken" json:"identitytoken"`
+}
+
+// TLSConfig contains the CA/Cert/Key used for a registry
+type TLSConfig struct {
+ InsecureSkipVerify bool `toml:"insecure_skip_verify" json:"insecure_skip_verify"`
+ CAFile string `toml:"ca_file" json:"caFile"`
+ CertFile string `toml:"cert_file" json:"certFile"`
+ KeyFile string `toml:"key_file" json:"keyFile"`
+}
+
+// RegistryHostsFromCRIConfig creates RegistryHosts (a set of registry configuration) from CRI-plugin-compatible config.
+// NOTE: ported from /~https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L332-L405
+func RegistryHostsFromCRIConfig(ctx context.Context, config Registry, credsFuncs ...Credential) source.RegistryHosts {
+ paths := filepath.SplitList(config.ConfigPath)
+ if len(paths) > 0 {
+ return func(ref reference.Spec) ([]docker.RegistryHost, error) {
+ hostOptions := dconfig.HostOptions{}
+ hostOptions.Credentials = multiCredsFuncs(ref, append(credsFuncs, func(host string, ref reference.Spec) (string, string, error) {
+ config := config.Configs[host]
+ if config.Auth != nil {
+ return ParseAuth(toRuntimeAuthConfig(*config.Auth), host)
+ }
+ return "", "", nil
+ })...)
+ hostOptions.HostDir = hostDirFromRoots(paths)
+ return dconfig.ConfigureHosts(ctx, hostOptions)(ref.Hostname())
+ }
+ }
+ return func(ref reference.Spec) ([]docker.RegistryHost, error) {
+ host := ref.Hostname()
+ var registries []docker.RegistryHost
+
+ endpoints, err := registryEndpoints(config, host)
+ if err != nil {
+ return nil, fmt.Errorf("get registry endpoints: %w", err)
+ }
+ for _, e := range endpoints {
+ u, err := url.Parse(e)
+ if err != nil {
+ return nil, fmt.Errorf("parse registry endpoint %q from mirrors: %w", e, err)
+ }
+
+ var (
+ rclient = rhttp.NewClient()
+ config = config.Configs[u.Host]
+ )
+
+ rclient.Logger = nil // disable logging every request
+
+ if config.TLS != nil {
+ if tr, ok := rclient.HTTPClient.Transport.(*http.Transport); ok {
+ tr.TLSClientConfig, err = getTLSConfig(*config.TLS)
+ if err != nil {
+ return nil, fmt.Errorf("get TLSConfig for registry %q: %w", e, err)
+ }
+ } else {
+ return nil, errors.New("TLS config cannot be applied; Client.Transport is not *http.Transport")
+ }
+ }
+
+ client := rclient.StandardClient()
+ authorizer := docker.NewDockerAuthorizer(
+ docker.WithAuthClient(client),
+ docker.WithAuthCreds(multiCredsFuncs(ref, credsFuncs...)))
+
+ if u.Path == "" {
+ u.Path = "/v2"
+ }
+
+ registries = append(registries, docker.RegistryHost{
+ Client: client,
+ Authorizer: authorizer,
+ Host: u.Host,
+ Scheme: u.Scheme,
+ Path: u.Path,
+ Capabilities: docker.HostCapabilityResolve | docker.HostCapabilityPull,
+ })
+ }
+ return registries, nil
+ }
+}
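A sketch of building registry hosts from an inline mirror configuration; the endpoint is a placeholder, `refspec` is a parsed image reference, and `creds` can be any resolver.Credential, e.g. one of the keychains added earlier in this diff:

```go
hosts := resolver.RegistryHostsFromCRIConfig(ctx, resolver.Registry{
	Mirrors: map[string]resolver.Mirror{
		"docker.io": {Endpoints: []string{"https://mirror.example.com"}},
	},
}, creds)

// source.RegistryHosts maps a parsed image reference to the registry
// hosts to try, in order.
registryHosts, err := hosts(refspec)
```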
+
+// NOTE: Ported from /~https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L316-L330
+func hostDirFromRoots(roots []string) func(string) (string, error) {
+ rootfn := make([]func(string) (string, error), len(roots))
+ for i := range roots {
+ rootfn[i] = dconfig.HostDirFromRoot(roots[i])
+ }
+ return func(host string) (dir string, err error) {
+ for _, fn := range rootfn {
+ dir, err = fn(host)
+ if (err != nil && !errdefs.IsNotFound(err)) || (dir != "") {
+ break
+ }
+ }
+ return
+ }
+}
+
+// toRuntimeAuthConfig converts cri plugin auth config to runtime auth config.
+// NOTE: Ported from /~https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/helpers.go#L295-L303
+func toRuntimeAuthConfig(a AuthConfig) *runtime.AuthConfig {
+ return &runtime.AuthConfig{
+ Username: a.Username,
+ Password: a.Password,
+ Auth: a.Auth,
+ IdentityToken: a.IdentityToken,
+ }
+}
+
+// getTLSConfig returns a TLSConfig configured with a CA/Cert/Key specified by registryTLSConfig
+// NOTE: Ported from /~https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L316-L330
+func getTLSConfig(registryTLSConfig TLSConfig) (*tls.Config, error) {
+ var (
+ tlsConfig = &tls.Config{}
+ cert tls.Certificate
+ err error
+ )
+ if registryTLSConfig.CertFile != "" && registryTLSConfig.KeyFile == "" {
+ return nil, fmt.Errorf("cert file %q was specified, but no corresponding key file was specified", registryTLSConfig.CertFile)
+ }
+ if registryTLSConfig.CertFile == "" && registryTLSConfig.KeyFile != "" {
+ return nil, fmt.Errorf("key file %q was specified, but no corresponding cert file was specified", registryTLSConfig.KeyFile)
+ }
+ if registryTLSConfig.CertFile != "" && registryTLSConfig.KeyFile != "" {
+ cert, err = tls.LoadX509KeyPair(registryTLSConfig.CertFile, registryTLSConfig.KeyFile)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load cert file: %w", err)
+ }
+ if len(cert.Certificate) != 0 {
+ tlsConfig.Certificates = []tls.Certificate{cert}
+ }
+ tlsConfig.BuildNameToCertificate() // nolint:staticcheck
+ }
+
+ if registryTLSConfig.CAFile != "" {
+ caCertPool, err := x509.SystemCertPool()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get system cert pool: %w", err)
+ }
+ caCert, err := os.ReadFile(registryTLSConfig.CAFile)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load CA file: %w", err)
+ }
+ caCertPool.AppendCertsFromPEM(caCert)
+ tlsConfig.RootCAs = caCertPool
+ }
+
+ tlsConfig.InsecureSkipVerify = registryTLSConfig.InsecureSkipVerify
+ return tlsConfig, nil
+}
+
+// defaultScheme returns the default scheme for a registry host.
+// NOTE: Ported from /~https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L316-L330
+func defaultScheme(host string) string {
+ if h, _, err := net.SplitHostPort(host); err == nil {
+ host = h
+ }
+ if host == "localhost" || host == "127.0.0.1" || host == "::1" {
+ return "http"
+ }
+ return "https"
+}
+
+// addDefaultScheme returns the endpoint with default scheme
+// NOTE: Ported from /~https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L316-L330
+func addDefaultScheme(endpoint string) (string, error) {
+ if strings.Contains(endpoint, "://") {
+ return endpoint, nil
+ }
+ ue := "dummy://" + endpoint
+ u, err := url.Parse(ue)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%s://%s", defaultScheme(u.Host), endpoint), nil
+}
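+
+// For example (a sketch of the behavior above; hosts are hypothetical):
+//
+//    addDefaultScheme("localhost:5000")      // -> "http://localhost:5000"
+//    addDefaultScheme("mirror.example.com")  // -> "https://mirror.example.com"
+//    addDefaultScheme("https://my.registry") // -> "https://my.registry" (unchanged)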
+
+// registryEndpoints returns endpoints for a given host.
+// It adds the default registry endpoint if it does not exist in the passed-in endpoint list.
+// It also supports wildcard host matching with `*`.
+// NOTE: Ported from /~https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L431-L464
+func registryEndpoints(config Registry, host string) ([]string, error) {
+ var endpoints []string
+ _, ok := config.Mirrors[host]
+ if ok {
+ endpoints = config.Mirrors[host].Endpoints
+ } else {
+ endpoints = config.Mirrors["*"].Endpoints
+ }
+ defaultHost, err := docker.DefaultHost(host)
+ if err != nil {
+ return nil, fmt.Errorf("get default host: %w", err)
+ }
+ for i := range endpoints {
+ en, err := addDefaultScheme(endpoints[i])
+ if err != nil {
+ return nil, fmt.Errorf("parse endpoint url: %w", err)
+ }
+ endpoints[i] = en
+ }
+ for _, e := range endpoints {
+ u, err := url.Parse(e)
+ if err != nil {
+ return nil, fmt.Errorf("parse endpoint url: %w", err)
+ }
+ if u.Host == host {
+ // Do not add default if the endpoint already exists.
+ return endpoints, nil
+ }
+ }
+ return append(endpoints, defaultScheme(defaultHost)+"://"+defaultHost), nil
+}
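+
+// As a sketch of the behavior above (the mirror host is hypothetical): for
+// host "docker.io" with a configured mirror endpoint "mirror.example.com",
+// registryEndpoints returns
+//
+//    ["https://mirror.example.com", "https://registry-1.docker.io"]
+//
+// since docker.DefaultHost resolves "docker.io" to "registry-1.docker.io" and
+// the default endpoint is appended when not already present.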
+
+// ParseAuth parses AuthConfig and returns username and password/secret required by containerd.
+// NOTE: Ported from /~https://github.com/containerd/containerd/blob/v1.5.2/pkg/cri/server/image_pull.go#L176-L214
+func ParseAuth(auth *runtime.AuthConfig, host string) (string, string, error) {
+ if auth == nil {
+ return "", "", nil
+ }
+ if auth.ServerAddress != "" {
+ // Do not return the auth info when server address doesn't match.
+ u, err := url.Parse(auth.ServerAddress)
+ if err != nil {
+ return "", "", fmt.Errorf("parse server address: %w", err)
+ }
+ if host != u.Host {
+ return "", "", nil
+ }
+ }
+ if auth.Username != "" {
+ return auth.Username, auth.Password, nil
+ }
+ if auth.IdentityToken != "" {
+ return "", auth.IdentityToken, nil
+ }
+ if auth.Auth != "" {
+ decLen := base64.StdEncoding.DecodedLen(len(auth.Auth))
+ decoded := make([]byte, decLen)
+ _, err := base64.StdEncoding.Decode(decoded, []byte(auth.Auth))
+ if err != nil {
+ return "", "", err
+ }
+ fields := strings.SplitN(string(decoded), ":", 2)
+ if len(fields) != 2 {
+ return "", "", fmt.Errorf("invalid decoded auth: %q", decoded)
+ }
+ user, passwd := fields[0], fields[1]
+ return user, strings.Trim(passwd, "\x00"), nil
+ }
+ // TODO(random-liu): Support RegistryToken.
+ // An empty auth config is valid for anonymous registry
+ return "", "", nil
+}
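+
+// For example (a sketch; "dXNlcjpwYXNz" is base64("user:pass")):
+//
+//    u, p, err := ParseAuth(&runtime.AuthConfig{Auth: "dXNlcjpwYXNz"}, "registry.example.com")
+//    // u == "user", p == "pass", err == nil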
diff --git a/vendor/github.com/containerd/stargz-snapshotter/service/resolver/registry.go b/vendor/github.com/containerd/stargz-snapshotter/service/resolver/registry.go
new file mode 100644
index 000000000000..c5ea2276ffeb
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/service/resolver/registry.go
@@ -0,0 +1,149 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package resolver
+
+import (
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/containerd/containerd/v2/core/remotes/docker"
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/stargz-snapshotter/fs/source"
+ rhttp "github.com/hashicorp/go-retryablehttp"
+)
+
+const defaultRequestTimeoutSec = 30
+
+// Config is config for resolving registries.
+type Config struct {
+ Host map[string]HostConfig `toml:"host"`
+}
+
+type HostConfig struct {
+ Mirrors []MirrorConfig `toml:"mirrors"`
+}
+
+type MirrorConfig struct {
+ // Host is the hostname of the mirror host.
+ Host string `toml:"host"`
+
+ // Insecure, if true, means to use the http scheme instead of https.
+ Insecure bool `toml:"insecure"`
+
+ // RequestTimeoutSec is the timeout, in seconds, of each request to the registry.
+ // RequestTimeoutSec == 0 indicates the default timeout (defaultRequestTimeoutSec).
+ // RequestTimeoutSec < 0 indicates no timeout.
+ RequestTimeoutSec int `toml:"request_timeout_sec"`
+
+ // Header holds additional headers to send to the server.
+ Header map[string]interface{} `toml:"header"`
+}
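+
+// For illustration, a TOML fragment that Config can decode (a sketch; the
+// hosts are hypothetical):
+//
+//    [host."docker.io"]
+//      [[host."docker.io".mirrors]]
+//        host = "mirror.example.com"
+//        request_timeout_sec = 10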
+
+type Credential func(string, reference.Spec) (string, string, error)
+
+// RegistryHostsFromConfig creates RegistryHosts (a set of registry configuration) from Config.
+func RegistryHostsFromConfig(cfg Config, credsFuncs ...Credential) source.RegistryHosts {
+ return func(ref reference.Spec) (hosts []docker.RegistryHost, _ error) {
+ host := ref.Hostname()
+ for _, h := range append(cfg.Host[host].Mirrors, MirrorConfig{
+ Host: host,
+ }) {
+ client := rhttp.NewClient()
+ client.Logger = nil // disable logging every request
+ if h.RequestTimeoutSec >= 0 {
+ if h.RequestTimeoutSec == 0 {
+ client.HTTPClient.Timeout = defaultRequestTimeoutSec * time.Second
+ } else {
+ client.HTTPClient.Timeout = time.Duration(h.RequestTimeoutSec) * time.Second
+ }
+ } // h.RequestTimeoutSec < 0 means "no timeout"
+ tr := client.StandardClient()
+ var header http.Header
+ var err error
+ if h.Header != nil {
+ header = http.Header{}
+ for key, ty := range h.Header {
+ switch value := ty.(type) {
+ case string:
+ header[key] = []string{value}
+ case []interface{}:
+ header[key], err = makeStringSlice(value, nil)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("invalid type %v for header %q", ty, key)
+ }
+ }
+ }
+ config := docker.RegistryHost{
+ Client: tr,
+ Host: h.Host,
+ Scheme: "https",
+ Path: "/v2",
+ Capabilities: docker.HostCapabilityPull | docker.HostCapabilityResolve,
+ Authorizer: docker.NewDockerAuthorizer(
+ docker.WithAuthClient(tr),
+ docker.WithAuthCreds(multiCredsFuncs(ref, credsFuncs...))),
+ Header: header,
+ }
+ if localhost, _ := docker.MatchLocalhost(config.Host); localhost || h.Insecure {
+ config.Scheme = "http"
+ }
+ if config.Host == "docker.io" {
+ config.Host = "registry-1.docker.io"
+ }
+ hosts = append(hosts, config)
+ }
+ return
+ }
+}
+
+func multiCredsFuncs(ref reference.Spec, credsFuncs ...Credential) func(string) (string, string, error) {
+ return func(host string) (string, string, error) {
+ for _, f := range credsFuncs {
+ if username, secret, err := f(host, ref); err != nil {
+ return "", "", err
+ } else if !(username == "" && secret == "") {
+ return username, secret, nil
+ }
+ }
+ return "", "", nil
+ }
+}
+
+// makeStringSlice is a helper func to convert from []interface{} to []string.
+// Additionally an optional cb func may be passed to perform string mapping.
+// NOTE: Ported from /~https://github.com/containerd/containerd/blob/v1.6.9/remotes/docker/config/hosts.go#L516-L533
+func makeStringSlice(slice []interface{}, cb func(string) string) ([]string, error) {
+ out := make([]string, len(slice))
+ for i, value := range slice {
+ str, ok := value.(string)
+ if !ok {
+ return nil, fmt.Errorf("unable to cast %v to string", value)
+ }
+
+ if cb != nil {
+ out[i] = cb(str)
+ } else {
+ out[i] = str
+ }
+ }
+ return out, nil
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/service/service.go b/vendor/github.com/containerd/stargz-snapshotter/service/service.go
new file mode 100644
index 000000000000..28c49b9feecd
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/service/service.go
@@ -0,0 +1,145 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package service
+
+import (
+ "context"
+ "path/filepath"
+
+ "github.com/containerd/containerd/v2/core/snapshots"
+ "github.com/containerd/containerd/v2/pkg/reference"
+ "github.com/containerd/containerd/v2/plugins/snapshots/overlay/overlayutils"
+ "github.com/containerd/log"
+ stargzfs "github.com/containerd/stargz-snapshotter/fs"
+ "github.com/containerd/stargz-snapshotter/fs/layer"
+ "github.com/containerd/stargz-snapshotter/fs/source"
+ "github.com/containerd/stargz-snapshotter/metadata"
+ esgzexternaltoc "github.com/containerd/stargz-snapshotter/nativeconverter/estargz/externaltoc"
+ "github.com/containerd/stargz-snapshotter/service/resolver"
+ snbase "github.com/containerd/stargz-snapshotter/snapshot"
+ "github.com/hashicorp/go-multierror"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type Option func(*options)
+
+type options struct {
+ credsFuncs []resolver.Credential
+ registryHosts source.RegistryHosts
+ fsOpts []stargzfs.Option
+}
+
+// WithCredsFuncs specifies credsFuncs to be used for connecting to the registries.
+func WithCredsFuncs(creds ...resolver.Credential) Option {
+ return func(o *options) {
+ o.credsFuncs = append(o.credsFuncs, creds...)
+ }
+}
+
+// WithCustomRegistryHosts specifies registry hosts to use instead of the ones resolved from the config.
+func WithCustomRegistryHosts(hosts source.RegistryHosts) Option {
+ return func(o *options) {
+ o.registryHosts = hosts
+ }
+}
+
+// WithFilesystemOptions allows passing filesystem-related configuration.
+func WithFilesystemOptions(opts ...stargzfs.Option) Option {
+ return func(o *options) {
+ o.fsOpts = opts
+ }
+}
+
+// NewStargzSnapshotterService returns stargz snapshotter.
+func NewStargzSnapshotterService(ctx context.Context, root string, config *Config, opts ...Option) (snapshots.Snapshotter, error) {
+ var sOpts options
+ for _, o := range opts {
+ o(&sOpts)
+ }
+
+ hosts := sOpts.registryHosts
+ if hosts == nil {
+ // Use RegistryHosts based on ResolverConfig and keychain
+ hosts = resolver.RegistryHostsFromConfig(resolver.Config(config.ResolverConfig), sOpts.credsFuncs...)
+ }
+
+ userxattr, err := overlayutils.NeedsUserXAttr(snapshotterRoot(root))
+ if err != nil {
+ log.G(ctx).WithError(err).Warnf("cannot detect whether \"userxattr\" option needs to be used, assuming to be %v", userxattr)
+ }
+ opq := layer.OverlayOpaqueTrusted
+ if userxattr {
+ opq = layer.OverlayOpaqueUser
+ }
+ // Configure filesystem and snapshotter
+ fsOpts := append(sOpts.fsOpts, stargzfs.WithGetSources(sources(
+ sourceFromCRILabels(hosts), // provides source info based on CRI labels
+ source.FromDefaultLabels(hosts), // provides source info based on default labels
+ )),
+ stargzfs.WithOverlayOpaqueType(opq),
+ stargzfs.WithAdditionalDecompressors(func(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) []metadata.Decompressor {
+ return []metadata.Decompressor{esgzexternaltoc.NewRemoteDecompressor(ctx, hosts, refspec, desc)}
+ }),
+ )
+ fs, err := stargzfs.NewFilesystem(fsRoot(root), config.Config, fsOpts...)
+ if err != nil {
+ log.G(ctx).WithError(err).Fatalf("failed to configure filesystem")
+ }
+
+ var snapshotter snapshots.Snapshotter
+
+ snOpts := []snbase.Opt{snbase.AsynchronousRemove}
+ if config.SnapshotterConfig.AllowInvalidMountsOnRestart {
+ snOpts = append(snOpts, snbase.AllowInvalidMountsOnRestart)
+ }
+
+ snapshotter, err = snbase.NewSnapshotter(ctx, snapshotterRoot(root), fs, snOpts...)
+ if err != nil {
+ log.G(ctx).WithError(err).Fatalf("failed to create new snapshotter")
+ }
+
+ return snapshotter, err
+}
+
+func snapshotterRoot(root string) string {
+ return filepath.Join(root, "snapshotter")
+}
+
+func fsRoot(root string) string {
+ return filepath.Join(root, "stargz")
+}
+
+func sources(ps ...source.GetSources) source.GetSources {
+ return func(labels map[string]string) (source []source.Source, allErr error) {
+ for _, p := range ps {
+ src, err := p(labels)
+ if err == nil {
+ return src, nil
+ }
+ allErr = multierror.Append(allErr, err)
+ }
+ return
+ }
+}
+
+// Supported returns nil when the remote snapshotter is functional on the system with the root directory.
+// Supported is not called during plugin initialization, but is exposed for downstream
+// projects which use this snapshotter as a library.
+func Supported(root string) error {
+ // Remote snapshotter is implemented based on overlayfs snapshotter.
+ return overlayutils.Supported(snapshotterRoot(root))
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/snapshot/snapshot.go b/vendor/github.com/containerd/stargz-snapshotter/snapshot/snapshot.go
new file mode 100644
index 000000000000..eac479870068
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/snapshot/snapshot.go
@@ -0,0 +1,765 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package snapshot
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/containerd/containerd/v2/core/mount"
+ "github.com/containerd/containerd/v2/core/snapshots"
+ "github.com/containerd/containerd/v2/core/snapshots/storage"
+ "github.com/containerd/containerd/v2/plugins/snapshots/overlay/overlayutils"
+ "github.com/containerd/continuity/fs"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/log"
+ "github.com/moby/sys/mountinfo"
+ "golang.org/x/sync/errgroup"
+)
+
+const (
+ targetSnapshotLabel = "containerd.io/snapshot.ref"
+ remoteLabel = "containerd.io/snapshot/remote"
+ remoteLabelVal = "remote snapshot"
+
+ // remoteSnapshotLogKey is a log field key which indicates whether the
+ // `Prepare` method successfully prepared the targeted remote snapshot, as
+ // defined in the following:
+ // - "true" : indicates the snapshot has been successfully prepared as a
+ // remote snapshot
+ // - "false" : indicates the snapshot failed to be prepared as a remote
+ // snapshot
+ // - null : undetermined
+ remoteSnapshotLogKey = "remote-snapshot-prepared"
+ prepareSucceeded = "true"
+ prepareFailed = "false"
+)
+
+// FileSystem is a backing filesystem abstraction.
+//
+// Mount() tries to mount a remote snapshot to the specified mount point
+// directory. If it succeeds, the mountpoint directory will be treated as a layer
+// snapshot. If Mount() fails, the mountpoint directory MUST be cleaned up.
+// Check() is called to check the connectivity of the existing layer snapshot
+// every time the layer is used by containerd.
+// Unmount() is called to unmount a remote snapshot from the specified mount point
+// directory.
+type FileSystem interface {
+ Mount(ctx context.Context, mountpoint string, labels map[string]string) error
+ Check(ctx context.Context, mountpoint string, labels map[string]string) error
+ Unmount(ctx context.Context, mountpoint string) error
+}
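+
+// A minimal no-op FileSystem sketch illustrating the contract above
+// (hypothetical; a real implementation mounts an actual remote layer):
+//
+//    type nopFS struct{}
+//
+//    func (nopFS) Mount(ctx context.Context, mountpoint string, labels map[string]string) error {
+//        return fmt.Errorf("no remote layer for %q", mountpoint) // must leave mountpoint clean
+//    }
+//    func (nopFS) Check(ctx context.Context, mountpoint string, labels map[string]string) error { return nil }
+//    func (nopFS) Unmount(ctx context.Context, mountpoint string) error { return nil }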
+
+// SnapshotterConfig is used to configure the remote snapshotter instance
+type SnapshotterConfig struct {
+ asyncRemove bool
+ noRestore bool
+ allowInvalidMountsOnRestart bool
+}
+
+// Opt is an option to configure the remote snapshotter
+type Opt func(config *SnapshotterConfig) error
+
+// AsynchronousRemove defers removal of filesystem content until
+// the Cleanup method is called. Removals will make the snapshot
+// referred to by the key unavailable and make the key immediately
+// available for re-use.
+func AsynchronousRemove(config *SnapshotterConfig) error {
+ config.asyncRemove = true
+ return nil
+}
+
+func NoRestore(config *SnapshotterConfig) error {
+ config.noRestore = true
+ return nil
+}
+
+func AllowInvalidMountsOnRestart(config *SnapshotterConfig) error {
+ config.allowInvalidMountsOnRestart = true
+ return nil
+}
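+
+// Typical construction (a sketch; fs is any FileSystem implementation and the
+// root path is hypothetical):
+//
+//    sn, err := NewSnapshotter(ctx, "/var/lib/example-snapshotter", fs,
+//        AsynchronousRemove, AllowInvalidMountsOnRestart)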
+
+type snapshotter struct {
+ root string
+ ms *storage.MetaStore
+ asyncRemove bool
+
+ // fs is a filesystem that this snapshotter recognizes.
+ fs FileSystem
+ userxattr bool // whether to enable "userxattr" mount option
+ noRestore bool
+ allowInvalidMountsOnRestart bool
+}
+
+// NewSnapshotter returns a Snapshotter which can use unpacked remote layers
+// as snapshots. This is implemented based on the overlayfs snapshotter, so
+// diffs are stored under the provided root and a metadata file is stored under
+// the root, the same as the overlayfs snapshotter.
+func NewSnapshotter(ctx context.Context, root string, targetFs FileSystem, opts ...Opt) (snapshots.Snapshotter, error) {
+ if targetFs == nil {
+ return nil, fmt.Errorf("Specify filesystem to use")
+ }
+
+ var config SnapshotterConfig
+ for _, opt := range opts {
+ if err := opt(&config); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := os.MkdirAll(root, 0700); err != nil {
+ return nil, err
+ }
+ supportsDType, err := fs.SupportsDType(root)
+ if err != nil {
+ return nil, err
+ }
+ if !supportsDType {
+ return nil, fmt.Errorf("%s does not support d_type. If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root)
+ }
+ ms, err := storage.NewMetaStore(filepath.Join(root, "metadata.db"))
+ if err != nil {
+ return nil, err
+ }
+
+ if err := os.Mkdir(filepath.Join(root, "snapshots"), 0700); err != nil && !os.IsExist(err) {
+ return nil, err
+ }
+
+ userxattr, err := overlayutils.NeedsUserXAttr(root)
+ if err != nil {
+ log.G(ctx).WithError(err).Warnf("cannot detect whether \"userxattr\" option needs to be used, assuming to be %v", userxattr)
+ }
+
+ o := &snapshotter{
+ root: root,
+ ms: ms,
+ asyncRemove: config.asyncRemove,
+ fs: targetFs,
+ userxattr: userxattr,
+ noRestore: config.noRestore,
+ allowInvalidMountsOnRestart: config.allowInvalidMountsOnRestart,
+ }
+
+ if err := o.restoreRemoteSnapshot(ctx); err != nil {
+ return nil, fmt.Errorf("failed to restore remote snapshot: %w", err)
+ }
+
+ return o, nil
+}
+
+// Stat returns the info for an active or committed snapshot by name or
+// key.
+//
+// Should be used for parent resolution, existence checks and to discern
+// the kind of snapshot.
+func (o *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) {
+ ctx, t, err := o.ms.TransactionContext(ctx, false)
+ if err != nil {
+ return snapshots.Info{}, err
+ }
+ defer t.Rollback()
+ _, info, _, err := storage.GetInfo(ctx, key)
+ if err != nil {
+ return snapshots.Info{}, err
+ }
+
+ return info, nil
+}
+
+func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
+ ctx, t, err := o.ms.TransactionContext(ctx, true)
+ if err != nil {
+ return snapshots.Info{}, err
+ }
+
+ info, err = storage.UpdateInfo(ctx, info, fieldpaths...)
+ if err != nil {
+ t.Rollback()
+ return snapshots.Info{}, err
+ }
+
+ if err := t.Commit(); err != nil {
+ return snapshots.Info{}, err
+ }
+
+ return info, nil
+}
+
+// Usage returns the resources taken by the snapshot identified by key.
+//
+// For active snapshots, this will scan the usage of the overlay "diff" (aka
+// "upper") directory and may take some time.
+// For remote snapshots, no scan is performed; the number of inodes and the
+// sizes are reported as "zero".
+//
+// For committed snapshots, the value is returned from the metadata database.
+func (o *snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) {
+ ctx, t, err := o.ms.TransactionContext(ctx, false)
+ if err != nil {
+ return snapshots.Usage{}, err
+ }
+ id, info, usage, err := storage.GetInfo(ctx, key)
+ t.Rollback() // transaction no longer needed at this point.
+
+ if err != nil {
+ return snapshots.Usage{}, err
+ }
+
+ upperPath := o.upperPath(id)
+
+ if info.Kind == snapshots.KindActive {
+ du, err := fs.DiskUsage(ctx, upperPath)
+ if err != nil {
+ // TODO(stevvooe): Consider not reporting an error in this case.
+ return snapshots.Usage{}, err
+ }
+
+ usage = snapshots.Usage(du)
+ }
+
+ return usage, nil
+}
+
+func (o *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
+ s, err := o.createSnapshot(ctx, snapshots.KindActive, key, parent, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ // Try to prepare the remote snapshot. If succeeded, we commit the snapshot now
+ // and return ErrAlreadyExists.
+ var base snapshots.Info
+ for _, opt := range opts {
+ if err := opt(&base); err != nil {
+ return nil, err
+ }
+ }
+ if target, ok := base.Labels[targetSnapshotLabel]; ok {
+ // NOTE: If passed labels include a target of the remote snapshot, `Prepare`
+ // must log whether this method succeeded to prepare that remote snapshot
+ // or not, using the key `remoteSnapshotLogKey` defined in the above. This
+ // log is used by tests in this project.
+ lCtx := log.WithLogger(ctx, log.G(ctx).WithField("key", key).WithField("parent", parent))
+ if err := o.prepareRemoteSnapshot(lCtx, key, base.Labels); err != nil {
+ log.G(lCtx).WithField(remoteSnapshotLogKey, prepareFailed).
+ WithError(err).Warn("failed to prepare remote snapshot")
+ } else {
+ base.Labels[remoteLabel] = remoteLabelVal // Mark this snapshot as remote
+ err := o.commit(ctx, true, target, key, append(opts, snapshots.WithLabels(base.Labels))...)
+ if err == nil || errdefs.IsAlreadyExists(err) {
+ // count also AlreadyExists as "success"
+ log.G(lCtx).WithField(remoteSnapshotLogKey, prepareSucceeded).Debug("prepared remote snapshot")
+ return nil, fmt.Errorf("target snapshot %q: %w", target, errdefs.ErrAlreadyExists)
+ }
+ log.G(lCtx).WithField(remoteSnapshotLogKey, prepareFailed).
+ WithError(err).Warn("failed to internally commit remote snapshot")
+ // Don't fall back here (i.e. prohibit re-using this key) because the FileSystem
+ // has possibly done some work on this "upper" directory.
+ return nil, err
+ }
+ }
+ return o.mounts(ctx, s, parent)
+}
+
+func (o *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
+ s, err := o.createSnapshot(ctx, snapshots.KindView, key, parent, opts)
+ if err != nil {
+ return nil, err
+ }
+ return o.mounts(ctx, s, parent)
+}
+
+// Mounts returns the mounts for the transaction identified by key. Can be
+// called on a read-write or readonly transaction.
+//
+// This can be used to recover mounts after calling View or Prepare.
+func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
+ ctx, t, err := o.ms.TransactionContext(ctx, false)
+ if err != nil {
+ return nil, err
+ }
+ s, err := storage.GetSnapshot(ctx, key)
+ t.Rollback()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get active mount: %w", err)
+ }
+ return o.mounts(ctx, s, key)
+}
+
+func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
+ return o.commit(ctx, false, name, key, opts...)
+}
+
+func (o *snapshotter) commit(ctx context.Context, isRemote bool, name, key string, opts ...snapshots.Opt) error {
+ ctx, t, err := o.ms.TransactionContext(ctx, true)
+ if err != nil {
+ return err
+ }
+
+ rollback := true
+ defer func() {
+ if rollback {
+ if rerr := t.Rollback(); rerr != nil {
+ log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
+ }
+ }
+ }()
+
+ // grab the existing id
+ id, _, usage, err := storage.GetInfo(ctx, key)
+ if err != nil {
+ return err
+ }
+
+ if !isRemote { // skip diskusage for remote snapshots for allowing lazy preparation of nodes
+ du, err := fs.DiskUsage(ctx, o.upperPath(id))
+ if err != nil {
+ return err
+ }
+ usage = snapshots.Usage(du)
+ }
+
+ if _, err = storage.CommitActive(ctx, key, name, usage, opts...); err != nil {
+ return fmt.Errorf("failed to commit snapshot: %w", err)
+ }
+
+ rollback = false
+ return t.Commit()
+}
+
+// Remove abandons the snapshot identified by key. The snapshot will
+// immediately become unavailable and unrecoverable. Disk space will
+// be freed up on the next call to `Cleanup`.
+func (o *snapshotter) Remove(ctx context.Context, key string) (err error) {
+ ctx, t, err := o.ms.TransactionContext(ctx, true)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if rerr := t.Rollback(); rerr != nil {
+ log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
+ }
+ }
+ }()
+
+ _, _, err = storage.Remove(ctx, key)
+ if err != nil {
+ return fmt.Errorf("failed to remove: %w", err)
+ }
+
+ if !o.asyncRemove {
+ var removals []string
+ const cleanupCommitted = false
+ removals, err = o.getCleanupDirectories(ctx, t, cleanupCommitted)
+ if err != nil {
+ return fmt.Errorf("unable to get directories for removal: %w", err)
+ }
+
+ // Remove directories after the transaction is closed, failures must not
+ // return error since the transaction is committed with the removal
+ // key no longer available.
+ defer func() {
+ if err == nil {
+ for _, dir := range removals {
+ if err := o.cleanupSnapshotDirectory(ctx, dir); err != nil {
+ log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory")
+ }
+ }
+ }
+ }()
+
+ }
+
+ return t.Commit()
+}
+
+// Walk the snapshots.
+func (o *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error {
+ ctx, t, err := o.ms.TransactionContext(ctx, false)
+ if err != nil {
+ return err
+ }
+ defer t.Rollback()
+ return storage.WalkInfo(ctx, fn, fs...)
+}
+
+// Cleanup cleans up disk resources from removed or abandoned snapshots
+func (o *snapshotter) Cleanup(ctx context.Context) error {
+ const cleanupCommitted = false
+ return o.cleanup(ctx, cleanupCommitted)
+}
+
+func (o *snapshotter) cleanup(ctx context.Context, cleanupCommitted bool) error {
+ cleanup, err := o.cleanupDirectories(ctx, cleanupCommitted)
+ if err != nil {
+ return err
+ }
+
+ log.G(ctx).Debugf("cleanup: dirs=%v", cleanup)
+ for _, dir := range cleanup {
+ if err := o.cleanupSnapshotDirectory(ctx, dir); err != nil {
+ log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory")
+ }
+ }
+
+ return nil
+}
+
+func (o *snapshotter) cleanupDirectories(ctx context.Context, cleanupCommitted bool) ([]string, error) {
+ // Get a write transaction to ensure no other write transaction can be entered
+ // while the cleanup is scanning.
+ ctx, t, err := o.ms.TransactionContext(ctx, true)
+ if err != nil {
+ return nil, err
+ }
+
+ defer t.Rollback()
+ return o.getCleanupDirectories(ctx, t, cleanupCommitted)
+}
+
+func (o *snapshotter) getCleanupDirectories(ctx context.Context, t storage.Transactor, cleanupCommitted bool) ([]string, error) {
+ ids, err := storage.IDMap(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ snapshotDir := filepath.Join(o.root, "snapshots")
+ fd, err := os.Open(snapshotDir)
+ if err != nil {
+ return nil, err
+ }
+ defer fd.Close()
+
+ dirs, err := fd.Readdirnames(0)
+ if err != nil {
+ return nil, err
+ }
+
+ cleanup := []string{}
+ for _, d := range dirs {
+ if !cleanupCommitted {
+ if _, ok := ids[d]; ok {
+ continue
+ }
+ }
+
+ cleanup = append(cleanup, filepath.Join(snapshotDir, d))
+ }
+
+ return cleanup, nil
+}
+
+func (o *snapshotter) cleanupSnapshotDirectory(ctx context.Context, dir string) error {
+ // On a remote snapshot, the layer is mounted on the "fs" directory.
+ // We use Filesystem's Unmount API so that it can do necessary finalization
+ // before/after the unmount.
+ mp := filepath.Join(dir, "fs")
+ if err := o.fs.Unmount(ctx, mp); err != nil {
+ log.G(ctx).WithError(err).WithField("dir", mp).Debug("failed to unmount")
+ }
+ if err := os.RemoveAll(dir); err != nil {
+ return fmt.Errorf("failed to remove directory %q: %w", dir, err)
+ }
+ return nil
+}
+
+func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ storage.Snapshot, err error) {
+ ctx, t, err := o.ms.TransactionContext(ctx, true)
+ if err != nil {
+ return storage.Snapshot{}, err
+ }
+
+ var td, path string
+ defer func() {
+ if err != nil {
+ if td != "" {
+ if err1 := o.cleanupSnapshotDirectory(ctx, td); err1 != nil {
+ log.G(ctx).WithError(err1).Warn("failed to cleanup temp snapshot directory")
+ }
+ }
+ if path != "" {
+ if err1 := o.cleanupSnapshotDirectory(ctx, path); err1 != nil {
+ log.G(ctx).WithError(err1).WithField("path", path).Error("failed to reclaim snapshot directory, directory may need removal")
+ err = fmt.Errorf("failed to remove path: %v: %w", err1, err)
+ }
+ }
+ }
+ }()
+
+ snapshotDir := filepath.Join(o.root, "snapshots")
+ td, err = o.prepareDirectory(ctx, snapshotDir, kind)
+ if err != nil {
+ if rerr := t.Rollback(); rerr != nil {
+ log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
+ }
+ return storage.Snapshot{}, fmt.Errorf("failed to create prepare snapshot dir: %w", err)
+ }
+ rollback := true
+ defer func() {
+ if rollback {
+ if rerr := t.Rollback(); rerr != nil {
+ log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
+ }
+ }
+ }()
+
+ s, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...)
+ if err != nil {
+ return storage.Snapshot{}, fmt.Errorf("failed to create snapshot: %w", err)
+ }
+
+ if len(s.ParentIDs) > 0 {
+ st, err := os.Stat(o.upperPath(s.ParentIDs[0]))
+ if err != nil {
+ return storage.Snapshot{}, fmt.Errorf("failed to stat parent: %w", err)
+ }
+
+ stat := st.Sys().(*syscall.Stat_t)
+
+ if err := os.Lchown(filepath.Join(td, "fs"), int(stat.Uid), int(stat.Gid)); err != nil {
+ if rerr := t.Rollback(); rerr != nil {
+ log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
+ }
+ return storage.Snapshot{}, fmt.Errorf("failed to chown: %w", err)
+ }
+ }
+
+ path = filepath.Join(snapshotDir, s.ID)
+ if err = os.Rename(td, path); err != nil {
+ return storage.Snapshot{}, fmt.Errorf("failed to rename: %w", err)
+ }
+ td = ""
+
+ rollback = false
+ if err = t.Commit(); err != nil {
+ return storage.Snapshot{}, fmt.Errorf("commit failed: %w", err)
+ }
+
+ return s, nil
+}
+
+func (o *snapshotter) prepareDirectory(ctx context.Context, snapshotDir string, kind snapshots.Kind) (string, error) {
+ td, err := os.MkdirTemp(snapshotDir, "new-")
+ if err != nil {
+ return "", fmt.Errorf("failed to create temp dir: %w", err)
+ }
+
+ if err := os.Mkdir(filepath.Join(td, "fs"), 0755); err != nil {
+ return td, err
+ }
+
+ if kind == snapshots.KindActive {
+ if err := os.Mkdir(filepath.Join(td, "work"), 0711); err != nil {
+ return td, err
+ }
+ }
+
+ return td, nil
+}
+
+func (o *snapshotter) mounts(ctx context.Context, s storage.Snapshot, checkKey string) ([]mount.Mount, error) {
+ // Make sure that all layers lower than the target layer are available
+ if checkKey != "" && !o.checkAvailability(ctx, checkKey) {
+ return nil, fmt.Errorf("layer %q unavailable: %w", s.ID, errdefs.ErrUnavailable)
+ }
+
+ if len(s.ParentIDs) == 0 {
+ // if we only have one layer/no parents then just return a bind mount as overlay
+ // will not work
+ roFlag := "rw"
+ if s.Kind == snapshots.KindView {
+ roFlag = "ro"
+ }
+
+ return []mount.Mount{
+ {
+ Source: o.upperPath(s.ID),
+ Type: "bind",
+ Options: []string{
+ roFlag,
+ "rbind",
+ },
+ },
+ }, nil
+ }
+ var options []string
+
+ if s.Kind == snapshots.KindActive {
+ options = append(options,
+ fmt.Sprintf("workdir=%s", o.workPath(s.ID)),
+ fmt.Sprintf("upperdir=%s", o.upperPath(s.ID)),
+ )
+ } else if len(s.ParentIDs) == 1 {
+ return []mount.Mount{
+ {
+ Source: o.upperPath(s.ParentIDs[0]),
+ Type: "bind",
+ Options: []string{
+ "ro",
+ "rbind",
+ },
+ },
+ }, nil
+ }
+
+ parentPaths := make([]string, len(s.ParentIDs))
+ for i := range s.ParentIDs {
+ parentPaths[i] = o.upperPath(s.ParentIDs[i])
+ }
+
+ options = append(options, fmt.Sprintf("lowerdir=%s", strings.Join(parentPaths, ":")))
+ if o.userxattr {
+ options = append(options, "userxattr")
+ }
+ return []mount.Mount{
+ {
+ Type: "overlay",
+ Source: "overlay",
+ Options: options,
+ },
+ }, nil
+}
+
+func (o *snapshotter) upperPath(id string) string {
+ return filepath.Join(o.root, "snapshots", id, "fs")
+}
+
+func (o *snapshotter) workPath(id string) string {
+ return filepath.Join(o.root, "snapshots", id, "work")
+}
+
+// Close closes the snapshotter
+func (o *snapshotter) Close() error {
+ // unmount all mounts including Committed
+ const cleanupCommitted = true
+ ctx := context.Background()
+ if err := o.cleanup(ctx, cleanupCommitted); err != nil {
+ log.G(ctx).WithError(err).Warn("failed to cleanup")
+ }
+ return o.ms.Close()
+}
+
+// prepareRemoteSnapshot tries to prepare the snapshot as a remote snapshot
+// using filesystems registered in this snapshotter.
+func (o *snapshotter) prepareRemoteSnapshot(ctx context.Context, key string, labels map[string]string) error {
+ ctx, t, err := o.ms.TransactionContext(ctx, false)
+ if err != nil {
+ return err
+ }
+ defer t.Rollback()
+ id, _, _, err := storage.GetInfo(ctx, key)
+ if err != nil {
+ return err
+ }
+
+ mountpoint := o.upperPath(id)
+ log.G(ctx).Infof("preparing filesystem mount at mountpoint=%v", mountpoint)
+
+ return o.fs.Mount(ctx, mountpoint, labels)
+}
+
+// checkAvailability checks the availability of the specified layer and all lower
+// layers using filesystem's checking functionality.
+func (o *snapshotter) checkAvailability(ctx context.Context, key string) bool {
+ log.G(ctx).WithField("key", key).Debug("checking layer availability")
+
+ ctx, t, err := o.ms.TransactionContext(ctx, false)
+ if err != nil {
+ log.G(ctx).WithError(err).Warn("failed to get transaction")
+ return false
+ }
+ defer t.Rollback()
+
+ eg, egCtx := errgroup.WithContext(ctx)
+ for cKey := key; cKey != ""; {
+ id, info, _, err := storage.GetInfo(ctx, cKey)
+ if err != nil {
+ log.G(ctx).WithError(err).Warnf("failed to get info of %q", cKey)
+ return false
+ }
+ mp := o.upperPath(id)
+ lCtx := log.WithLogger(ctx, log.G(ctx).WithField("mount-point", mp))
+ if _, ok := info.Labels[remoteLabel]; ok {
+ eg.Go(func() error {
+ log.G(lCtx).Debug("checking mount point")
+ if err := o.fs.Check(egCtx, mp, info.Labels); err != nil {
+ log.G(lCtx).WithError(err).Warn("layer is unavailable")
+ return err
+ }
+ return nil
+ })
+ } else {
+ log.G(lCtx).Debug("layer is normal snapshot(overlayfs)")
+ }
+ cKey = info.Parent
+ }
+ if err := eg.Wait(); err != nil {
+ return false
+ }
+ return true
+}
+
+func (o *snapshotter) restoreRemoteSnapshot(ctx context.Context) error {
+ mounts, err := mountinfo.GetMounts(nil)
+ if err != nil {
+ return err
+ }
+ for _, m := range mounts {
+ if strings.HasPrefix(m.Mountpoint, filepath.Join(o.root, "snapshots")) {
+ if err := syscall.Unmount(m.Mountpoint, syscall.MNT_FORCE); err != nil {
+ return fmt.Errorf("failed to unmount %s: %w", m.Mountpoint, err)
+ }
+ }
+ }
+
+ if o.noRestore {
+ return nil
+ }
+
+ var task []snapshots.Info
+ if err := o.Walk(ctx, func(ctx context.Context, info snapshots.Info) error {
+ if _, ok := info.Labels[remoteLabel]; ok {
+ task = append(task, info)
+ }
+ return nil
+ }); err != nil && !errdefs.IsNotFound(err) {
+ return err
+ }
+ for _, info := range task {
+ if err := o.prepareRemoteSnapshot(ctx, info.Name, info.Labels); err != nil {
+ if o.allowInvalidMountsOnRestart {
+ log.G(ctx).WithError(err).Warnf("failed to restore remote snapshot %s; remove this snapshot manually", info.Name)
+ // This snapshot mount is invalid but allow this.
+ // NOTE: snapshotter.Mount() will fail to return the mountpoint of these invalid snapshots so
+ // containerd cannot use them anymore. User needs to manually remove the snapshots from
+ // containerd's metadata store using ctr (e.g. `ctr snapshot rm`).
+ continue
+ }
+ return fmt.Errorf("failed to prepare remote snapshot: %s: %w", info.Name, err)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/task/task.go b/vendor/github.com/containerd/stargz-snapshotter/task/task.go
new file mode 100644
index 000000000000..9194e8156846
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/task/task.go
@@ -0,0 +1,155 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package task
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/sync/semaphore"
+)
+
+// NewBackgroundTaskManager provides a task manager. You can specify the
+// concurrency of background tasks. A background task is forced to wait until
+// no prioritized task has been running for some period. You can specify that
+// period through the argument of this function, too.
+func NewBackgroundTaskManager(concurrency int64, period time.Duration) *BackgroundTaskManager {
+ return &BackgroundTaskManager{
+ backgroundSem: semaphore.NewWeighted(concurrency),
+ prioritizedTaskSilencePeriod: period,
+ prioritizedTaskStartNotify: make(chan struct{}),
+ prioritizedTaskDoneCond: sync.NewCond(&sync.Mutex{}),
+ }
+}
+
+// BackgroundTaskManager is a task manager which manages the execution of
+// prioritized tasks and background tasks. Background tasks are less important
+// than prioritized tasks, so you can keep them from consuming compute
+// resources (CPU, network, etc.) while more important (i.e. prioritized)
+// tasks are running.
+//
+// When you run a prioritized task and don't want background tasks to use
+// resources, tell this manager by calling the DoPrioritizedTask method. The
+// DonePrioritizedTask method must be called at the end of the prioritized
+// task's execution.
+//
+// To run a background task, use the InvokeBackgroundTask method. The
+// background task must be cancellable via its context.Context argument. The
+// task is forced to wait until no prioritized task has been running for some
+// period. You can specify the period when creating this manager instance. A
+// limited number of background tasks run simultaneously, and you can specify
+// the concurrency when creating this manager instance too. If a prioritized
+// task starts while background tasks are running, all running background
+// tasks are cancelled via their contexts. These cancelled tasks will be
+// executed again later, the same as other background tasks (when no
+// prioritized task has been running for some period).
+type BackgroundTaskManager struct {
+ prioritizedTasks int64
+ backgroundSem *semaphore.Weighted
+ prioritizedTaskSilencePeriod time.Duration
+ prioritizedTaskStartNotify chan struct{}
+ prioritizedTaskStartNotifyMu sync.Mutex
+ prioritizedTaskDoneCond *sync.Cond
+}
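+
+// A usage sketch of the protocol described above (the task bodies are
+// hypothetical):
+//
+//    tm := NewBackgroundTaskManager(2, time.Second)
+//
+//    // Prioritized work: background tasks pause while this runs.
+//    tm.DoPrioritizedTask()
+//    serveImportantRequest()
+//    tm.DonePrioritizedTask()
+//
+//    // Background work: cancelled and retried if prioritized work starts.
+//    go tm.InvokeBackgroundTask(func(ctx context.Context) {
+//        prefetchLayers(ctx)
+//    }, 10*time.Minute)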
+
+// DoPrioritizedTask tells the manager that we are running a prioritized task
+// and don't want background tasks to consume resources (CPU, network, etc.).
+func (ts *BackgroundTaskManager) DoPrioritizedTask() {
+ // Notify the prioritized task execution to background tasks.
+ ts.prioritizedTaskStartNotifyMu.Lock()
+ atomic.AddInt64(&ts.prioritizedTasks, 1)
+ close(ts.prioritizedTaskStartNotify)
+ ts.prioritizedTaskStartNotify = make(chan struct{})
+ ts.prioritizedTaskStartNotifyMu.Unlock()
+}
+
+// DonePrioritizedTask tells the manager that we've finished a prioritized task
+// so that background tasks may resume after the configured silence period.
+func (ts *BackgroundTaskManager) DonePrioritizedTask() {
+ go func() {
+ // Notify the task completion after `ts.prioritizedTaskSilencePeriod`
+ // so that background tasks aren't invoked immediately.
+ time.Sleep(ts.prioritizedTaskSilencePeriod)
+ atomic.AddInt64(&ts.prioritizedTasks, -1)
+ ts.prioritizedTaskDoneCond.L.Lock()
+ ts.prioritizedTaskDoneCond.Broadcast()
+ ts.prioritizedTaskDoneCond.L.Unlock()
+ }()
+}
+
+// InvokeBackgroundTask invokes a background task. The task is started only when
+// no prioritized tasks are running. Execution of a prioritized task stops the
+// execution of all background tasks. A background task must be cancellable via
+// its context.Context argument and must be able to be restarted later.
+func (ts *BackgroundTaskManager) InvokeBackgroundTask(do func(context.Context), timeout time.Duration) {
+ for {
+ // Wait until all prioritized tasks are done
+ for {
+ if atomic.LoadInt64(&ts.prioritizedTasks) <= 0 {
+ break
+ }
+
+ // waits until a prioritized task is done
+ ts.prioritizedTaskDoneCond.L.Lock()
+ if atomic.LoadInt64(&ts.prioritizedTasks) > 0 {
+ ts.prioritizedTaskDoneCond.Wait()
+ }
+ ts.prioritizedTaskDoneCond.L.Unlock()
+ }
+
+ // limited number of background tasks can run at once.
+ // if prioritized tasks are running, cancel this task.
+ if func() bool {
+ ts.backgroundSem.Acquire(context.Background(), 1)
+ defer ts.backgroundSem.Release(1)
+
+ // Get notified of prioritized task execution.
+ ts.prioritizedTaskStartNotifyMu.Lock()
+ ch := ts.prioritizedTaskStartNotify
+ tasks := atomic.LoadInt64(&ts.prioritizedTasks)
+ ts.prioritizedTaskStartNotifyMu.Unlock()
+ if tasks > 0 {
+ return false
+ }
+
+ // Invoke the background task. If some prioritized tasks are started during
+ // its execution, cancel it and try it again later.
+ var (
+ done = make(chan struct{})
+ ctx, cancel = context.WithTimeout(context.Background(), timeout)
+ )
+ defer cancel()
+ go func() {
+ do(ctx)
+ close(done)
+ }()
+
+ // Wait until the background task is done or canceled.
+ select {
+ case <-ch: // some prioritized tasks started; retry it later
+ cancel()
+ return false
+ case <-done: // All tasks completed
+ }
+ return true
+ }() {
+ break
+ }
+ }
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/cacheutil/lrucache.go b/vendor/github.com/containerd/stargz-snapshotter/util/cacheutil/lrucache.go
new file mode 100644
index 000000000000..b43b4919409c
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/util/cacheutil/lrucache.go
@@ -0,0 +1,140 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package cacheutil
+
+import (
+ "sync"
+
+ "github.com/golang/groupcache/lru"
+)
+
+// LRUCache is "groupcache/lru"-like cache. The difference is that "groupcache/lru" immediately
+// finalizes theevicted contents using OnEvicted callback but our version strictly tracks the
+// reference counts of contents and calls OnEvicted when nobody refers to the evicted contents.
+type LRUCache struct {
+ cache *lru.Cache
+ mu sync.Mutex
+
+ // OnEvicted optionally specifies a callback function to be
+ // executed when an entry is purged from the cache.
+ OnEvicted func(key string, value interface{})
+}
+
+// NewLRUCache creates new lru cache.
+func NewLRUCache(maxEntries int) *LRUCache {
+ inner := lru.New(maxEntries)
+ inner.OnEvicted = func(key lru.Key, value interface{}) {
+ // Decrease the ref count incremented in Add().
+ // When nobody refers to this value, this value will be finalized via refCounter.
+ value.(*refCounter).finalize()
+ }
+ return &LRUCache{
+ cache: inner,
+ }
+}
+
+// Get retrieves the specified object from the cache and increments the reference counter of the
+// target content. Client must call `done` callback to decrease the reference count when the value
+// will no longer be used.
+func (c *LRUCache) Get(key string) (value interface{}, done func(), ok bool) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ o, ok := c.cache.Get(key)
+ if !ok {
+ return nil, nil, false
+ }
+ rc := o.(*refCounter)
+ rc.inc()
+ return rc.v, c.decreaseOnceFunc(rc), true
+}
+
+// Add adds object to the cache and returns the cached contents with incrementing the reference count.
+// If the specified content already exists in the cache, this sets `added` to false and returns
+// "already cached" content (i.e. doesn't replace the content with the new one). Client must call
+// `done` callback to decrease the counter when the value will no longer be used.
+func (c *LRUCache) Add(key string, value interface{}) (cachedValue interface{}, done func(), added bool) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if o, ok := c.cache.Get(key); ok {
+ rc := o.(*refCounter)
+ rc.inc()
+ return rc.v, c.decreaseOnceFunc(rc), false
+ }
+ rc := &refCounter{
+ key: key,
+ v: value,
+ onEvicted: c.OnEvicted,
+ }
+ rc.initialize() // Keep this object having at least 1 ref count (will be decreased in OnEviction)
+ rc.inc() // The client references this object (will be decreased on "done")
+ c.cache.Add(key, rc)
+ return rc.v, c.decreaseOnceFunc(rc), true
+}
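+
+// A usage sketch of the reference-counting contract above (the key, blob, and
+// release func are hypothetical):
+//
+//    c := NewLRUCache(100)
+//    c.OnEvicted = func(key string, value interface{}) { release(value) }
+//    v, done, _ := c.Add("layer-key", blob)
+//    _ = v.([]byte) // use the cached value
+//    done()         // drop our reference; OnEvicted runs once evicted and unreferenced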
+
+// Remove removes the specified contents from the cache. OnEvicted callback will be called when
+// nobody refers to the removed content.
+func (c *LRUCache) Remove(key string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.cache.Remove(key)
+}
+
+func (c *LRUCache) decreaseOnceFunc(rc *refCounter) func() {
+ var once sync.Once
+ return func() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ once.Do(func() { rc.dec() })
+ }
+}
+
+type refCounter struct {
+ onEvicted func(key string, value interface{})
+
+ key string
+ v interface{}
+ refCounts int64
+
+ mu sync.Mutex
+
+ initializeOnce sync.Once
+ finalizeOnce sync.Once
+}
+
+func (r *refCounter) inc() {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.refCounts++
+}
+
+func (r *refCounter) dec() {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.refCounts--
+ if r.refCounts <= 0 && r.onEvicted != nil {
+ // nobody will refer this object
+ r.onEvicted(r.key, r.v)
+ }
+}
+
+func (r *refCounter) initialize() {
+ r.initializeOnce.Do(func() { r.inc() })
+}
+
+func (r *refCounter) finalize() {
+ r.finalizeOnce.Do(func() { r.dec() })
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/cacheutil/ttlcache.go b/vendor/github.com/containerd/stargz-snapshotter/util/cacheutil/ttlcache.go
new file mode 100644
index 000000000000..92954dc70795
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/util/cacheutil/ttlcache.go
@@ -0,0 +1,115 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package cacheutil
+
+import (
+ "sync"
+ "time"
+)
+
+// TTLCache is a TTL-based cache with reference counters.
+// Each element is deleted as soon as the configured TTL expires.
+type TTLCache struct {
+ m map[string]*refCounterWithTimer
+ mu sync.Mutex
+ ttl time.Duration
+
+ // OnEvicted optionally specifies a callback function to be
+ // executed when an entry is purged from the cache.
+ OnEvicted func(key string, value interface{})
+}
+
+// NewTTLCache creates a new ttl-based cache.
+func NewTTLCache(ttl time.Duration) *TTLCache {
+ return &TTLCache{
+ m: make(map[string]*refCounterWithTimer),
+ ttl: ttl,
+ }
+}
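+
+// A usage sketch (the key and value are hypothetical); the Get/Add/Remove
+// contract mirrors LRUCache:
+//
+//    c := NewTTLCache(30 * time.Second)
+//    v, done, _ := c.Add("token", tok)
+//    defer done() // OnEvicted fires once the TTL expires and all references are done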
+
+// Get retrieves the specified object from the cache and increments the reference counter of the
+// target content. Client must call `done` callback to decrease the reference count when the value
+// will no longer be used.
+func (c *TTLCache) Get(key string) (value interface{}, done func(), ok bool) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ rc, ok := c.m[key]
+ if !ok {
+ return nil, nil, false
+ }
+ rc.inc()
+ return rc.v, c.decreaseOnceFunc(rc), true
+}
+
+// Add adds object to the cache and returns the cached contents with incrementing the reference count.
+// If the specified content already exists in the cache, this sets `added` to false and returns
+// "already cached" content (i.e. doesn't replace the content with the new one). Client must call
+// `done` callback to decrease the counter when the value will no longer be used.
+func (c *TTLCache) Add(key string, value interface{}) (cachedValue interface{}, done func(), added bool) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if rc, ok := c.m[key]; ok {
+ rc.inc()
+ return rc.v, c.decreaseOnceFunc(rc), false
+ }
+ rc := &refCounterWithTimer{
+ refCounter: &refCounter{
+ key: key,
+ v: value,
+ onEvicted: c.OnEvicted,
+ },
+ }
+ rc.initialize() // Keep this object having at least 1 ref count (will be decreased in OnEviction)
+ rc.inc() // The client references this object (will be decreased on "done")
+ rc.t = time.AfterFunc(c.ttl, func() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.evictLocked(key)
+ })
+ c.m[key] = rc
+ return rc.v, c.decreaseOnceFunc(rc), true
+}
+
+// Remove removes the specified contents from the cache. OnEvicted callback will be called when
+// nobody refers to the removed content.
+func (c *TTLCache) Remove(key string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.evictLocked(key)
+}
+
+func (c *TTLCache) evictLocked(key string) {
+ if rc, ok := c.m[key]; ok {
+ delete(c.m, key)
+ rc.t.Stop() // stop the timer to prevent this content from being evicted later
+ rc.finalize()
+ }
+}
+
+func (c *TTLCache) decreaseOnceFunc(rc *refCounterWithTimer) func() {
+ var once sync.Once
+ return func() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ once.Do(func() { rc.dec() })
+ }
+}
+
+type refCounterWithTimer struct {
+ *refCounter
+ t *time.Timer
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/containerdutil/manifest.go b/vendor/github.com/containerd/stargz-snapshotter/util/containerdutil/manifest.go
new file mode 100644
index 000000000000..99e201b80ecf
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/util/containerdutil/manifest.go
@@ -0,0 +1,220 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package containerdutil
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+ "time"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/core/remotes"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/platforms"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+func ManifestDesc(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Descriptor, error) {
+ var (
+ limit = 1
+ m []ocispec.Descriptor
+ wasIndex bool
+ )
+ if err := images.Walk(ctx, images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+ switch desc.MediaType {
+ case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+ p, err := content.ReadBlob(ctx, provider, desc)
+ if err != nil {
+ return nil, err
+ }
+ if err := ValidateMediaType(p, desc.MediaType); err != nil {
+ return nil, err
+ }
+ var manifest ocispec.Manifest
+ if err := json.Unmarshal(p, &manifest); err != nil {
+ return nil, err
+ }
+ if desc.Digest != image.Digest {
+ if desc.Platform != nil && !platform.Match(*desc.Platform) {
+ return nil, nil
+ }
+ if desc.Platform == nil {
+ p, err := content.ReadBlob(ctx, provider, manifest.Config)
+ if err != nil {
+ return nil, err
+ }
+ var image ocispec.Image
+ if err := json.Unmarshal(p, &image); err != nil {
+ return nil, err
+ }
+ if !platform.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) {
+ return nil, nil
+ }
+ }
+ }
+ m = append(m, desc)
+ return nil, nil
+ case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+ p, err := content.ReadBlob(ctx, provider, desc)
+ if err != nil {
+ return nil, err
+ }
+ if err := ValidateMediaType(p, desc.MediaType); err != nil {
+ return nil, err
+ }
+ var idx ocispec.Index
+ if err := json.Unmarshal(p, &idx); err != nil {
+ return nil, err
+ }
+ var descs []ocispec.Descriptor
+ for _, d := range idx.Manifests {
+ if d.Platform == nil || platform.Match(*d.Platform) {
+ descs = append(descs, d)
+ }
+ }
+ sort.SliceStable(descs, func(i, j int) bool {
+ if descs[i].Platform == nil {
+ return false
+ }
+ if descs[j].Platform == nil {
+ return true
+ }
+ return platform.Less(*descs[i].Platform, *descs[j].Platform)
+ })
+ wasIndex = true
+ if len(descs) > limit {
+ return descs[:limit], nil
+ }
+ return descs, nil
+ }
+ return nil, fmt.Errorf("unexpected media type %v for %v: %w", desc.MediaType, desc.Digest, errdefs.ErrNotFound)
+ }), image); err != nil {
+ return ocispec.Descriptor{}, err
+ }
+ if len(m) == 0 {
+ err := fmt.Errorf("manifest %v: %w", image.Digest, errdefs.ErrNotFound)
+ if wasIndex {
+ err = fmt.Errorf("no match for platform in manifest %v: %w", image.Digest, errdefs.ErrNotFound)
+ }
+ return ocispec.Descriptor{}, err
+ }
+ return m[0], nil
+}
+
+// Forked from github.com/containerd/containerd/v2/core/images/image.go
+// commit: a776a27af54a803657d002e7574a4425b3949f56
+
+// unknownDocument represents a manifest, manifest list, or index that has not
+// yet been validated.
+type unknownDocument struct {
+ MediaType string `json:"mediaType,omitempty"`
+ Config json.RawMessage `json:"config,omitempty"`
+ Layers json.RawMessage `json:"layers,omitempty"`
+ Manifests json.RawMessage `json:"manifests,omitempty"`
+ FSLayers json.RawMessage `json:"fsLayers,omitempty"` // schema 1
+}
+
+// ValidateMediaType returns an error if the byte slice is invalid JSON or if
+// the media type identifies the blob as one format but it contains elements of
+// another format.
+func ValidateMediaType(b []byte, mt string) error {
+ var doc unknownDocument
+ if err := json.Unmarshal(b, &doc); err != nil {
+ return err
+ }
+ if len(doc.FSLayers) != 0 {
+ return fmt.Errorf("media-type: schema 1 not supported")
+ }
+ switch mt {
+ case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+ if len(doc.Manifests) != 0 ||
+ doc.MediaType == images.MediaTypeDockerSchema2ManifestList ||
+ doc.MediaType == ocispec.MediaTypeImageIndex {
+ return fmt.Errorf("media-type: expected manifest but found index (%s)", mt)
+ }
+ case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+ if len(doc.Config) != 0 || len(doc.Layers) != 0 ||
+ doc.MediaType == images.MediaTypeDockerSchema2Manifest ||
+ doc.MediaType == ocispec.MediaTypeImageManifest {
+ return fmt.Errorf("media-type: expected index but found manifest (%s)", mt)
+ }
+ }
+ return nil
+}
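+
+// Illustrative usage (editor's sketch, not part of the vendored upstream
+// source): reject a blob whose bytes describe an index while the descriptor
+// claims a manifest media type.
+//
+//	blob := []byte(`{"manifests": [{"mediaType": "application/vnd.oci.image.manifest.v1+json"}]}`)
+//	err := ValidateMediaType(blob, ocispec.MediaTypeImageManifest)
+//	// err is non-nil: "media-type: expected manifest but found index (...)"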
+
+// FetchManifestPlatform fetches the manifest of the specified platform.
+func FetchManifestPlatform(ctx context.Context, fetcher remotes.Fetcher, desc ocispec.Descriptor, platform ocispec.Platform) (ocispec.Manifest, error) {
+ ctx, cancel := context.WithTimeout(ctx, time.Minute)
+ defer cancel()
+
+ r, err := fetcher.Fetch(ctx, desc)
+ if err != nil {
+ return ocispec.Manifest{}, err
+ }
+ defer r.Close()
+
+ var manifest ocispec.Manifest
+ switch desc.MediaType {
+ case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+ p, err := io.ReadAll(r)
+ if err != nil {
+ return ocispec.Manifest{}, err
+ }
+ if err := ValidateMediaType(p, desc.MediaType); err != nil {
+ return ocispec.Manifest{}, err
+ }
+ if err := json.Unmarshal(p, &manifest); err != nil {
+ return ocispec.Manifest{}, err
+ }
+ return manifest, nil
+ case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+ var index ocispec.Index
+ p, err := io.ReadAll(r)
+ if err != nil {
+ return ocispec.Manifest{}, err
+ }
+ if err := ValidateMediaType(p, desc.MediaType); err != nil {
+ return ocispec.Manifest{}, err
+ }
+ if err = json.Unmarshal(p, &index); err != nil {
+ return ocispec.Manifest{}, err
+ }
+ var target ocispec.Descriptor
+ found := false
+ for _, m := range index.Manifests {
+ p := platforms.DefaultSpec()
+ if m.Platform != nil {
+ p = *m.Platform
+ }
+ if !platforms.NewMatcher(platform).Match(p) {
+ continue
+ }
+ target = m
+ found = true
+ break
+ }
+ if !found {
+ return ocispec.Manifest{}, fmt.Errorf("no manifest found for platform")
+ }
+ return FetchManifestPlatform(ctx, fetcher, target, platform)
+ }
+ return ocispec.Manifest{}, fmt.Errorf("unknown mediatype %q", desc.MediaType)
+}
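+
+// Illustrative usage (editor's sketch, not part of the vendored upstream
+// source): resolve the linux/amd64 manifest behind an image index, given a
+// fetcher obtained from a remotes.Resolver.
+//
+//	m, err := FetchManifestPlatform(ctx, fetcher, indexDesc,
+//		ocispec.Platform{OS: "linux", Architecture: "amd64"})
+//	if err == nil {
+//		fmt.Println(len(m.Layers)) // layers of the platform-specific manifest
+//	}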
diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/ioutils/countwriter.go b/vendor/github.com/containerd/stargz-snapshotter/util/ioutils/countwriter.go
new file mode 100644
index 000000000000..159292437ad6
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/util/ioutils/countwriter.go
@@ -0,0 +1,38 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package ioutils
+
+import "sync"
+
+type CountWriter struct {
+ n int64
+ mu sync.Mutex
+}
+
+func (c *CountWriter) Write(p []byte) (n int, err error) {
+ c.mu.Lock()
+ c.n += int64(len(p))
+ c.mu.Unlock()
+ return len(p), nil
+}
+
+func (c *CountWriter) Size() (n int64) {
+ c.mu.Lock()
+ n = c.n
+ c.mu.Unlock()
+ return
+}
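+
+// Illustrative usage (editor's sketch, not part of the vendored upstream
+// source): tee a stream through a CountWriter to measure how many bytes pass.
+//
+//	var cw CountWriter
+//	r := io.TeeReader(src, &cw)
+//	// ... consume r ...
+//	total := cw.Size() // bytes observed so far, safe to call concurrently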
diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/namedmutex/namedmutex.go b/vendor/github.com/containerd/stargz-snapshotter/util/namedmutex/namedmutex.go
new file mode 100644
index 000000000000..2ca28fa783fc
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/util/namedmutex/namedmutex.go
@@ -0,0 +1,62 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package namedmutex provides NamedMutex, which wraps sync.Mutex
+// and provides mutexes keyed by name.
+package namedmutex
+
+import (
+ "sync"
+)
+
+// NamedMutex wraps sync.Mutex and provides mutexes keyed by name.
+type NamedMutex struct {
+ muMap map[string]*sync.Mutex
+ refMap map[string]int
+
+ mu sync.Mutex
+}
+
+// Lock locks the mutex of the given name
+func (nl *NamedMutex) Lock(name string) {
+ nl.mu.Lock()
+ if nl.muMap == nil {
+ nl.muMap = make(map[string]*sync.Mutex)
+ }
+ if nl.refMap == nil {
+ nl.refMap = make(map[string]int)
+ }
+ if _, ok := nl.muMap[name]; !ok {
+ nl.muMap[name] = &sync.Mutex{}
+ }
+ mu := nl.muMap[name]
+ nl.refMap[name]++
+ nl.mu.Unlock()
+ mu.Lock()
+}
+
+// Unlock unlocks the mutex of the given name
+func (nl *NamedMutex) Unlock(name string) {
+ nl.mu.Lock()
+ mu := nl.muMap[name]
+ nl.refMap[name]--
+ if nl.refMap[name] <= 0 {
+ delete(nl.muMap, name)
+ delete(nl.refMap, name)
+ }
+ nl.mu.Unlock()
+ mu.Unlock()
+}
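+
+// Illustrative usage (editor's sketch, not part of the vendored upstream
+// source): serialize work per key without serializing unrelated keys.
+//
+//	var nm NamedMutex
+//	nm.Lock("layer-sha256-abc")
+//	defer nm.Unlock("layer-sha256-abc")
+//	// critical section for this key only; other names proceed in parallel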
diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/testutil/compression.go b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/compression.go
new file mode 100644
index 000000000000..bf5f8d9e7bcf
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/compression.go
@@ -0,0 +1,80 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package testutil
+
+import (
+ "bytes"
+ "io"
+
+ "github.com/containerd/stargz-snapshotter/estargz"
+ esgzexternaltoc "github.com/containerd/stargz-snapshotter/estargz/externaltoc"
+ "github.com/containerd/stargz-snapshotter/estargz/zstdchunked"
+ "github.com/klauspost/compress/zstd"
+)
+
+type Compression interface {
+ estargz.Compressor
+ estargz.Decompressor
+
+ // DecompressTOC decompresses the passed blob and returns a reader of TOC JSON.
+ // This is exposed so that it can be used from the metadata pkg.
+ DecompressTOC(io.Reader) (tocJSON io.ReadCloser, err error)
+}
+
+type CompressionFactory func() Compression
+
+type zstdCompression struct {
+ *zstdchunked.Compressor
+ *zstdchunked.Decompressor
+}
+
+func ZstdCompressionWithLevel(compressionLevel zstd.EncoderLevel) CompressionFactory {
+ return func() Compression {
+ return &zstdCompression{&zstdchunked.Compressor{CompressionLevel: compressionLevel}, &zstdchunked.Decompressor{}}
+ }
+}
+
+type gzipCompression struct {
+ *estargz.GzipCompressor
+ *estargz.GzipDecompressor
+}
+
+func GzipCompressionWithLevel(compressionLevel int) CompressionFactory {
+ return func() Compression {
+ return gzipCompression{estargz.NewGzipCompressorWithLevel(compressionLevel), &estargz.GzipDecompressor{}}
+ }
+}
+
+type externalTOCGzipCompression struct {
+ *esgzexternaltoc.GzipCompressor
+ *esgzexternaltoc.GzipDecompressor
+}
+
+func ExternalTOCGzipCompressionWithLevel(compressionLevel int) CompressionFactory {
+ return func() Compression {
+ compressor := esgzexternaltoc.NewGzipCompressorWithLevel(compressionLevel)
+ decompressor := esgzexternaltoc.NewGzipDecompressor(func() ([]byte, error) {
+ buf := new(bytes.Buffer)
+ if _, err := compressor.WriteTOCTo(buf); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+ })
+ return &externalTOCGzipCompression{compressor, decompressor}
+ }
+}
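+
+// Illustrative usage (editor's sketch, not part of the vendored upstream
+// source): pick a factory, then hand the Compression to test helpers that
+// need both the estargz Compressor and Decompressor sides.
+//
+//	factory := GzipCompressionWithLevel(9) // 9 = compress/gzip BestCompression
+//	c := factory()                         // implements Compressor, Decompressor and DecompressTOC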
diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/testutil/ensurehello.go b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/ensurehello.go
new file mode 100644
index 000000000000..c56df81c9c52
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/ensurehello.go
@@ -0,0 +1,80 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package testutil
+
+import (
+ "compress/gzip"
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images/archive"
+ "github.com/containerd/containerd/v2/plugins/content/local"
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+const (
+ // HelloArchiveURL points to an OCI archive of `hello-world`.
+ // Exported from `docker.io/library/hello-world@sha256:1a523af650137b8accdaed439c17d684df61ee4d74feac151b5b337bd29e7eec` .
+ // See /~https://github.com/AkihiroSuda/test-oci-archives/releases/tag/v20210101
+ HelloArchiveURL = "/~https://github.com/AkihiroSuda/test-oci-archives/releases/download/v20210101/hello-world.tar.gz"
+ // HelloArchiveDigest is the digest of the archive.
+ HelloArchiveDigest = "sha256:5aa022621c4de0e941ab2a30d4569c403e156b4ba2de2ec32e382ae8679f40e1"
+)
+
+// EnsureHello creates a temp content store and imports the `hello-world` image from HelloArchiveURL into it.
+func EnsureHello(ctx context.Context) (*ocispec.Descriptor, content.Store, error) {
+ // Pulling an image without the daemon is a mess, so we use an OCI archive here.
+ resp, err := http.Get(HelloArchiveURL)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ sha256Digester := digest.SHA256.Digester()
+ sha256Hasher := sha256Digester.Hash()
+ tr := io.TeeReader(resp.Body, sha256Hasher)
+ gzReader, err := gzip.NewReader(tr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ tempDir, err := os.MkdirTemp("", "test-estargz")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ cs, err := local.NewStore(tempDir)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ desc, err := archive.ImportIndex(ctx, cs, gzReader)
+ if err != nil {
+ return nil, nil, err
+ }
+ resp.Body.Close()
+ if d := sha256Digester.Digest().String(); d != HelloArchiveDigest {
+ err = fmt.Errorf("expected digest of %q to be %q, got %q", HelloArchiveURL, HelloArchiveDigest, d)
+ return nil, nil, err
+ }
+ return &desc, cs, nil
+}
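+
+// Illustrative usage (editor's sketch, not part of the vendored upstream
+// source): populate a throwaway content store for a test.
+//
+//	desc, cs, err := EnsureHello(ctx)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	// desc points at the imported index inside the local store cs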
diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/testutil/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/estargz.go
new file mode 100644
index 000000000000..3331a380ce2e
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/estargz.go
@@ -0,0 +1,74 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package testutil
+
+import (
+ "bytes"
+ "io"
+
+ "github.com/containerd/stargz-snapshotter/estargz"
+ digest "github.com/opencontainers/go-digest"
+)
+
+type buildEStargzOptions struct {
+ estargzOptions []estargz.Option
+ buildTarOptions []BuildTarOption
+}
+
+type BuildEStargzOption func(o *buildEStargzOptions) error
+
+// WithEStargzOptions specifies options for estargz lib
+func WithEStargzOptions(eo ...estargz.Option) BuildEStargzOption {
+ return func(o *buildEStargzOptions) error {
+ o.estargzOptions = append(o.estargzOptions, eo...)
+ return nil
+ }
+}
+
+// WithBuildTarOptions specifies the options for tar creation
+func WithBuildTarOptions(to ...BuildTarOption) BuildEStargzOption {
+ return func(o *buildEStargzOptions) error {
+ o.buildTarOptions = append(o.buildTarOptions, to...)
+ return nil
+ }
+}
+
+func BuildEStargz(ents []TarEntry, opts ...BuildEStargzOption) (*io.SectionReader, digest.Digest, error) {
+ var beOpts buildEStargzOptions
+ for _, o := range opts {
+ if err := o(&beOpts); err != nil {
+ return nil, "", err
+ }
+ }
+ tarBuf := new(bytes.Buffer)
+ if _, err := io.Copy(tarBuf, BuildTar(ents, beOpts.buildTarOptions...)); err != nil {
+ return nil, "", err
+ }
+ tarData := tarBuf.Bytes()
+ rc, err := estargz.Build(
+ io.NewSectionReader(bytes.NewReader(tarData), 0, int64(len(tarData))),
+ beOpts.estargzOptions...)
+ if err != nil {
+ return nil, "", err
+ }
+ defer rc.Close()
+ vsb := new(bytes.Buffer)
+ if _, err := io.Copy(vsb, rc); err != nil {
+ return nil, "", err
+ }
+ vsbb := vsb.Bytes()
+
+ return io.NewSectionReader(bytes.NewReader(vsbb), 0, int64(len(vsbb))), rc.TOCDigest(), nil
+}
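+
+// Illustrative usage (editor's sketch, not part of the vendored upstream
+// source): build an eStargz blob from in-memory entries and keep its TOC digest.
+//
+//	sr, tocDgst, err := BuildEStargz([]TarEntry{
+//		Dir("dir/"),
+//		File("dir/hello.txt", "hello"),
+//	})
+//	// sr is a seekable view of the blob; tocDgst identifies its TOC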
diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/testutil/tar.go b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/tar.go
new file mode 100644
index 000000000000..5f8a6c4b679d
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/tar.go
@@ -0,0 +1,310 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package testutil
+
+// This utility helps test code generate sample tar blobs.
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+)
+
+// TarEntry is an entry of a tar archive.
+type TarEntry interface {
+ AppendTar(tw *tar.Writer, opts BuildTarOptions) error
+}
+
+// BuildTarOptions is a set of options used when building a tar blob.
+type BuildTarOptions struct {
+ // Prefix is the prefix string to be added to each file name (e.g. "./", "/", etc.)
+ Prefix string
+}
+
+// BuildTarOption is an option used when building a tar blob.
+type BuildTarOption func(o *BuildTarOptions)
+
+// WithPrefix is an option to add a prefix string to each file name (e.g. "./", "/", etc.)
+func WithPrefix(prefix string) BuildTarOption {
+ return func(o *BuildTarOptions) {
+ o.Prefix = prefix
+ }
+}
+
+// BuildTar builds a tar blob
+func BuildTar(ents []TarEntry, opts ...BuildTarOption) io.Reader {
+ var bo BuildTarOptions
+ for _, o := range opts {
+ o(&bo)
+ }
+ pr, pw := io.Pipe()
+ go func() {
+ tw := tar.NewWriter(pw)
+ for _, ent := range ents {
+ if err := ent.AppendTar(tw, bo); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ }
+ if err := tw.Close(); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ pw.Close()
+ }()
+ return pr
+}
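+
+// Illustrative usage (editor's sketch, not part of the vendored upstream
+// source): the returned reader streams the archive as it is written, so it
+// can be consumed directly.
+//
+//	r := BuildTar([]TarEntry{
+//		Dir("etc/"),
+//		File("etc/hostname", "test"),
+//	}, WithPrefix("./"))
+//	_, err := io.Copy(dst, r) // dst receives "./etc/" and "./etc/hostname"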
+
+type tarEntryFunc func(*tar.Writer, BuildTarOptions) error
+
+func (f tarEntryFunc) AppendTar(tw *tar.Writer, opts BuildTarOptions) error { return f(tw, opts) }
+
+// DirectoryBuildTarOption is an option for a directory entry.
+type DirectoryBuildTarOption func(o *dirOpts)
+
+type dirOpts struct {
+ uid int
+ gid int
+ xattrs map[string]string
+ mode *os.FileMode
+ modTime time.Time
+}
+
+// WithDirModTime specifies the modtime of the dir.
+func WithDirModTime(modTime time.Time) DirectoryBuildTarOption {
+ return func(o *dirOpts) {
+ o.modTime = modTime
+ }
+}
+
+// WithDirOwner specifies the owner of the directory.
+func WithDirOwner(uid, gid int) DirectoryBuildTarOption {
+ return func(o *dirOpts) {
+ o.uid = uid
+ o.gid = gid
+ }
+}
+
+// WithDirXattrs specifies the extended attributes of the directory.
+func WithDirXattrs(xattrs map[string]string) DirectoryBuildTarOption {
+ return func(o *dirOpts) {
+ o.xattrs = xattrs
+ }
+}
+
+// WithDirMode specifies the mode of the directory.
+func WithDirMode(mode os.FileMode) DirectoryBuildTarOption {
+ return func(o *dirOpts) {
+ o.mode = &mode
+ }
+}
+
+// Dir is a directory entry
+func Dir(name string, opts ...DirectoryBuildTarOption) TarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, buildOpts BuildTarOptions) error {
+ var dOpts dirOpts
+ for _, o := range opts {
+ o(&dOpts)
+ }
+ if !strings.HasSuffix(name, "/") {
+ panic(fmt.Sprintf("missing trailing slash in dir %q", name))
+ }
+ var mode int64 = 0755
+ if dOpts.mode != nil {
+ mode = permAndExtraMode2TarMode(*dOpts.mode)
+ }
+ return tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeDir,
+ Name: buildOpts.Prefix + name,
+ Mode: mode,
+ ModTime: dOpts.modTime,
+ Xattrs: dOpts.xattrs,
+ Uid: dOpts.uid,
+ Gid: dOpts.gid,
+ })
+ })
+}
+
+// FileBuildTarOption is an option for a file entry.
+type FileBuildTarOption func(o *fileOpts)
+
+type fileOpts struct {
+ uid int
+ gid int
+ xattrs map[string]string
+ mode *os.FileMode
+ modTime time.Time
+}
+
+// WithFileOwner specifies the owner of the file.
+func WithFileOwner(uid, gid int) FileBuildTarOption {
+ return func(o *fileOpts) {
+ o.uid = uid
+ o.gid = gid
+ }
+}
+
+// WithFileXattrs specifies the extended attributes of the file.
+func WithFileXattrs(xattrs map[string]string) FileBuildTarOption {
+ return func(o *fileOpts) {
+ o.xattrs = xattrs
+ }
+}
+
+// WithFileModTime specifies the modtime of the file.
+func WithFileModTime(modTime time.Time) FileBuildTarOption {
+ return func(o *fileOpts) {
+ o.modTime = modTime
+ }
+}
+
+// WithFileMode specifies the mode of the file.
+func WithFileMode(mode os.FileMode) FileBuildTarOption {
+ return func(o *fileOpts) {
+ o.mode = &mode
+ }
+}
+
+// File is a regular file entry
+func File(name, contents string, opts ...FileBuildTarOption) TarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, buildOpts BuildTarOptions) error {
+ var fOpts fileOpts
+ for _, o := range opts {
+ o(&fOpts)
+ }
+ if strings.HasSuffix(name, "/") {
+ return fmt.Errorf("bogus trailing slash in file %q", name)
+ }
+ var mode int64 = 0644
+ if fOpts.mode != nil {
+ mode = permAndExtraMode2TarMode(*fOpts.mode)
+ }
+ if err := tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: buildOpts.Prefix + name,
+ Mode: mode,
+ ModTime: fOpts.modTime,
+ Xattrs: fOpts.xattrs,
+ Size: int64(len(contents)),
+ Uid: fOpts.uid,
+ Gid: fOpts.gid,
+ }); err != nil {
+ return err
+ }
+ _, err := io.WriteString(tw, contents)
+ return err
+ })
+}
+
+// Symlink is a symlink entry
+func Symlink(name, target string) TarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, buildOpts BuildTarOptions) error {
+ return tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeSymlink,
+ Name: buildOpts.Prefix + name,
+ Linkname: target,
+ Mode: 0644,
+ })
+ })
+}
+
+// Link is a hard-link entry
+func Link(name, linkname string) TarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, buildOpts BuildTarOptions) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeLink,
+ Name: buildOpts.Prefix + name,
+ Linkname: linkname,
+ ModTime: now,
+ AccessTime: now,
+ ChangeTime: now,
+ })
+ })
+}
+
+// Chardev is a character device entry
+func Chardev(name string, major, minor int64) TarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, buildOpts BuildTarOptions) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeChar,
+ Name: buildOpts.Prefix + name,
+ Devmajor: major,
+ Devminor: minor,
+ ModTime: now,
+ AccessTime: now,
+ ChangeTime: now,
+ })
+ })
+}
+
+// Blockdev is a block device entry
+func Blockdev(name string, major, minor int64) TarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, buildOpts BuildTarOptions) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeBlock,
+ Name: buildOpts.Prefix + name,
+ Devmajor: major,
+ Devminor: minor,
+ ModTime: now,
+ AccessTime: now,
+ ChangeTime: now,
+ })
+ })
+}
+
+// Fifo is a fifo entry
+func Fifo(name string) TarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, buildOpts BuildTarOptions) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeFifo,
+ Name: buildOpts.Prefix + name,
+ ModTime: now,
+ AccessTime: now,
+ ChangeTime: now,
+ })
+ })
+}
+
+// suid, guid, sticky bits for archive/tar
+// /~https://github.com/golang/go/blob/release-branch.go1.13/src/archive/tar/common.go#L607-L609
+const (
+ cISUID = 04000 // Set uid
+ cISGID = 02000 // Set gid
+ cISVTX = 01000 // Save text (sticky bit)
+)
+
+func permAndExtraMode2TarMode(fm os.FileMode) (tm int64) {
+ tm = int64(fm & os.ModePerm)
+ if fm&os.ModeSetuid != 0 {
+ tm |= cISUID
+ }
+ if fm&os.ModeSetgid != 0 {
+ tm |= cISGID
+ }
+ if fm&os.ModeSticky != 0 {
+ tm |= cISVTX
+ }
+ return
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/testutil/util.go b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/util.go
new file mode 100644
index 000000000000..e4313481ecc3
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/util.go
@@ -0,0 +1,31 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package testutil
+
+import (
+ "crypto/rand"
+ "testing"
+)
+
+// RandomBytes returns the specified number of random bytes
+func RandomBytes(t *testing.T, n int) []byte {
+ b := make([]byte, n)
+ if _, err := rand.Read(b); err != nil {
+ t.Fatalf("failed rand.Read: %v", err)
+ }
+ return b
+}
diff --git a/vendor/github.com/containerd/typeurl/v2/README.md b/vendor/github.com/containerd/typeurl/v2/README.md
index e3d0742f456c..8d86600a40b3 100644
--- a/vendor/github.com/containerd/typeurl/v2/README.md
+++ b/vendor/github.com/containerd/typeurl/v2/README.md
@@ -2,7 +2,7 @@
[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/typeurl)](https://pkg.go.dev/github.com/containerd/typeurl)
[![Build Status](/~https://github.com/containerd/typeurl/workflows/CI/badge.svg)](/~https://github.com/containerd/typeurl/actions?query=workflow%3ACI)
-[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl)
+[![codecov](https://codecov.io/gh/containerd/typeurl/branch/main/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl)
[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/typeurl)](https://goreportcard.com/report/github.com/containerd/typeurl)
A Go package for managing the registration, marshaling, and unmarshaling of encoded types.
@@ -13,8 +13,8 @@ This package helps when types are sent over a ttrpc/GRPC API and marshaled as a
**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
- * [Project governance](/~https://github.com/containerd/project/blob/master/GOVERNANCE.md),
- * [Maintainers](/~https://github.com/containerd/project/blob/master/MAINTAINERS),
- * and [Contributing guidelines](/~https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
+ * [Project governance](/~https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](/~https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](/~https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
information in our [`containerd/project`](/~https://github.com/containerd/project) repository.
diff --git a/vendor/github.com/containerd/typeurl/v2/types.go b/vendor/github.com/containerd/typeurl/v2/types.go
index 8d6665bb5ba7..78817b701e3a 100644
--- a/vendor/github.com/containerd/typeurl/v2/types.go
+++ b/vendor/github.com/containerd/typeurl/v2/types.go
@@ -27,6 +27,7 @@ import (
gogoproto "github.com/gogo/protobuf/proto"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/known/anypb"
)
var (
@@ -122,6 +123,9 @@ func TypeURL(v interface{}) (string, error) {
// Is returns true if the type of the Any is the same as v.
func Is(any Any, v interface{}) bool {
+ if any == nil {
+ return false
+ }
// call to check that v is a pointer
tryDereference(v)
url, err := TypeURL(v)
@@ -193,6 +197,31 @@ func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error {
return err
}
+// MarshalProto converts typeurl.Any to google.golang.org/protobuf/types/known/anypb.Any.
+func MarshalProto(from Any) *anypb.Any {
+ if from == nil {
+ return nil
+ }
+
+ if pbany, ok := from.(*anypb.Any); ok {
+ return pbany
+ }
+
+ return &anypb.Any{
+ TypeUrl: from.GetTypeUrl(),
+ Value: from.GetValue(),
+ }
+}
+
+// MarshalAnyToProto converts an arbitrary interface to google.golang.org/protobuf/types/known/anypb.Any.
+func MarshalAnyToProto(from interface{}) (*anypb.Any, error) {
+ anyType, err := MarshalAny(from)
+ if err != nil {
+ return nil, err
+ }
+ return MarshalProto(anyType), nil
+}
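+
+// Illustrative usage (editor's sketch, not part of the upstream source):
+// convert a registered value into a protobuf Any for a generated gRPC struct.
+// MyOptions is a hypothetical type previously registered via Register.
+//
+//	pbany, err := MarshalAnyToProto(&MyOptions{Field: "x"})
+//	if err == nil {
+//		req.Options = pbany // *anypb.Any drops straight into generated protobuf structs
+//	}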
+
func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
t, err := getTypeByUrl(typeURL)
if err != nil {
diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go
index 0d82a2dd3c60..201a12e97783 100644
--- a/vendor/github.com/containernetworking/cni/libcni/api.go
+++ b/vendor/github.com/containernetworking/cni/libcni/api.go
@@ -15,7 +15,7 @@
package libcni
// Note this is the actual implementation of the CNI specification, which
-// is reflected in the /~https://github.com/containernetworking/cni/blob/master/SPEC.md file
+// is reflected in the SPEC.md file.
// it is typically bundled into runtime providers (i.e. containerd or cri-o would use this
// before calling runc or hcsshim). It is also bundled into CNI providers as well, for example,
// to add an IP to a container, to parse the configuration of the CNI and so on.
@@ -23,10 +23,11 @@ package libcni
import (
"context"
"encoding/json"
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
+ "sort"
"strings"
"github.com/containernetworking/cni/pkg/invoke"
@@ -38,6 +39,8 @@ import (
var (
CacheDir = "/var/lib/cni"
+ // slightly awkward wording, kept to avoid breaking anyone matching on error strings
+ ErrorCheckNotSupp = fmt.Errorf("does not support the CHECK command")
)
const (
@@ -73,10 +76,25 @@ type NetworkConfigList struct {
Name string
CNIVersion string
DisableCheck bool
+ DisableGC bool
Plugins []*NetworkConfig
Bytes []byte
}
+type NetworkAttachment struct {
+ ContainerID string
+ Network string
+ IfName string
+ Config []byte
+ NetNS string
+ CniArgs [][2]string
+ CapabilityArgs map[string]interface{}
+}
+
+type GCArgs struct {
+ ValidAttachments []types.GCAttachment
+}
+
type CNI interface {
AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
@@ -92,6 +110,13 @@ type CNI interface {
ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error)
ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error)
+
+ GCNetworkList(ctx context.Context, net *NetworkConfigList, args *GCArgs) error
+ GetStatusNetworkList(ctx context.Context, net *NetworkConfigList) error
+
+ GetCachedAttachments(containerID string) ([]*NetworkAttachment, error)
+
+ GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error)
}
type CNIConfig struct {
@@ -139,8 +164,11 @@ func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult typ
if err != nil {
return nil, err
}
+ if rt != nil {
+ return injectRuntimeConfig(orig, rt)
+ }
- return injectRuntimeConfig(orig, rt)
+ return orig, nil
}
// This function takes a libcni RuntimeConf structure and injects values into
@@ -195,6 +223,7 @@ type cachedInfo struct {
Config []byte `json:"config"`
IfName string `json:"ifName"`
NetworkName string `json:"networkName"`
+ NetNS string `json:"netns,omitempty"`
CniArgs [][2]string `json:"cniArgs,omitempty"`
CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"`
RawResult map[string]interface{} `json:"result,omitempty"`
@@ -229,6 +258,7 @@ func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string,
Config: config,
IfName: rt.IfName,
NetworkName: netName,
+ NetNS: rt.NetNS,
CniArgs: rt.Args,
CapabilityArgs: rt.CapabilityArgs,
}
@@ -254,11 +284,11 @@ func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string,
if err != nil {
return err
}
- if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil {
+ if err := os.MkdirAll(filepath.Dir(fname), 0o700); err != nil {
return err
}
- return ioutil.WriteFile(fname, newBytes, 0600)
+ return os.WriteFile(fname, newBytes, 0o600)
}
func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error {
@@ -277,7 +307,7 @@ func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *R
if err != nil {
return nil, nil, err
}
- bytes, err = ioutil.ReadFile(fname)
+ bytes, err = os.ReadFile(fname)
if err != nil {
// Ignore read errors; the cached result may not exist on-disk
return nil, nil, nil
@@ -305,7 +335,7 @@ func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *Runtim
if err != nil {
return nil, err
}
- data, err := ioutil.ReadFile(fname)
+ data, err := os.ReadFile(fname)
if err != nil {
// Ignore read errors; the cached result may not exist on-disk
return nil, nil
@@ -333,7 +363,7 @@ func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf)
if err != nil {
return nil, err
}
- fdata, err := ioutil.ReadFile(fname)
+ fdata, err := os.ReadFile(fname)
if err != nil {
// Ignore read errors; the cached result may not exist on-disk
return nil, nil
@@ -390,6 +420,68 @@ func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf)
return c.getCachedConfig(net.Network.Name, rt)
}
+// GetCachedAttachments returns a list of network attachments from the cache.
+// The returned list will be filtered by the containerID if the value is not empty.
+func (c *CNIConfig) GetCachedAttachments(containerID string) ([]*NetworkAttachment, error) {
+ dirPath := filepath.Join(c.getCacheDir(&RuntimeConf{}), "results")
+ entries, err := os.ReadDir(dirPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ fileNames := make([]string, 0, len(entries))
+ for _, e := range entries {
+ fileNames = append(fileNames, e.Name())
+ }
+ sort.Strings(fileNames)
+
+ attachments := []*NetworkAttachment{}
+ for _, fname := range fileNames {
+ if len(containerID) > 0 {
+ part := fmt.Sprintf("-%s-", containerID)
+ pos := strings.Index(fname, part)
+ if pos <= 0 || pos+len(part) >= len(fname) {
+ continue
+ }
+ }
+
+ cacheFile := filepath.Join(dirPath, fname)
+ bytes, err := os.ReadFile(cacheFile)
+ if err != nil {
+ continue
+ }
+
+ cachedInfo := cachedInfo{}
+
+ if err := json.Unmarshal(bytes, &cachedInfo); err != nil {
+ continue
+ }
+ if cachedInfo.Kind != CNICacheV1 {
+ continue
+ }
+ if len(containerID) > 0 && cachedInfo.ContainerID != containerID {
+ continue
+ }
+ if cachedInfo.IfName == "" || cachedInfo.NetworkName == "" {
+ continue
+ }
+
+ attachments = append(attachments, &NetworkAttachment{
+ ContainerID: cachedInfo.ContainerID,
+ Network: cachedInfo.NetworkName,
+ IfName: cachedInfo.IfName,
+ Config: cachedInfo.Config,
+ NetNS: cachedInfo.NetNS,
+ CniArgs: cachedInfo.CniArgs,
+ CapabilityArgs: cachedInfo.CapabilityArgs,
+ })
+ }
+ return attachments, nil
+}
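+
+// Illustrative usage (editor's sketch, not part of the vendored upstream
+// source): list everything the cache knows about one container.
+//
+//	atts, err := cniConfig.GetCachedAttachments(containerID) // empty containerID returns all entries
+//	for _, a := range atts {
+//		fmt.Println(a.Network, a.IfName, a.NetNS)
+//	}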
+
func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) {
c.ensureExec()
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
@@ -453,7 +545,7 @@ func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigLis
if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
return err
} else if !gtet {
- return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion)
+ return fmt.Errorf("configuration version %q %w", list.CNIVersion, ErrorCheckNotSupp)
}
if list.DisableCheck {
@@ -497,9 +589,9 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList,
if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
return err
} else if gtet {
- cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt)
- if err != nil {
- return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err)
+ if cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt); err != nil {
+ _ = c.cacheDel(list.Name, rt)
+ cachedResult = nil
}
}
@@ -509,6 +601,7 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList,
return fmt.Errorf("plugin %s failed (delete): %w", pluginDescription(net.Network), err)
}
}
+
_ = c.cacheDel(list.Name, rt)
return nil
@@ -547,7 +640,7 @@ func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *Ru
if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil {
return err
} else if !gtet {
- return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion)
+ return fmt.Errorf("configuration version %q %w", net.Network.CNIVersion, ErrorCheckNotSupp)
}
cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
@@ -666,6 +759,129 @@ func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (vers
return invoke.GetVersionInfo(ctx, pluginPath, c.exec)
}
+// GCNetworkList will do two things:
+// - dump the list of cached attachments, and issue deletes as necessary
+// - issue a GC to the underlying plugins (if the version is high enough)
+func (c *CNIConfig) GCNetworkList(ctx context.Context, list *NetworkConfigList, args *GCArgs) error {
+ // If DisableGC is set, then don't bother GCing at all.
+ if list.DisableGC {
+ return nil
+ }
+
+ // First, get the list of cached attachments
+ cachedAttachments, err := c.GetCachedAttachments("")
+ if err != nil {
+ return nil
+ }
+
+ var validAttachments map[types.GCAttachment]interface{}
+ if args != nil {
+ validAttachments = make(map[types.GCAttachment]interface{}, len(args.ValidAttachments))
+ for _, a := range args.ValidAttachments {
+ validAttachments[a] = nil
+ }
+ }
+
+ var errs []error
+
+ for _, cachedAttachment := range cachedAttachments {
+ if cachedAttachment.Network != list.Name {
+ continue
+ }
+ // we found this attachment
+ gca := types.GCAttachment{
+ ContainerID: cachedAttachment.ContainerID,
+ IfName: cachedAttachment.IfName,
+ }
+ if _, ok := validAttachments[gca]; ok {
+ continue
+ }
+ // otherwise, this attachment wasn't valid and we should issue a CNI DEL
+ rt := RuntimeConf{
+ ContainerID: cachedAttachment.ContainerID,
+ NetNS: cachedAttachment.NetNS,
+ IfName: cachedAttachment.IfName,
+ Args: cachedAttachment.CniArgs,
+ CapabilityArgs: cachedAttachment.CapabilityArgs,
+ }
+ if err := c.DelNetworkList(ctx, list, &rt); err != nil {
+ errs = append(errs, fmt.Errorf("failed to delete stale attachment %s %s: %w", rt.ContainerID, rt.IfName, err))
+ }
+ }
+
+ // now, if the version supports it, issue a GC
+ if gt, _ := version.GreaterThanOrEqualTo(list.CNIVersion, "1.1.0"); gt {
+ inject := map[string]interface{}{
+ "name": list.Name,
+ "cniVersion": list.CNIVersion,
+ }
+ if args != nil {
+ inject["cni.dev/valid-attachments"] = args.ValidAttachments
+ // #1101: spec used incorrect variable name
+ inject["cni.dev/attachments"] = args.ValidAttachments
+ }
+
+ for _, plugin := range list.Plugins {
+ // build config here
+ pluginConfig, err := InjectConf(plugin, inject)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to generate configuration to GC plugin %s: %w", plugin.Network.Type, err))
+ }
+ if err := c.gcNetwork(ctx, pluginConfig); err != nil {
+ errs = append(errs, fmt.Errorf("failed to GC plugin %s: %w", plugin.Network.Type, err))
+ }
+ }
+ }
+
+ return errors.Join(errs...)
+}
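+
+// Illustrative usage (editor's sketch, not part of the vendored upstream
+// source): declare which attachments are still alive; every other cached
+// attachment for this network gets a CNI DEL, and plugins at spec >= 1.1.0
+// also receive the GC verb.
+//
+//	err := cniConfig.GCNetworkList(ctx, netList, &GCArgs{
+//		ValidAttachments: []types.GCAttachment{
+//			{ContainerID: id, IfName: "eth0"},
+//		},
+//	})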
+
+func (c *CNIConfig) gcNetwork(ctx context.Context, net *NetworkConfig) error {
+ c.ensureExec()
+ pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
+ if err != nil {
+ return err
+ }
+ args := c.args("GC", &RuntimeConf{})
+
+ return invoke.ExecPluginWithoutResult(ctx, pluginPath, net.Bytes, args, c.exec)
+}
+
+func (c *CNIConfig) GetStatusNetworkList(ctx context.Context, list *NetworkConfigList) error {
+ // If the version doesn't support status, abort.
+ if gt, _ := version.GreaterThanOrEqualTo(list.CNIVersion, "1.1.0"); !gt {
+ return nil
+ }
+
+ inject := map[string]interface{}{
+ "name": list.Name,
+ "cniVersion": list.CNIVersion,
+ }
+
+ for _, plugin := range list.Plugins {
+ // build config here
+ pluginConfig, err := InjectConf(plugin, inject)
+ if err != nil {
+ return fmt.Errorf("failed to generate configuration to get plugin STATUS %s: %w", plugin.Network.Type, err)
+ }
+ if err := c.getStatusNetwork(ctx, pluginConfig); err != nil {
+ return err // Don't collect errors here, so we return a clean error code.
+ }
+ }
+ return nil
+}
+
+func (c *CNIConfig) getStatusNetwork(ctx context.Context, net *NetworkConfig) error {
+ c.ensureExec()
+ pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
+ if err != nil {
+ return err
+ }
+ args := c.args("STATUS", &RuntimeConf{})
+
+ return invoke.ExecPluginWithoutResult(ctx, pluginPath, net.Bytes, args, c.exec)
+}
+
// =====
func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args {
return &invoke.Args{
diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go
index 3cd6a59d1c09..1d1b821c636d 100644
--- a/vendor/github.com/containernetworking/cni/libcni/conf.go
+++ b/vendor/github.com/containernetworking/cni/libcni/conf.go
@@ -16,13 +16,16 @@ package libcni
import (
"encoding/json"
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
+ "slices"
"sort"
+ "strings"
"github.com/containernetworking/cni/pkg/types"
+ "github.com/containernetworking/cni/pkg/version"
)
type NotFoundError struct {
@@ -54,7 +57,7 @@ func ConfFromBytes(bytes []byte) (*NetworkConfig, error) {
}
func ConfFromFile(filename string) (*NetworkConfig, error) {
- bytes, err := ioutil.ReadFile(filename)
+ bytes, err := os.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("error reading %s: %w", filename, err)
}
@@ -85,17 +88,89 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
}
}
- disableCheck := false
- if rawDisableCheck, ok := rawList["disableCheck"]; ok {
- disableCheck, ok = rawDisableCheck.(bool)
+ rawVersions, ok := rawList["cniVersions"]
+ if ok {
+ // Parse the current package CNI version
+ rvs, ok := rawVersions.([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: invalid type for cniVersions: %T", rvs)
+ }
+ vs := make([]string, 0, len(rvs))
+ for i, rv := range rvs {
+ v, ok := rv.(string)
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: invalid type for cniVersions index %d: %T", i, rv)
+ }
+ gt, err := version.GreaterThan(v, version.Current())
+ if err != nil {
+ return nil, fmt.Errorf("error parsing configuration list: invalid cniVersions entry %s at index %d: %w", v, i, err)
+ } else if !gt {
+ // Skip versions "greater" than this implementation of the spec
+ vs = append(vs, v)
+ }
+ }
+
+ // if cniVersion was already set, append it to the list for sorting.
+ if cniVersion != "" {
+ gt, err := version.GreaterThan(cniVersion, version.Current())
+ if err != nil {
+ return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion %s: %w", cniVersion, err)
+ } else if !gt {
+ // ignore any versions higher than the current implemented spec version
+ vs = append(vs, cniVersion)
+ }
+ }
+ slices.SortFunc[[]string](vs, func(v1, v2 string) int {
+ if v1 == v2 {
+ return 0
+ }
+ if gt, _ := version.GreaterThan(v1, v2); gt {
+ return 1
+ }
+ return -1
+ })
+ if len(vs) > 0 {
+ cniVersion = vs[len(vs)-1]
+ }
+ }
+
+ readBool := func(key string) (bool, error) {
+ rawVal, ok := rawList[key]
if !ok {
- return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck)
+ return false, nil
}
+ if b, ok := rawVal.(bool); ok {
+ return b, nil
+ }
+
+ s, ok := rawVal.(string)
+ if !ok {
+ return false, fmt.Errorf("error parsing configuration list: invalid type %T for %s", rawVal, key)
+ }
+ s = strings.ToLower(s)
+ switch s {
+ case "false":
+ return false, nil
+ case "true":
+ return true, nil
+ }
+ return false, fmt.Errorf("error parsing configuration list: invalid value %q for %s", s, key)
+ }
+
+ disableCheck, err := readBool("disableCheck")
+ if err != nil {
+ return nil, err
+ }
+
+ disableGC, err := readBool("disableGC")
+ if err != nil {
+ return nil, err
}
list := &NetworkConfigList{
Name: name,
DisableCheck: disableCheck,
+ DisableGC: disableGC,
CNIVersion: cniVersion,
Bytes: bytes,
}
@@ -129,7 +204,7 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
}
func ConfListFromFile(filename string) (*NetworkConfigList, error) {
- bytes, err := ioutil.ReadFile(filename)
+ bytes, err := os.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("error reading %s: %w", filename, err)
}
@@ -138,7 +213,7 @@ func ConfListFromFile(filename string) (*NetworkConfigList, error) {
func ConfFiles(dir string, extensions []string) ([]string, error) {
// In part, adapted from rkt/networking/podenv.go#listFiles
- files, err := ioutil.ReadDir(dir)
+ files, err := os.ReadDir(dir)
switch {
case err == nil: // break
case os.IsNotExist(err):
@@ -206,7 +281,8 @@ func LoadConfList(dir, name string) (*NetworkConfigList, error) {
singleConf, err := LoadConf(dir, name)
if err != nil {
// A little extra logic so the error makes sense
- if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok {
+ var ncfErr NoConfigsFoundError
+ if len(files) != 0 && errors.As(err, &ncfErr) {
// Config lists found but no config files found
return nil, NotFoundError{dir, name}
}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
index 8defe4dd3982..c8b548e7c616 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
@@ -51,25 +51,34 @@ func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exe
// DelegateCheck calls the given delegate plugin with the CNI CHECK action and
// JSON configuration
func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
+ return delegateNoResult(ctx, delegatePlugin, netconf, exec, "CHECK")
+}
+
+func delegateNoResult(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec, verb string) error {
pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
if err != nil {
return err
}
- // DelegateCheck will override the original CNI_COMMAND env from process with CHECK
- return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec)
+ return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs(verb), realExec)
}
// DelegateDel calls the given delegate plugin with the CNI DEL action and
// JSON configuration
func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
- pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
- if err != nil {
- return err
- }
+ return delegateNoResult(ctx, delegatePlugin, netconf, exec, "DEL")
+}
- // DelegateDel will override the original CNI_COMMAND env from process with DEL
- return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec)
+// DelegateStatus calls the given delegate plugin with the CNI STATUS action and
+// JSON configuration
+func DelegateStatus(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
+ return delegateNoResult(ctx, delegatePlugin, netconf, exec, "STATUS")
+}
+
+// DelegateGC calls the given delegate plugin with the CNI GC action and
+// JSON configuration
+func DelegateGC(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
+ return delegateNoResult(ctx, delegatePlugin, netconf, exec, "GC")
}
// return CNIArgs used by delegation
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
index 3ad07aa8f2d6..a5e015fc9250 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
@@ -81,17 +81,17 @@ func fixupResultVersion(netconf, result []byte) (string, []byte, error) {
// object to ExecPluginWithResult() to verify the incoming stdin and environment
// and provide a tailored response:
//
-//import (
+// import (
// "encoding/json"
// "path"
// "strings"
-//)
+// )
//
-//type fakeExec struct {
+// type fakeExec struct {
// version.PluginDecoder
-//}
+// }
//
-//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
+// func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
// net := &types.NetConf{}
// err := json.Unmarshal(stdinData, net)
// if err != nil {
@@ -109,14 +109,14 @@ func fixupResultVersion(netconf, result []byte) (string, []byte, error) {
// }
// }
// return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil
-//}
+// }
//
-//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) {
+// func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) {
// if len(paths) > 0 {
// return path.Join(paths[0], plugin), nil
// }
// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths)
-//}
+// }
func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) {
if exec == nil {
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
index 9bcfb4553677..ed0999bd0e1f 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package invoke
diff --git a/vendor/github.com/containernetworking/cni/pkg/ns/ns_darwin.go b/vendor/github.com/containernetworking/cni/pkg/ns/ns_darwin.go
new file mode 100644
index 000000000000..cffe136178f9
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/ns/ns_darwin.go
@@ -0,0 +1,21 @@
+// Copyright 2022 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ns
+
+import "github.com/containernetworking/cni/pkg/types"
+
+func CheckNetNS(nsPath string) (bool, *types.Error) {
+ return false, nil
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/cni/pkg/ns/ns_linux.go
new file mode 100644
index 000000000000..3d58e75d6cc4
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/ns/ns_linux.go
@@ -0,0 +1,50 @@
+// Copyright 2022 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ns
+
+import (
+ "runtime"
+
+ "github.com/vishvananda/netns"
+
+ "github.com/containernetworking/cni/pkg/types"
+)
+
+// Returns an object representing the current OS thread's network namespace
+func getCurrentNS() (netns.NsHandle, error) {
+ // Lock the thread in case other goroutine executes in it and changes its
+ // network namespace after getCurrentThreadNetNSPath(), otherwise it might
+ // return an unexpected network namespace.
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ return netns.Get()
+}
+
+func CheckNetNS(nsPath string) (bool, *types.Error) {
+ ns, err := netns.GetFromPath(nsPath)
+ // Let plugins check whether nsPath from args is valid. Also support CNI DEL for empty nsPath as already-deleted nsPath.
+ if err != nil {
+ return false, nil
+ }
+ defer ns.Close()
+
+ pluginNS, err := getCurrentNS()
+ if err != nil {
+ return false, types.NewError(types.ErrInvalidNetNS, "get plugin's netns failed", "")
+ }
+ defer pluginNS.Close()
+
+ return pluginNS.Equal(ns), nil
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/ns/ns_windows.go b/vendor/github.com/containernetworking/cni/pkg/ns/ns_windows.go
new file mode 100644
index 000000000000..cffe136178f9
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/ns/ns_windows.go
@@ -0,0 +1,21 @@
+// Copyright 2022 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ns
+
+import "github.com/containernetworking/cni/pkg/types"
+
+func CheckNetNS(nsPath string) (bool, *types.Error) {
+ return false, nil
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/skel/skel.go b/vendor/github.com/containernetworking/cni/pkg/skel/skel.go
index cb8781972d75..f29cf3459426 100644
--- a/vendor/github.com/containernetworking/cni/pkg/skel/skel.go
+++ b/vendor/github.com/containernetworking/cni/pkg/skel/skel.go
@@ -19,13 +19,14 @@ package skel
import (
"bytes"
"encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"log"
"os"
"strings"
+ "github.com/containernetworking/cni/pkg/ns"
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/utils"
"github.com/containernetworking/cni/pkg/version"
@@ -34,12 +35,13 @@ import (
// CmdArgs captures all the arguments passed in to the plugin
// via both env vars and stdin
type CmdArgs struct {
- ContainerID string
- Netns string
- IfName string
- Args string
- Path string
- StdinData []byte
+ ContainerID string
+ Netns string
+ IfName string
+ Args string
+ Path string
+ NetnsOverride string
+ StdinData []byte
}
type dispatcher struct {
@@ -55,21 +57,25 @@ type dispatcher struct {
type reqForCmdEntry map[string]bool
func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) {
- var cmd, contID, netns, ifName, args, path string
+ var cmd, contID, netns, ifName, args, path, netnsOverride string
vars := []struct {
- name string
- val *string
- reqForCmd reqForCmdEntry
+ name string
+ val *string
+ reqForCmd reqForCmdEntry
+ validateFn func(string) *types.Error
}{
{
"CNI_COMMAND",
&cmd,
reqForCmdEntry{
- "ADD": true,
- "CHECK": true,
- "DEL": true,
+ "ADD": true,
+ "CHECK": true,
+ "DEL": true,
+ "GC": true,
+ "STATUS": true,
},
+ nil,
},
{
"CNI_CONTAINERID",
@@ -79,6 +85,7 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) {
"CHECK": true,
"DEL": true,
},
+ utils.ValidateContainerID,
},
{
"CNI_NETNS",
@@ -88,6 +95,7 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) {
"CHECK": true,
"DEL": false,
},
+ nil,
},
{
"CNI_IFNAME",
@@ -97,6 +105,7 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) {
"CHECK": true,
"DEL": true,
},
+ utils.ValidateInterfaceName,
},
{
"CNI_ARGS",
@@ -106,15 +115,29 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) {
"CHECK": false,
"DEL": false,
},
+ nil,
},
{
"CNI_PATH",
&path,
reqForCmdEntry{
- "ADD": true,
- "CHECK": true,
- "DEL": true,
+ "ADD": true,
+ "CHECK": true,
+ "DEL": true,
+ "GC": true,
+ "STATUS": true,
+ },
+ nil,
+ },
+ {
+ "CNI_NETNS_OVERRIDE",
+ &netnsOverride,
+ reqForCmdEntry{
+ "ADD": false,
+ "CHECK": false,
+ "DEL": false,
},
+ nil,
},
}
@@ -125,6 +148,10 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) {
if v.reqForCmd[cmd] || v.name == "CNI_COMMAND" {
argsMissing = append(argsMissing, v.name)
}
+ } else if v.reqForCmd[cmd] && v.validateFn != nil {
+ if err := v.validateFn(*v.val); err != nil {
+ return "", nil, err
+ }
}
}
@@ -137,18 +164,25 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) {
t.Stdin = bytes.NewReader(nil)
}
- stdinData, err := ioutil.ReadAll(t.Stdin)
+ stdinData, err := io.ReadAll(t.Stdin)
if err != nil {
return "", nil, types.NewError(types.ErrIOFailure, fmt.Sprintf("error reading from stdin: %v", err), "")
}
+ if cmd != "VERSION" {
+ if err := validateConfig(stdinData); err != nil {
+ return "", nil, err
+ }
+ }
+
cmdArgs := &CmdArgs{
- ContainerID: contID,
- Netns: netns,
- IfName: ifName,
- Args: args,
- Path: path,
- StdinData: stdinData,
+ ContainerID: contID,
+ Netns: netns,
+ IfName: ifName,
+ Args: args,
+ Path: path,
+ StdinData: stdinData,
+ NetnsOverride: netnsOverride,
}
return cmd, cmdArgs, nil
}
@@ -163,8 +197,13 @@ func (t *dispatcher) checkVersionAndCall(cmdArgs *CmdArgs, pluginVersionInfo ver
return types.NewError(types.ErrIncompatibleCNIVersion, "incompatible CNI versions", verErr.Details())
}
+ if toCall == nil {
+ return nil
+ }
+
if err = toCall(cmdArgs); err != nil {
- if e, ok := err.(*types.Error); ok {
+ var e *types.Error
+ if errors.As(err, &e) {
// don't wrap Error in Error
return e
}
@@ -190,7 +229,7 @@ func validateConfig(jsonBytes []byte) *types.Error {
return nil
}
-func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) *types.Error {
+func (t *dispatcher) pluginMain(funcs CNIFuncs, versionInfo version.PluginInfo, about string) *types.Error {
cmd, cmdArgs, err := t.getCmdArgsFromEnv()
if err != nil {
// Print the about string to stderr when no command is set
@@ -202,21 +241,20 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error,
return err
}
- if cmd != "VERSION" {
- if err = validateConfig(cmdArgs.StdinData); err != nil {
- return err
- }
- if err = utils.ValidateContainerID(cmdArgs.ContainerID); err != nil {
+ switch cmd {
+ case "ADD":
+ err = t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Add)
+ if err != nil {
return err
}
- if err = utils.ValidateInterfaceName(cmdArgs.IfName); err != nil {
- return err
+ if strings.ToUpper(cmdArgs.NetnsOverride) != "TRUE" && cmdArgs.NetnsOverride != "1" {
+ isPluginNetNS, checkErr := ns.CheckNetNS(cmdArgs.Netns)
+ if checkErr != nil {
+ return checkErr
+ } else if isPluginNetNS {
+ return types.NewError(types.ErrInvalidNetNS, "plugin's netns and netns from CNI_NETNS should not be the same", "")
+ }
}
- }
-
- switch cmd {
- case "ADD":
- err = t.checkVersionAndCall(cmdArgs, versionInfo, cmdAdd)
case "CHECK":
configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData)
if err != nil {
@@ -232,7 +270,7 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error,
if err != nil {
return types.NewError(types.ErrDecodingFailure, err.Error(), "")
} else if gtet {
- if err := t.checkVersionAndCall(cmdArgs, versionInfo, cmdCheck); err != nil {
+ if err := t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Check); err != nil {
return err
}
return nil
@@ -240,7 +278,62 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error,
}
return types.NewError(types.ErrIncompatibleCNIVersion, "plugin version does not allow CHECK", "")
case "DEL":
- err = t.checkVersionAndCall(cmdArgs, versionInfo, cmdDel)
+ err = t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Del)
+ if err != nil {
+ return err
+ }
+ if strings.ToUpper(cmdArgs.NetnsOverride) != "TRUE" && cmdArgs.NetnsOverride != "1" {
+ isPluginNetNS, checkErr := ns.CheckNetNS(cmdArgs.Netns)
+ if checkErr != nil {
+ return checkErr
+ } else if isPluginNetNS {
+ return types.NewError(types.ErrInvalidNetNS, "plugin's netns and netns from CNI_NETNS should not be the same", "")
+ }
+ }
+ case "GC":
+ configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData)
+ if err != nil {
+ return types.NewError(types.ErrDecodingFailure, err.Error(), "")
+ }
+ if gtet, err := version.GreaterThanOrEqualTo(configVersion, "1.1.0"); err != nil {
+ return types.NewError(types.ErrDecodingFailure, err.Error(), "")
+ } else if !gtet {
+ return types.NewError(types.ErrIncompatibleCNIVersion, "config version does not allow GC", "")
+ }
+ for _, pluginVersion := range versionInfo.SupportedVersions() {
+ gtet, err := version.GreaterThanOrEqualTo(pluginVersion, configVersion)
+ if err != nil {
+ return types.NewError(types.ErrDecodingFailure, err.Error(), "")
+ } else if gtet {
+ if err := t.checkVersionAndCall(cmdArgs, versionInfo, funcs.GC); err != nil {
+ return err
+ }
+ return nil
+ }
+ }
+ return types.NewError(types.ErrIncompatibleCNIVersion, "plugin version does not allow GC", "")
+ case "STATUS":
+ configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData)
+ if err != nil {
+ return types.NewError(types.ErrDecodingFailure, err.Error(), "")
+ }
+ if gtet, err := version.GreaterThanOrEqualTo(configVersion, "1.1.0"); err != nil {
+ return types.NewError(types.ErrDecodingFailure, err.Error(), "")
+ } else if !gtet {
+ return types.NewError(types.ErrIncompatibleCNIVersion, "config version does not allow STATUS", "")
+ }
+ for _, pluginVersion := range versionInfo.SupportedVersions() {
+ gtet, err := version.GreaterThanOrEqualTo(pluginVersion, configVersion)
+ if err != nil {
+ return types.NewError(types.ErrDecodingFailure, err.Error(), "")
+ } else if gtet {
+ if err := t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Status); err != nil {
+ return err
+ }
+ return nil
+ }
+ }
+ return types.NewError(types.ErrIncompatibleCNIVersion, "plugin version does not allow STATUS", "")
case "VERSION":
if err := versionInfo.Encode(t.Stdout); err != nil {
return types.NewError(types.ErrIOFailure, err.Error(), "")
@@ -264,13 +357,63 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error,
//
// To let this package automatically handle errors and call os.Exit(1) for you,
// use PluginMain() instead.
+//
+// Deprecated: Use github.com/containernetworking/cni/pkg/skel.PluginMainFuncsWithError instead.
func PluginMainWithError(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) *types.Error {
+ return PluginMainFuncsWithError(CNIFuncs{Add: cmdAdd, Check: cmdCheck, Del: cmdDel}, versionInfo, about)
+}
+
+// CNIFuncs contains a group of callback command funcs to be passed in as
+// parameters to the core "main" for a plugin.
+type CNIFuncs struct {
+ Add func(_ *CmdArgs) error
+ Del func(_ *CmdArgs) error
+ Check func(_ *CmdArgs) error
+ GC func(_ *CmdArgs) error
+ Status func(_ *CmdArgs) error
+}
+
+// PluginMainFuncsWithError is the core "main" for a plugin. It accepts
+// callback functions defined within CNIFuncs and returns an error.
+//
+// The caller must also specify what CNI spec versions the plugin supports.
+//
+// It is the responsibility of the caller to check for non-nil error return.
+//
+// For a plugin to comply with the CNI spec, it must print any error to stdout
+// as JSON and then exit with nonzero status code.
+//
+// To let this package automatically handle errors and call os.Exit(1) for you,
+// use PluginMainFuncs() instead.
+func PluginMainFuncsWithError(funcs CNIFuncs, versionInfo version.PluginInfo, about string) *types.Error {
return (&dispatcher{
Getenv: os.Getenv,
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
- }).pluginMain(cmdAdd, cmdCheck, cmdDel, versionInfo, about)
+ }).pluginMain(funcs, versionInfo, about)
+}
+
+// PluginMainFuncs is the core "main" for a plugin which includes automatic error handling.
+// This is a newer alternative func to PluginMain which abstracts CNI commands within a
+// CNIFuncs interface.
+//
+// The caller must also specify what CNI spec versions the plugin supports.
+//
+// The caller can specify an "about" string, which is printed on stderr
+// when no CNI_COMMAND is specified. The recommended output is "CNI plugin <foo> v<bar>"
+//
+// When an error occurs in any func in CNIFuncs, PluginMainFuncs will print the error
+// as JSON to stdout and call os.Exit(1).
+//
+// To have more control over error handling, use PluginMainFuncsWithError() instead.
+func PluginMainFuncs(funcs CNIFuncs, versionInfo version.PluginInfo, about string) {
+ if e := PluginMainFuncsWithError(funcs, versionInfo, about); e != nil {
+ if err := e.Print(); err != nil {
+ log.Print("Error writing error JSON to stdout: ", err)
+ }
+ os.Exit(1)
+ }
}
// PluginMain is the core "main" for a plugin which includes automatic error handling.
@@ -284,6 +427,8 @@ func PluginMainWithError(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versio
// as JSON to stdout and call os.Exit(1).
//
// To have more control over error handling, use PluginMainWithError() instead.
+//
+// Deprecated: Use github.com/containernetworking/cni/pkg/skel.PluginMainFuncs instead.
func PluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) {
if e := PluginMainWithError(cmdAdd, cmdCheck, cmdDel, versionInfo, about); e != nil {
if err := e.Print(); err != nil {
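Taken together, the hunks above migrate `skel` from the fixed `cmdAdd`/`cmdCheck`/`cmdDel` triple to the `CNIFuncs` struct, with optional `GC` and `Status` callbacks that `checkVersionAndCall` simply skips when nil. A minimal sketch of a plugin wired through the new entry point (the plugin name and no-op bodies are illustrative, not part of this diff):

```go
package main

import (
	"github.com/containernetworking/cni/pkg/skel"
	"github.com/containernetworking/cni/pkg/version"
)

// noop stands in for real ADD/DEL/CHECK/GC/STATUS handlers.
func noop(_ *skel.CmdArgs) error { return nil }

func main() {
	// Nil callbacks are tolerated (checkVersionAndCall returns early),
	// so a plugin may omit GC/Status until it implements them.
	skel.PluginMainFuncs(skel.CNIFuncs{
		Add:    noop,
		Del:    noop,
		Check:  noop,
		GC:     noop,
		Status: noop,
	}, version.All, "example-plugin v0.1.0")
}
```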
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go
index 0e1e8b857b78..f58b91206dc1 100644
--- a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go
+++ b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go
@@ -26,9 +26,10 @@ import (
convert "github.com/containernetworking/cni/pkg/types/internal"
)
-const ImplementedSpecVersion string = "1.0.0"
+// The types did not change between v1.0 and v1.1
+const ImplementedSpecVersion string = "1.1.0"
-var supportedVersions = []string{ImplementedSpecVersion}
+var supportedVersions = []string{"1.0.0", "1.1.0"}
// Register converters for all versions less than the implemented spec version
func init() {
@@ -38,10 +39,14 @@ func init() {
convert.RegisterConverter("0.3.0", supportedVersions, convertFrom04x)
convert.RegisterConverter("0.3.1", supportedVersions, convertFrom04x)
convert.RegisterConverter("0.4.0", supportedVersions, convertFrom04x)
+ convert.RegisterConverter("1.0.0", []string{"1.1.0"}, convertFrom100)
// Down-converters
convert.RegisterConverter("1.0.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x)
convert.RegisterConverter("1.0.0", []string{"0.1.0", "0.2.0"}, convertTo02x)
+ convert.RegisterConverter("1.1.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x)
+ convert.RegisterConverter("1.1.0", []string{"0.1.0", "0.2.0"}, convertTo02x)
+ convert.RegisterConverter("1.1.0", []string{"1.0.0"}, convertFrom100)
// Creator
convert.RegisterCreator(supportedVersions, NewResult)
@@ -90,12 +95,49 @@ type Result struct {
DNS types.DNS `json:"dns,omitempty"`
}
+// Note: DNS should be omitted when empty, but the default Marshal function
+// would output an empty structure, hence the need for a custom MarshalJSON.
+func (r *Result) MarshalJSON() ([]byte, error) {
+ // use type alias to escape recursion for json.Marshal() to MarshalJSON()
+ type fixObjType = Result
+
+ bytes, err := json.Marshal(fixObjType(*r)) //nolint:all
+ if err != nil {
+ return nil, err
+ }
+
+ fixupObj := make(map[string]interface{})
+ if err := json.Unmarshal(bytes, &fixupObj); err != nil {
+ return nil, err
+ }
+
+ if r.DNS.IsEmpty() {
+ delete(fixupObj, "dns")
+ }
+
+ return json.Marshal(fixupObj)
+}
+
+// convertFrom100 does nothing except set the version; the types are the same
+func convertFrom100(from types.Result, toVersion string) (types.Result, error) {
+ fromResult := from.(*Result)
+
+ result := &Result{
+ CNIVersion: toVersion,
+ Interfaces: fromResult.Interfaces,
+ IPs: fromResult.IPs,
+ Routes: fromResult.Routes,
+ DNS: fromResult.DNS,
+ }
+ return result, nil
+}
+
func convertFrom02x(from types.Result, toVersion string) (types.Result, error) {
result040, err := convert.Convert(from, "0.4.0")
if err != nil {
return nil, err
}
- result100, err := convertFrom04x(result040, ImplementedSpecVersion)
+ result100, err := convertFrom04x(result040, toVersion)
if err != nil {
return nil, err
}
@@ -226,9 +268,12 @@ func (r *Result) PrintTo(writer io.Writer) error {
// Interface contains values about the created interfaces
type Interface struct {
- Name string `json:"name"`
- Mac string `json:"mac,omitempty"`
- Sandbox string `json:"sandbox,omitempty"`
+ Name string `json:"name"`
+ Mac string `json:"mac,omitempty"`
+ Mtu int `json:"mtu,omitempty"`
+ Sandbox string `json:"sandbox,omitempty"`
+ SocketPath string `json:"socketPath,omitempty"`
+ PciID string `json:"pciID,omitempty"`
}
func (i *Interface) String() string {
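A side effect of the custom `MarshalJSON` above is that an empty `DNS` struct disappears from the wire format instead of serializing as `"dns":{}`. A small standalone sketch (the values are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	types100 "github.com/containernetworking/cni/pkg/types/100"
)

func main() {
	r := &types100.Result{
		CNIVersion: "1.1.0",
		Interfaces: []*types100.Interface{{Name: "eth0", Mtu: 1500}},
	}
	b, err := json.Marshal(r) // uses (*Result).MarshalJSON
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // no "dns" key, since r.DNS.IsEmpty() is true
}
```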
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go
index 7516f03ef581..68a602bfdb49 100644
--- a/vendor/github.com/containernetworking/cni/pkg/types/args.go
+++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go
@@ -26,8 +26,8 @@ import (
type UnmarshallableBool bool
// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// Returns boolean true if the string is "1" or "[Tt]rue"
-// Returns boolean false if the string is "0" or "[Ff]alse"
+// Returns boolean true if the string is "1" or "true" or "True"
+// Returns boolean false if the string is "0" or "false" or "False"
func (b *UnmarshallableBool) UnmarshalText(data []byte) error {
s := strings.ToLower(string(data))
switch s {
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go
index ed28b33e8e1c..452cb62201d0 100644
--- a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go
+++ b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go
@@ -19,6 +19,9 @@ import (
"fmt"
"github.com/containernetworking/cni/pkg/types"
+ _ "github.com/containernetworking/cni/pkg/types/020"
+ _ "github.com/containernetworking/cni/pkg/types/040"
+ _ "github.com/containernetworking/cni/pkg/types/100"
convert "github.com/containernetworking/cni/pkg/types/internal"
)
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go
index fba17dfc0f39..8453bb5d87c5 100644
--- a/vendor/github.com/containernetworking/cni/pkg/types/types.go
+++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go
@@ -56,30 +56,73 @@ func (n *IPNet) UnmarshalJSON(data []byte) error {
return nil
}
-// NetConf describes a network.
-type NetConf struct {
+// NetConfType describes a network.
+type NetConfType struct {
CNIVersion string `json:"cniVersion,omitempty"`
Name string `json:"name,omitempty"`
Type string `json:"type,omitempty"`
Capabilities map[string]bool `json:"capabilities,omitempty"`
IPAM IPAM `json:"ipam,omitempty"`
- DNS DNS `json:"dns"`
+ DNS DNS `json:"dns,omitempty"`
RawPrevResult map[string]interface{} `json:"prevResult,omitempty"`
PrevResult Result `json:"-"`
+
+ // ValidAttachments is only supplied when executing a GC operation
+ ValidAttachments []GCAttachment `json:"cni.dev/valid-attachments,omitempty"`
+}
+
+// NetConf is a separate type (rather than an alias) so that the custom MarshalJSON() on NetConfType does not apply to it and recurse; see issue #1096
+type NetConf NetConfType
+
+// GCAttachment is the parameters to a GC call -- namely,
+// the container ID and ifname pair that represents a
+// still-valid attachment.
+type GCAttachment struct {
+ ContainerID string `json:"containerID"`
+ IfName string `json:"ifname"`
+}
+
+// Note: DNS should be omitted when empty, but the default Marshal function
+// would output an empty structure, hence the need for a custom MarshalJSON.
+func (n *NetConfType) MarshalJSON() ([]byte, error) {
+ // use type alias to escape recursion for json.Marshal() to MarshalJSON()
+ type fixObjType = NetConf
+
+ bytes, err := json.Marshal(fixObjType(*n))
+ if err != nil {
+ return nil, err
+ }
+
+ fixupObj := make(map[string]interface{})
+ if err := json.Unmarshal(bytes, &fixupObj); err != nil {
+ return nil, err
+ }
+
+ if n.DNS.IsEmpty() {
+ delete(fixupObj, "dns")
+ }
+
+ return json.Marshal(fixupObj)
}
type IPAM struct {
Type string `json:"type,omitempty"`
}
+// IsEmpty returns true if the IPAM structure has no value, otherwise it returns false
+func (i *IPAM) IsEmpty() bool {
+ return i.Type == ""
+}
+
// NetConfList describes an ordered list of networks.
type NetConfList struct {
CNIVersion string `json:"cniVersion,omitempty"`
Name string `json:"name,omitempty"`
DisableCheck bool `json:"disableCheck,omitempty"`
+ DisableGC bool `json:"disableGC,omitempty"`
Plugins []*NetConf `json:"plugins,omitempty"`
}
@@ -116,31 +159,48 @@ type DNS struct {
Options []string `json:"options,omitempty"`
}
+// IsEmpty returns true if the DNS structure has no value, otherwise it returns false
+func (d *DNS) IsEmpty() bool {
+ if len(d.Nameservers) == 0 && d.Domain == "" && len(d.Search) == 0 && len(d.Options) == 0 {
+ return true
+ }
+ return false
+}
+
func (d *DNS) Copy() *DNS {
if d == nil {
return nil
}
to := &DNS{Domain: d.Domain}
- for _, ns := range d.Nameservers {
- to.Nameservers = append(to.Nameservers, ns)
- }
- for _, s := range d.Search {
- to.Search = append(to.Search, s)
- }
- for _, o := range d.Options {
- to.Options = append(to.Options, o)
- }
+ to.Nameservers = append(to.Nameservers, d.Nameservers...)
+ to.Search = append(to.Search, d.Search...)
+ to.Options = append(to.Options, d.Options...)
return to
}
type Route struct {
- Dst net.IPNet
- GW net.IP
+ Dst net.IPNet
+ GW net.IP
+ MTU int
+ AdvMSS int
+ Priority int
+ Table *int
+ Scope *int
}
func (r *Route) String() string {
- return fmt.Sprintf("%+v", *r)
+ table := ""
+ if r.Table != nil {
+ table = fmt.Sprintf("%d", *r.Table)
+ }
+
+ scope := ""
+ if r.Scope != nil {
+ scope = fmt.Sprintf("%d", *r.Scope)
+ }
+
+ return fmt.Sprintf("{Dst:%+v GW:%v MTU:%d AdvMSS:%d Priority:%d Table:%s Scope:%s}", r.Dst, r.GW, r.MTU, r.AdvMSS, r.Priority, table, scope)
}
func (r *Route) Copy() *Route {
@@ -148,14 +208,30 @@ func (r *Route) Copy() *Route {
return nil
}
- return &Route{
- Dst: r.Dst,
- GW: r.GW,
+ route := &Route{
+ Dst: r.Dst,
+ GW: r.GW,
+ MTU: r.MTU,
+ AdvMSS: r.AdvMSS,
+ Priority: r.Priority,
+ Scope: r.Scope,
+ }
+
+ if r.Table != nil {
+ table := *r.Table
+ route.Table = &table
}
+
+ if r.Scope != nil {
+ scope := *r.Scope
+ route.Scope = &scope
+ }
+
+ return route
}
// Well known error codes
-// see /~https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes
+// see /~https://github.com/containernetworking/cni/blob/main/SPEC.md#well-known-error-codes
const (
ErrUnknown uint = iota // 0
ErrIncompatibleCNIVersion // 1
@@ -165,6 +241,7 @@ const (
ErrIOFailure // 5
ErrDecodingFailure // 6
ErrInvalidNetworkConfig // 7
+ ErrInvalidNetNS // 8
ErrTryAgainLater uint = 11
ErrInternal uint = 999
)
@@ -200,8 +277,13 @@ func (e *Error) Print() error {
// JSON (un)marshallable types
type route struct {
- Dst IPNet `json:"dst"`
- GW net.IP `json:"gw,omitempty"`
+ Dst IPNet `json:"dst"`
+ GW net.IP `json:"gw,omitempty"`
+ MTU int `json:"mtu,omitempty"`
+ AdvMSS int `json:"advmss,omitempty"`
+ Priority int `json:"priority,omitempty"`
+ Table *int `json:"table,omitempty"`
+ Scope *int `json:"scope,omitempty"`
}
func (r *Route) UnmarshalJSON(data []byte) error {
@@ -212,13 +294,24 @@ func (r *Route) UnmarshalJSON(data []byte) error {
r.Dst = net.IPNet(rt.Dst)
r.GW = rt.GW
+ r.MTU = rt.MTU
+ r.AdvMSS = rt.AdvMSS
+ r.Priority = rt.Priority
+ r.Table = rt.Table
+ r.Scope = rt.Scope
+
return nil
}
func (r Route) MarshalJSON() ([]byte, error) {
rt := route{
- Dst: IPNet(r.Dst),
- GW: r.GW,
+ Dst: IPNet(r.Dst),
+ GW: r.GW,
+ MTU: r.MTU,
+ AdvMSS: r.AdvMSS,
+ Priority: r.Priority,
+ Table: r.Table,
+ Scope: r.Scope,
}
return json.Marshal(rt)
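The widened `Route` type round-trips its new attributes through the private `route` struct above; the pointer fields (`Table`, `Scope`) are omitted from JSON when nil. An illustrative sketch (the concrete values are made up):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net"

	"github.com/containernetworking/cni/pkg/types"
)

func main() {
	table := 100
	r := types.Route{
		Dst:   net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)},
		GW:    net.ParseIP("10.0.0.1"),
		MTU:   1400,
		Table: &table, // Scope stays nil, so "scope" is omitted
	}
	b, _ := json.Marshal(r) // uses Route.MarshalJSON
	fmt.Println(string(b))
}
```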
diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go
index b8ec38874595..1981d2556914 100644
--- a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go
+++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go
@@ -36,7 +36,6 @@ var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`)
// ValidateContainerID will validate that the supplied containerID is not empty and does not contain invalid characters
func ValidateContainerID(containerID string) *types.Error {
-
if containerID == "" {
return types.NewError(types.ErrUnknownContainer, "missing containerID", "")
}
@@ -48,7 +47,6 @@ func ValidateContainerID(containerID string) *types.Error {
// ValidateNetworkName will validate that the supplied networkName does not contain invalid characters
func ValidateNetworkName(networkName string) *types.Error {
-
if networkName == "" {
return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "")
}
@@ -58,11 +56,11 @@ func ValidateNetworkName(networkName string) *types.Error {
return nil
}
-// ValidateInterfaceName will validate the interface name based on the three rules below
+// ValidateInterfaceName will validate the interface name based on the four rules below
// 1. The name must not be empty
// 2. The name must be less than 16 characters
// 3. The name must not be "." or ".."
-// 3. The name must not contain / or : or any whitespace characters
+// 4. The name must not contain / or : or any whitespace characters
// ref to /~https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024
func ValidateInterfaceName(ifName string) *types.Error {
if len(ifName) == 0 {
diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go
index 17b22b6b0c4e..e3bd375bca1e 100644
--- a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go
+++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go
@@ -142,3 +142,27 @@ func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) {
}
return false, nil
}
+
+// GreaterThan returns true if the first version is greater than the second
+func GreaterThan(version, otherVersion string) (bool, error) {
+ firstMajor, firstMinor, firstMicro, err := ParseVersion(version)
+ if err != nil {
+ return false, err
+ }
+
+ secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion)
+ if err != nil {
+ return false, err
+ }
+
+ if firstMajor > secondMajor {
+ return true, nil
+ } else if firstMajor == secondMajor {
+ if firstMinor > secondMinor {
+ return true, nil
+ } else if firstMinor == secondMinor && firstMicro > secondMicro {
+ return true, nil
+ }
+ }
+ return false, nil
+}
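`GreaterThan` complements the existing `GreaterThanOrEqualTo` with a strict comparison. A trivial usage sketch:

```go
package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	gt, err := version.GreaterThan("1.1.0", "1.0.0")
	if err != nil {
		panic(err) // ParseVersion rejects malformed version strings
	}
	fmt.Println(gt) // true: 1.1.0 is strictly newer than 1.0.0
}
```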
diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go
index 1326f8038e57..a4d442c8ecda 100644
--- a/vendor/github.com/containernetworking/cni/pkg/version/version.go
+++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go
@@ -19,13 +19,12 @@ import (
"fmt"
"github.com/containernetworking/cni/pkg/types"
- types100 "github.com/containernetworking/cni/pkg/types/100"
"github.com/containernetworking/cni/pkg/types/create"
)
// Current reports the version of the CNI spec implemented by this library
func Current() string {
- return types100.ImplementedSpecVersion
+ return "1.1.0"
}
// Legacy PluginInfo describes a plugin that is backwards compatible with the
@@ -35,8 +34,10 @@ func Current() string {
//
// Any future CNI spec versions which meet this definition should be added to
// this list.
-var Legacy = PluginSupports("0.1.0", "0.2.0")
-var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0")
+var (
+ Legacy = PluginSupports("0.1.0", "0.2.0")
+ All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0", "1.1.0")
+)
// VersionsFrom returns a list of versions starting from min, inclusive
func VersionsStartingFrom(min string) PluginInfo {
diff --git a/vendor/github.com/containers/ocicrypt/.golangci.yml b/vendor/github.com/containers/ocicrypt/.golangci.yml
index d3800d1ea3e4..bf39af8368e8 100644
--- a/vendor/github.com/containers/ocicrypt/.golangci.yml
+++ b/vendor/github.com/containers/ocicrypt/.golangci.yml
@@ -7,7 +7,7 @@ linters:
- goimports
- revive
- ineffassign
- - vet
+ - govet
- unused
- misspell
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
index 0c485d514c9b..b8436a8d5b13 100644
--- a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
+++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
@@ -96,9 +96,8 @@ func (lbco LayerBlockCipherOptions) GetOpt(key string) (value []byte, ok bool) {
return v, ok
} else if v, ok := lbco.Private.CipherOptions[key]; ok {
return v, ok
- } else {
- return nil, false
}
+ return nil, false
}
func wrapFinalizerWithType(fin Finalizer, typ LayerCipherType) Finalizer {
diff --git a/vendor/github.com/containers/ocicrypt/gpg.go b/vendor/github.com/containers/ocicrypt/gpg.go
index 3912e82dca0e..3bba4669bd8a 100644
--- a/vendor/github.com/containers/ocicrypt/gpg.go
+++ b/vendor/github.com/containers/ocicrypt/gpg.go
@@ -79,9 +79,8 @@ func GuessGPGVersion() GPGVersion {
return GPGv2
} else if err := exec.Command("gpg", "--version").Run(); err == nil {
return GPGv1
- } else {
- return GPGVersionUndetermined
}
+ return GPGVersionUndetermined
}
// NewGPGClient creates a new GPGClient object representing the given version
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go
index 24e1d619d6e7..c1bdd6fbeb02 100644
--- a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go
+++ b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go
@@ -24,7 +24,7 @@ import (
"github.com/containers/ocicrypt/config"
"github.com/containers/ocicrypt/keywrap"
"github.com/containers/ocicrypt/utils"
- "github.com/go-jose/go-jose/v3"
+ "github.com/go-jose/go-jose/v4"
)
type jweKeyWrapper struct {
@@ -65,7 +65,11 @@ func (kw *jweKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]
}
func (kw *jweKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jweString []byte) ([]byte, error) {
- jwe, err := jose.ParseEncrypted(string(jweString))
+ // cf. list of algorithms in func addPubKeys() below
+ keyEncryptionAlgorithms := []jose.KeyAlgorithm{jose.RSA_OAEP, jose.RSA_OAEP_256, jose.ECDH_ES_A128KW, jose.ECDH_ES_A192KW, jose.ECDH_ES_A256KW}
+ // accept all algorithms defined in RFC 7518, section 5.1
+ contentEncryption := []jose.ContentEncryption{jose.A128CBC_HS256, jose.A192CBC_HS384, jose.A256CBC_HS512, jose.A128GCM, jose.A192GCM, jose.A256GCM}
+ jwe, err := jose.ParseEncrypted(string(jweString), keyEncryptionAlgorithms, contentEncryption)
if err != nil {
return nil, errors.New("jose.ParseEncrypted failed")
}
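The hunk above tracks go-jose v4's breaking change: `ParseEncrypted` now demands explicit allow-lists of key-wrapping and content-encryption algorithms. A standalone sketch of the v4 call shape (the input string is a placeholder, so parsing fails by design):

```go
package main

import (
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	const serialized = "..." // placeholder for a compact-serialized JWE

	_, err := jose.ParseEncrypted(serialized,
		[]jose.KeyAlgorithm{jose.RSA_OAEP, jose.RSA_OAEP_256},
		[]jose.ContentEncryption{jose.A256GCM},
	)
	fmt.Println(err) // inputs using algorithms outside the allow-lists are rejected too
}
```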
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go
index ddb244a8058d..6ac0fcb95a87 100644
--- a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go
+++ b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go
@@ -124,9 +124,8 @@ func (kw *keyProviderKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []b
}
return protocolOuput.KeyWrapResults.Annotation, nil
- } else {
- return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd")
}
+ return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd")
}
return nil, nil
@@ -162,9 +161,8 @@ func (kw *keyProviderKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString
}
return protocolOuput.KeyUnwrapResults.OptsData, nil
- } else {
- return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd")
}
+ return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd")
}
func getProviderGRPCOutput(input []byte, connString string, operation KeyProviderKeyWrapProtocolOperation) (*KeyProviderKeyWrapProtocolOutput, error) {
diff --git a/vendor/github.com/containers/ocicrypt/utils/utils.go b/vendor/github.com/containers/ocicrypt/utils/utils.go
index 160f747b28dd..f653f2efcb19 100644
--- a/vendor/github.com/containers/ocicrypt/utils/utils.go
+++ b/vendor/github.com/containers/ocicrypt/utils/utils.go
@@ -26,7 +26,7 @@ import (
"strings"
"github.com/containers/ocicrypt/crypto/pkcs11"
- "github.com/go-jose/go-jose/v3"
+ "github.com/go-jose/go-jose/v4"
"golang.org/x/crypto/openpgp"
)
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
index b48005673455..42bf32aab003 100644
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
@@ -9,6 +9,8 @@ func Render(doc []byte) []byte {
renderer := NewRoffRenderer()
return blackfriday.Run(doc,
- []blackfriday.Option{blackfriday.WithRenderer(renderer),
- blackfriday.WithExtensions(renderer.GetExtensions())}...)
+ []blackfriday.Option{
+ blackfriday.WithRenderer(renderer),
+ blackfriday.WithExtensions(renderer.GetExtensions()),
+ }...)
}
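For context, `Render` is the whole public surface exercised by the roff changes that follow. A quick sketch (the sample markdown is made up); with the `RenderHeader` change below, a document containing a table should now emit the `'\" t` preprocessor line first:

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	md := []byte("# EXAMPLE\n\n| A | B |\n|---|---|\n| 1 | 2 |\n")
	fmt.Println(string(md2man.Render(md)))
}
```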
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
index be2b3436062d..8a290f1972a2 100644
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
@@ -1,6 +1,8 @@
package md2man
import (
+ "bufio"
+ "bytes"
"fmt"
"io"
"os"
@@ -20,34 +22,35 @@ type roffRenderer struct {
}
const (
- titleHeader = ".TH "
- topLevelHeader = "\n\n.SH "
- secondLevelHdr = "\n.SH "
- otherHeader = "\n.SS "
- crTag = "\n"
- emphTag = "\\fI"
- emphCloseTag = "\\fP"
- strongTag = "\\fB"
- strongCloseTag = "\\fP"
- breakTag = "\n.br\n"
- paraTag = "\n.PP\n"
- hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
- linkTag = "\n\\[la]"
- linkCloseTag = "\\[ra]"
- codespanTag = "\\fB\\fC"
- codespanCloseTag = "\\fR"
- codeTag = "\n.PP\n.RS\n\n.nf\n"
- codeCloseTag = "\n.fi\n.RE\n"
- quoteTag = "\n.PP\n.RS\n"
- quoteCloseTag = "\n.RE\n"
- listTag = "\n.RS\n"
- listCloseTag = "\n.RE\n"
- dtTag = "\n.TP\n"
- dd2Tag = "\n"
- tableStart = "\n.TS\nallbox;\n"
- tableEnd = ".TE\n"
- tableCellStart = "T{\n"
- tableCellEnd = "\nT}\n"
+ titleHeader = ".TH "
+ topLevelHeader = "\n\n.SH "
+ secondLevelHdr = "\n.SH "
+ otherHeader = "\n.SS "
+ crTag = "\n"
+ emphTag = "\\fI"
+ emphCloseTag = "\\fP"
+ strongTag = "\\fB"
+ strongCloseTag = "\\fP"
+ breakTag = "\n.br\n"
+ paraTag = "\n.PP\n"
+ hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
+ linkTag = "\n\\[la]"
+ linkCloseTag = "\\[ra]"
+ codespanTag = "\\fB"
+ codespanCloseTag = "\\fR"
+ codeTag = "\n.EX\n"
+ codeCloseTag = ".EE\n" // Do not prepend a newline character since code blocks, by definition, already include a newline (or at least that is how blackfriday hands them to us).
+ quoteTag = "\n.PP\n.RS\n"
+ quoteCloseTag = "\n.RE\n"
+ listTag = "\n.RS\n"
+ listCloseTag = "\n.RE\n"
+ dtTag = "\n.TP\n"
+ dd2Tag = "\n"
+ tableStart = "\n.TS\nallbox;\n"
+ tableEnd = ".TE\n"
+ tableCellStart = "T{\n"
+ tableCellEnd = "\nT}\n"
+ tablePreprocessor = `'\" t`
)
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
@@ -74,6 +77,16 @@ func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
// RenderHeader handles outputting the header at document start
func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
+ // We need to walk the tree to check if there are any tables.
+ // If there are, we need to enable the roff table preprocessor.
+ ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
+ if node.Type == blackfriday.Table {
+ out(w, tablePreprocessor+"\n")
+ return blackfriday.Terminate
+ }
+ return blackfriday.GoToNext
+ })
+
// disable hyphenation
out(w, ".nh\n")
}
@@ -86,8 +99,7 @@ func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
// RenderNode is called for each node in a markdown document; based on the node
// type the equivalent roff output is sent to the writer
func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
-
- var walkAction = blackfriday.GoToNext
+ walkAction := blackfriday.GoToNext
switch node.Type {
case blackfriday.Text:
@@ -109,9 +121,16 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
out(w, strongCloseTag)
}
case blackfriday.Link:
- if !entering {
- out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag)
+ // Don't render the link text for automatic links, because this
+ // will only duplicate the URL in the roff output.
+ // See https://daringfireball.net/projects/markdown/syntax#autolink
+ if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) {
+ out(w, string(node.FirstChild.Literal))
}
+ // Hyphens in a link must be escaped to avoid word-wrap in the rendered man page.
+ escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-")
+ out(w, linkTag+escapedLink+linkCloseTag)
+ walkAction = blackfriday.SkipChildren
case blackfriday.Image:
// ignore images
walkAction = blackfriday.SkipChildren
@@ -160,6 +179,11 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
r.handleTableCell(w, node, entering)
case blackfriday.HTMLSpan:
// ignore other HTML tags
+ case blackfriday.HTMLBlock:
+ if bytes.HasPrefix(node.Literal, []byte("<!--")) {
+ break // ignore comments, no warning
+ }
diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md
+
+[fxamacker/cbor](/~https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
+
+CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc. CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.
+
+`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](/~https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
+
+See [Quick Start](#quick-start) and [Releases](/~https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accept a user-specified buffer.
+
+## fxamacker/cbor
+
+[![](/~https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](/~https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
+[![](/~https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](/~https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
+[![CodeQL](/~https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](/~https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
+[![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage)
+[![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor)
+
+`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
+
+Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
+
+Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
+
+Highlights
+
+__🚀 Speed__
+
+Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data.
+
+__🔒 Security__
+
+Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+
+Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](/~https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation.
+
+__🗜️ Data Size__
+
+Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
+
+__:jigsaw: Usability__
+
+API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines.
+
+Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc.
+
+__📆 Extensibility__
+
+Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library.
+
+
+
+
+
+### Secure Decoding with Configurable Settings
+
+`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
+
+By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+
+Example decoding with encoding/gob 💥 fatal error (out of memory)
+
+```Go
+// Example of encoding/gob having "fatal error: runtime: out of memory"
+// while decoding 181 bytes.
+package main
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/hex"
+ "fmt"
+)
+
+// Example data is from /~https://github.com/golang/go/issues/24446
+// (shortened to 181 bytes).
+const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
+ "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
+ "860001013001ff860001013001ffb80000001eff850401010e3030303030" +
+ "30303030303030303001ff3000010c0104000016ffb70201010830303030" +
+ "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
+ "303030303030303030303030303030303030303030303030303030303030" +
+ "30"
+
+type X struct {
+ J *X
+ K map[string]int
+}
+
+func main() {
+ raw, _ := hex.DecodeString(data)
+ decoder := gob.NewDecoder(bytes.NewReader(raw))
+
+ var x X
+ decoder.Decode(&x) // fatal error: runtime: out of memory
+ fmt.Println("Decoding finished.")
+}
+```
+
+
+
+
+
+`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to
+decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
+
+| Codec | Speed (ns/op) | Memory | Allocs |
+| :---- | ------------: | -----: | -----: |
+| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
+| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |
+
+Benchmark details
+
+Latest comparison used:
+- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
+- go test -bench=. -benchmem -count=20
+
+#### Prior comparisons
+
+| Codec | Speed (ns/op) | Memory | Allocs |
+| :---- | ------------: | -----: | -----: |
+| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
+| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
+| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
+| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
+
+- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+- go1.19.6, linux/amd64, i5-13600K (DDR4)
+- go test -bench=. -benchmem -count=20
+
+
+
+
+
+### Smaller Encodings with Struct Tags
+
+Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
+
+Example encoding 3-level nested Go struct to 1 byte CBOR
+
+https://go.dev/play/p/YxwvfPdFQG2
+
+```Go
+// Example encoding nested struct (with omitempty tag)
+// - encoding/json: 18 byte JSON
+// - fxamacker/cbor: 1 byte CBOR
+package main
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+type GrandChild struct {
+ Quux int `json:",omitempty"`
+}
+
+type Child struct {
+ Baz int `json:",omitempty"`
+ Qux GrandChild `json:",omitempty"`
+}
+
+type Parent struct {
+ Foo Child `json:",omitempty"`
+ Bar int `json:",omitempty"`
+}
+
+func cb() {
+ results, _ := cbor.Marshal(Parent{})
+ fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
+
+ text, _ := cbor.Diagnose(results) // Diagnostic Notation
+ fmt.Println("DN: " + text)
+}
+
+func js() {
+ results, _ := json.Marshal(Parent{})
+ fmt.Println("hex(JSON): " + hex.EncodeToString(results))
+
+ text := string(results) // JSON
+ fmt.Println("JSON: " + text)
+}
+
+func main() {
+ cb()
+ fmt.Println("-------------")
+ js()
+}
+```
+
+Output (DN is Diagnostic Notation):
+```
+hex(CBOR): a0
+DN: {}
+-------------
+hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
+JSON: {"Foo":{"Qux":{}}}
+```
+
+
+
+
+
+Example using different struct tags together:
+
+![alt text](/~https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")
+
+API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
+
+## Quick Start
+
+__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
+
+### Key Points
+
+This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
+
+- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items.
+- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items.
+
+Configurable limits and options can be used to balance trade-offs.
+
+- Encoding and decoding modes are created from options (settings).
+- Modes can be created at startup and reused.
+- Modes are safe for concurrent use.
+
+### Default Mode
+
+Package level functions only use this library's default settings.
+They provide the "default mode" of encoding and decoding.
+
+```go
+// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc.
+b, err = cbor.Marshal(v) // encode v to []byte b
+err = cbor.Unmarshal(b, &v) // decode []byte b to v
+decoder = cbor.NewDecoder(r) // create decoder with io.Reader r
+err = decoder.Decode(&v) // decode a CBOR data item to v
+
+// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface.
+err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool.
+
+// v2.5.0 added new functions that return remaining bytes.
+
+// UnmarshalFirst decodes first CBOR data item and returns remaining bytes.
+rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
+
+// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
+text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text
+
+// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes,
+// but new funcs UnmarshalFirst and DiagnoseFirst do not.
+```
+
+__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc.
+
+- Different CBOR libraries may use different default settings.
+- CBOR-based formats or protocols usually require specific settings.
+
+For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
+
+### Presets
+
+Presets can be used as-is or as a starting point for custom settings.
+
+```go
+// EncOptions is a struct of encoder settings.
+func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding
+func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization
+func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR
+func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR
+```
+
+Presets are used to create custom modes.
+
+### Custom Modes
+
+Modes are created from settings. Once created, modes have immutable settings.
+
+💡 Create the mode at startup and reuse it. It is safe for concurrent use.
+
+```Go
+// Create encoding mode.
+opts := cbor.CoreDetEncOptions() // use preset options as a starting point
+opts.Time = cbor.TimeUnix // change any settings if needed
+em, err := opts.EncMode() // create an immutable encoding mode
+
+// Reuse the encoding mode. It is safe for concurrent use.
+
+// API matches encoding/json.
+b, err := em.Marshal(v) // encode v to []byte b
+encoder := em.NewEncoder(w) // create encoder with io.Writer w
+err := encoder.Encode(v) // encode v to io.Writer w
+```
+
+Default mode and custom modes automatically apply struct tags.
+
+### User Specified Buffer for Encoding (v2.7.0)
+
+The `UserBufferEncMode` interface extends `EncMode` with `MarshalToBuffer()`, which encodes into a user-specified buffer instead of the built-in buffer pool.
+
+```Go
+em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode
+
+var buf bytes.Buffer
+err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
+```
+
+### Struct Tags
+
+Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
+
+Example encoding 3-level nested Go struct to 1 byte CBOR
+
+https://go.dev/play/p/YxwvfPdFQG2
+
+```Go
+// Example encoding nested struct (with omitempty tag)
+// - encoding/json: 18 byte JSON
+// - fxamacker/cbor: 1 byte CBOR
+package main
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+type GrandChild struct {
+ Quux int `json:",omitempty"`
+}
+
+type Child struct {
+ Baz int `json:",omitempty"`
+ Qux GrandChild `json:",omitempty"`
+}
+
+type Parent struct {
+ Foo Child `json:",omitempty"`
+ Bar int `json:",omitempty"`
+}
+
+func cb() {
+ results, _ := cbor.Marshal(Parent{})
+ fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
+
+ text, _ := cbor.Diagnose(results) // Diagnostic Notation
+ fmt.Println("DN: " + text)
+}
+
+func js() {
+ results, _ := json.Marshal(Parent{})
+ fmt.Println("hex(JSON): " + hex.EncodeToString(results))
+
+ text := string(results) // JSON
+ fmt.Println("JSON: " + text)
+}
+
+func main() {
+ cb()
+ fmt.Println("-------------")
+ js()
+}
+```
+
+Output (DN is Diagnostic Notation):
+```
+hex(CBOR): a0
+DN: {}
+-------------
+hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
+JSON: {"Foo":{"Qux":{}}}
+```
+
+
+
+
+
+Example using several struct tags
+
+![alt text](/~https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")
+
+
+
+Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
+
+### CBOR Tags
+
+CBOR tags are specified in a `TagSet`.
+
+Custom modes can be created with a `TagSet` to handle CBOR tags.
+
+```go
+em, err := opts.EncMode() // no CBOR tags
+em, err := opts.EncModeWithTags(ts) // immutable CBOR tags
+em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
+```
+
+`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
+
+Example using TagSet and TagOptions
+
+```go
+// Use signedCWT struct defined in "Decoding CWT" example.
+
+// Create TagSet (safe for concurrency).
+tags := cbor.NewTagSet()
+// Register tag COSE_Sign1 18 with signedCWT type.
+tags.Add(
+ cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
+ reflect.TypeOf(signedCWT{}),
+ 18)
+
+// Create DecMode with immutable tags.
+dm, _ := cbor.DecOptions{}.DecModeWithTags(tags)
+
+// Unmarshal to signedCWT with tag support.
+var v signedCWT
+if err := dm.Unmarshal(data, &v); err != nil {
+ return err
+}
+
+// Create EncMode with immutable tags.
+em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
+
+// Marshal signedCWT with tag number.
+if data, err := em.Marshal(v); err != nil {
+ return err
+}
+```
+
+
+
+### Functions and Interfaces
+
+Functions and interfaces at a glance
+
+Common functions with same API as `encoding/json`:
+- `Marshal`, `Unmarshal`
+- `NewEncoder`, `(*Encoder).Encode`
+- `NewDecoder`, `(*Decoder).Decode`
+
+NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes,
+because RFC 8949 treats a CBOR data item followed by remaining bytes as malformed.
+- 💡 Use `UnmarshalFirst` to decode the first CBOR data item and return any remaining bytes.
+
+Other useful functions:
+- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
+- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes.
+- `Wellformed` returns true if the CBOR data item is well-formed.
+
+Interfaces identical or comparable to Go `encoding` packages include:
+`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
+
+The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding.
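+
+A small sketch of `RawMessage` used for deferred decoding (the envelope type is illustrative, not from this library):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+type envelope struct {
+	Type    string          `cbor:"type"`
+	Payload cbor.RawMessage `cbor:"payload"` // decoded later, once Type is known
+}
+
+func main() {
+	b, _ := cbor.Marshal(envelope{Type: "point", Payload: cbor.RawMessage{0x01}})
+
+	var e envelope
+	_ = cbor.Unmarshal(b, &e)
+	fmt.Printf("%x\n", e.Payload) // raw CBOR kept verbatim: 01
+}
+```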
+
+
+
+### Security Tips
+
+🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data.
+
+Default limits may need to be increased for systems handling very large data (e.g. blockchains).
+
+`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`.
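+
+A sketch combining both tips (the limit values are illustrative, not recommendations):
+
+```go
+package main
+
+import (
+	"io"
+	"os"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+func main() {
+	dm, err := cbor.DecOptions{
+		MaxArrayElements: 65536,
+		MaxMapPairs:      65536,
+		MaxNestedLevels:  16,
+	}.DecMode()
+	if err != nil {
+		panic(err)
+	}
+
+	// Cap how many bytes a single decode may pull from an untrusted stream.
+	dec := dm.NewDecoder(io.LimitReader(os.Stdin, 1<<20))
+
+	var v interface{}
+	_ = dec.Decode(&v)
+}
+```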
+
+## Status
+
+v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
+
+For more details, see [release notes](/~https://github.com/fxamacker/cbor/releases).
+
+### Prior Release
+
+[v2.6.0](/~https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
+
+v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](/~https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](/~https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
+
+__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading.
+
+See [v2.5.0 release notes](/~https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes.
+
+See ["Version and API Changes"](/~https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
+
+
+
+## Who uses fxamacker/cbor
+
+`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others.
+
+`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](/~https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope.
+
+## Standards
+
+`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
+
+Notable CBOR features include:
+
+| CBOR Feature | Description |
+| :--- | :--- |
+| CBOR tags | API supports built-in and user-defined tags. |
+| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. |
+| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). |
+| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. |
+| Indefinite length data | Option to allow/forbid for encoding and decoding. |
+| Well-formedness | Always checked and enforced. |
+| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. |
+| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). |
+
+Known limitations are noted in the [Limitations section](#limitations).
+
+Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps.
+
+Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data.
+
+After well-formedness is verified, basic validity errors are handled as follows:
+
+* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default.
+* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys.
+
+When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future.
+
+By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined.
+
+
+
+ Duplicate Map Keys
+
+This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
+
+`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type.
+
+`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number.
+
+APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol.
+
+
+
+
+ Tag Validity
+
+This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799):
+
+* Inadmissible type for tag content
+* Inadmissible value for tag content
+
+Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways:
+
+* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type.
+* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified.
+
+Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR.
+
+For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options).
+
+
diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md
+The table above has the results of the most common use-cases. The table below
+contains the results of all benchmarks, including unrealistic ones. It is
+provided for completeness.
+
+
+| Benchmark | go-toml v1 | BurntSushi/toml |
+| :--- | ---: | ---: |
+| Marshal/SimpleDocument/map-2 | 1.8x | 2.7x |
+| Marshal/SimpleDocument/struct-2 | 2.7x | 3.8x |
+| Unmarshal/SimpleDocument/map-2 | 3.8x | 3.0x |
+| Unmarshal/SimpleDocument/struct-2 | 5.6x | 4.1x |
+| UnmarshalDataset/example-2 | 3.0x | 3.2x |
+| UnmarshalDataset/code-2 | 2.3x | 2.9x |
+| UnmarshalDataset/twitter-2 | 2.6x | 2.7x |
+| UnmarshalDataset/citm_catalog-2 | 2.2x | 2.3x |
+| UnmarshalDataset/canada-2 | 1.8x | 1.5x |
+| UnmarshalDataset/config-2 | 4.1x | 2.9x |
+| geomean | 2.7x | 2.8x |
+
+This table can be generated with `./ci.sh benchmark -a -html`.
+
+
+## Modules
+
+go-toml uses Go's standard modules system.
+
+Installation instructions:
+
+- Go ≥ 1.16: Nothing to do. Use the import in your code. The `go` command deals
+ with it automatically.
+- Go ≥ 1.13: `GO111MODULE=on go get github.com/pelletier/go-toml/v2`.
+
+In case of trouble: [Go Modules FAQ][mod-faq].
+
+[mod-faq]: /~https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module
+
+## Tools
+
+Go-toml provides three handy command line tools:
+
+ * `tomljson`: Reads a TOML file and outputs its JSON representation.
+
+ ```
+ $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest
+ $ tomljson --help
+ ```
+
+ * `jsontoml`: Reads a JSON file and outputs a TOML representation.
+
+ ```
+ $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest
+ $ jsontoml --help
+ ```
+
+ * `tomll`: Lints and reformats a TOML file.
+
+ ```
+ $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest
+ $ tomll --help
+ ```
+
+### Docker image
+
+Those tools are also available as a [Docker image][docker]. For example, to use
+`tomljson`:
+
+```
+docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml
+```
+
+Multiple versions are available on [ghcr.io][docker].
+
+[docker]: /~https://github.com/pelletier/go-toml/pkgs/container/go-toml
+
+## Migrating from v1
+
+This section describes the differences between v1 and v2, with some pointers on
+how to get the original behavior when possible.
+
+### Decoding / Unmarshal
+
+#### Automatic field name guessing
+
+When unmarshaling to a struct, if a key in the TOML document does not exactly
+match the name of a struct field or any of the `toml`-tagged fields, v1 tries
+multiple variations of the key ([code][v1-keys]).
+
+V2 instead does a case-insensitive matching, like `encoding/json`.
+
+This could impact you if you are relying on casing to differentiate two fields,
+and one of them is not using the `toml` struct tag. The recommended solution is
+to be specific about tag names for those fields using the `toml` struct tag.
+
+[v1-keys]: /~https://github.com/pelletier/go-toml/blob/a2e52561804c6cd9392ebf0048ca64fe4af67a43/marshal.go#L775-L781
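+
+A minimal sketch of that fix (the `config` type and key names are hypothetical):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/pelletier/go-toml/v2"
+)
+
+type config struct {
+	// Without tags, both fields would match the key "username"
+	// case-insensitively; explicit tags remove the ambiguity.
+	UserName string `toml:"user_name"`
+	Username string `toml:"username"`
+}
+
+func main() {
+	var c config
+	_ = toml.Unmarshal([]byte("user_name = 'a'\nusername = 'b'"), &c)
+	fmt.Printf("%+v\n", c) // {UserName:a Username:b}
+}
+```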
+
+#### Ignore preexisting value in interface
+
+When decoding into a non-nil `interface{}`, go-toml v1 uses the type of the
+element in the interface to decode the object. For example:
+
+```go
+type inner struct {
+ B interface{}
+}
+type doc struct {
+ A interface{}
+}
+
+d := doc{
+ A: inner{
+ B: "Before",
+ },
+}
+
+data := `
+[A]
+B = "After"
+`
+
+toml.Unmarshal([]byte(data), &d)
+fmt.Printf("toml v1: %#v\n", d)
+
+// toml v1: main.doc{A:main.inner{B:"After"}}
+```
+
+In this case, field `A` is of type `interface{}`, containing an `inner` struct.
+V1 sees that type and uses it when decoding the object.
+
+When decoding an object into an `interface{}`, V2 instead disregards whatever
+value the `interface{}` may contain and replaces it with a
+`map[string]interface{}`. With the same data structure as above, here is what
+the result looks like:
+
+```go
+toml.Unmarshal([]byte(data), &d)
+fmt.Printf("toml v2: %#v\n", d)
+
+// toml v2: main.doc{A:map[string]interface {}{"B":"After"}}
+```
+
+This is to match `encoding/json`'s behavior. There is no way to make the v2
+decoder behave like v1.
+
+#### Values out of array bounds ignored
+
+When decoding into an array, v1 returns an error when the number of elements
+contained in the doc is greater than the length of the array. For example:
+
+```go
+type doc struct {
+ A [2]string
+}
+d := doc{}
+err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
+fmt.Println(err)
+
+// (1, 1): unmarshal: TOML array length (3) exceeds destination array length (2)
+```
+
+In the same situation, v2 ignores the last value:
+
+```go
+err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
+fmt.Println("err:", err, "d:", d)
+// err: <nil> d: {[one two]}
+```
+
+This is to match `encoding/json`'s behavior. There is no way to make the v2
+decoder behave like v1.
+
+#### Support for `toml.Unmarshaler` has been dropped
+
+This method was not widely used, poorly defined, and added a lot of complexity.
+A similar effect can be achieved by implementing the `encoding.TextUnmarshaler`
+interface and using strings, as sketched below.
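+
+A sketch of that approach (the `duration` type is hypothetical):
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/pelletier/go-toml/v2"
+)
+
+// duration fills itself from a TOML string via encoding.TextUnmarshaler.
+type duration time.Duration
+
+func (d *duration) UnmarshalText(b []byte) error {
+	v, err := time.ParseDuration(string(b))
+	if err != nil {
+		return err
+	}
+	*d = duration(v)
+	return nil
+}
+
+func main() {
+	var cfg struct {
+		Timeout duration `toml:"timeout"`
+	}
+	_ = toml.Unmarshal([]byte(`timeout = "1m30s"`), &cfg)
+	fmt.Println(time.Duration(cfg.Timeout)) // 1m30s
+}
+```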
+
+#### Support for `default` struct tag has been dropped
+
+This feature adds complexity and a poorly defined API for an effect that can be
+accomplished outside of the library.
+
+It does not seem like other format parsers in Go support that feature (the
+project referenced in the original ticket #202 has not been updated since 2017).
+Given that go-toml v2 should not touch values not in the document, the same
+effect can be achieved by pre-filling the struct with defaults (libraries like
+[go-defaults][go-defaults] can help). Also, string representation is not well
+defined for all types: it creates issues like #278.
+
+The recommended replacement is pre-filling the struct before unmarshaling.
+
+[go-defaults]: /~https://github.com/mcuadros/go-defaults
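+
+A minimal sketch of pre-filling (the `config` type and defaults are hypothetical):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/pelletier/go-toml/v2"
+)
+
+type config struct {
+	Host string `toml:"host"`
+	Port int    `toml:"port"`
+}
+
+func main() {
+	// Pre-fill the defaults; Unmarshal only overwrites keys
+	// that are present in the document.
+	cfg := config{Host: "localhost", Port: 8080}
+	_ = toml.Unmarshal([]byte(`port = 9090`), &cfg)
+	fmt.Printf("%+v\n", cfg) // {Host:localhost Port:9090}
+}
+```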
+
+#### `toml.Tree` replacement
+
+This structure was the initial attempt at providing a document model for
+go-toml. It allowed manipulating the structure of any document, as well as
+encoding and decoding it from its TOML representation. While a more robust
+feature was initially planned for go-toml v2, it was ultimately [removed from
+the scope][nodoc] of this library, with no plan to add it back at the moment.
+The closest equivalent at the moment would be to unmarshal into an
+`interface{}` and use type assertions and/or reflection to manipulate the
+arbitrary structure. However, this would fall short of providing all of the
+TOML features, such as adding comments or being specific about whitespace.
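+
+A sketch of that workaround (integers decode as `int64` with the default Go data types):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/pelletier/go-toml/v2"
+)
+
+func main() {
+	var v interface{}
+	_ = toml.Unmarshal([]byte("[server]\nport = 8080"), &v)
+
+	// Walk the generic structure with type assertions.
+	root := v.(map[string]interface{})
+	server := root["server"].(map[string]interface{})
+	fmt.Println(server["port"].(int64)) // 8080
+}
+```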
+
+
+#### `toml.Position` is not retrievable anymore
+
+The API for retrieving the position (line, column) of a specific TOML element no
+longer exists. This was done to minimize the number of concepts introduced by
+the library (query paths), and to avoid the performance hit of storing positions
+in the absence of a document model, for a feature that seemed to have little
+use. Errors, however, have gained more detailed position information. Position
+retrieval seems better fitted for a document model, which has been [removed
+from the scope][nodoc] of go-toml v2 at the moment.
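+
+For example, positions can be read back from a `DecodeError` (the invalid document below is illustrative):
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/pelletier/go-toml/v2"
+)
+
+func main() {
+	var v map[string]interface{}
+	err := toml.Unmarshal([]byte("key = # missing value"), &v)
+
+	var derr *toml.DecodeError
+	if errors.As(err, &derr) {
+		row, col := derr.Position()
+		fmt.Println("error at line", row, "column", col)
+		fmt.Println(derr.String()) // multi-line, contextualized message
+	}
+}
+```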
+
+### Encoding / Marshal
+
+#### Default struct fields order
+
+V1 emits struct fields ordered alphabetically by default. V2 emits struct
+fields in the order they are defined. For example:
+
+```go
+type S struct {
+ B string
+ A string
+}
+
+data := S{
+ B: "B",
+ A: "A",
+}
+
+b, _ := tomlv1.Marshal(data)
+fmt.Println("v1:\n" + string(b))
+
+b, _ = tomlv2.Marshal(data)
+fmt.Println("v2:\n" + string(b))
+
+// Output:
+// v1:
+// A = "A"
+// B = "B"
+
+// v2:
+// B = 'B'
+// A = 'A'
+```
+
+There is no way to make the v2 encoder behave like v1. A workaround could be to
+manually sort the fields alphabetically in the struct definition, or to
+generate struct types using `reflect.StructOf`.
+
+#### No indentation by default
+
+V1 automatically indents the content of tables by default. V2 does not.
+However, the same behavior can be obtained using
+[`Encoder.SetIndentTables`][sit]. For example:
+
+```go
+data := map[string]interface{}{
+ "table": map[string]string{
+ "key": "value",
+ },
+}
+
+b, _ := tomlv1.Marshal(data)
+fmt.Println("v1:\n" + string(b))
+
+b, _ = tomlv2.Marshal(data)
+fmt.Println("v2:\n" + string(b))
+
+buf := bytes.Buffer{}
+enc := tomlv2.NewEncoder(&buf)
+enc.SetIndentTables(true)
+enc.Encode(data)
+fmt.Println("v2 Encoder:\n" + string(buf.Bytes()))
+
+// Output:
+// v1:
+//
+// [table]
+// key = "value"
+//
+// v2:
+// [table]
+// key = 'value'
+//
+//
+// v2 Encoder:
+// [table]
+// key = 'value'
+```
+
+[sit]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Encoder.SetIndentTables
+
+#### Keys and strings are single quoted
+
+V1 always uses double quotes (`"`) around strings and keys that cannot be
+represented bare (unquoted). V2 uses single quotes (`'`) instead by default,
+unless a character cannot be represented that way, in which case it falls back
+to double quotes. As a result of this change, `Encoder.QuoteMapKeys` has been
+removed, as it is no longer useful.
+
+There is no way to make the v2 encoder behave like v1.
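+
+A small sketch of the new default (map keys are emitted in sorted order):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/pelletier/go-toml/v2"
+)
+
+func main() {
+	b, _ := toml.Marshal(map[string]string{
+		"plain":   "hello",
+		"escaped": "line1\nline2", // not representable in single quotes
+	})
+	fmt.Print(string(b))
+	// escaped = "line1\nline2"
+	// plain = 'hello'
+}
+```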
+
+#### `TextMarshaler` emits as a string, not TOML
+
+Types that implement [`encoding.TextMarshaler`][tm] can emit arbitrary TOML in
+v1. The encoder would append the result to the output directly. In v2 the result
+is wrapped in a string. As a result, this interface cannot be implemented by the
+root object.
+
+There is no way to make the v2 encoder behave like v1.
+
+[tm]: https://golang.org/pkg/encoding/#TextMarshaler
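+
+A sketch of the v2 behavior (the `rgb` type is hypothetical):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/pelletier/go-toml/v2"
+)
+
+// rgb produces a custom textual form via encoding.TextMarshaler.
+type rgb struct{ R, G, B uint8 }
+
+func (c rgb) MarshalText() ([]byte, error) {
+	return []byte(fmt.Sprintf("#%02x%02x%02x", c.R, c.G, c.B)), nil
+}
+
+func main() {
+	doc := struct {
+		Color rgb `toml:"color"`
+	}{Color: rgb{255, 0, 0}}
+
+	b, _ := toml.Marshal(doc)
+	fmt.Print(string(b)) // color = '#ff0000' (a TOML string, not raw TOML)
+}
+```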
+
+#### `Encoder.CompactComments` has been removed
+
+Emitting compact comments is now the default behavior of go-toml. This option
+is not necessary anymore.
+
+#### Struct tags have been merged
+
+V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`,
+`toml`, and `omitempty`. To behave more like the standard library, v2 has
+merged `toml`, `multiline`, `commented`, and `omitempty` into a single `toml`
+tag. For example:
+
+```go
+type doc struct {
+ // v1
+ F string `toml:"field" multiline:"true" omitempty:"true" commented:"true"`
+ // v2
+ F string `toml:"field,multiline,omitempty,commented"`
+}
+```
+
+As a result, the `Encoder.SetTag*` methods have been removed, as there is just
+one tag now.
+
+#### `Encoder.ArraysWithOneElementPerLine` has been renamed
+
+The new name is `Encoder.SetArraysMultiline`. The behavior should be the same.
+
+#### `Encoder.Indentation` has been renamed
+
+The new name is `Encoder.SetIndentSymbol`. The behavior should be the same.
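+
+Both renamed setters in one sketch:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/pelletier/go-toml/v2"
+)
+
+func main() {
+	var buf bytes.Buffer
+	enc := toml.NewEncoder(&buf)
+	enc.SetArraysMultiline(true) // formerly Encoder.ArraysWithOneElementPerLine
+	enc.SetIndentSymbol("  ")    // formerly Encoder.Indentation
+	enc.SetIndentTables(true)
+	_ = enc.Encode(map[string][]int{"ports": {80, 443}})
+	fmt.Print(buf.String()) // the ports array is emitted one element per line
+}
+```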
+
+
+#### Embedded structs behave like stdlib
+
+V1 defaults to merging embedded struct fields into the embedding struct. This
+behavior was unexpected because it does not follow the standard library. To
+avoid breaking backward compatibility, the `Encoder.PromoteAnonymous` method was
+added to make the encoder behave correctly. Given backward compatibility is not
+a problem anymore, v2 does the right thing by default: it follows the behavior
+of `encoding/json`. `Encoder.PromoteAnonymous` has been removed.
+
+[nodoc]: /~https://github.com/pelletier/go-toml/discussions/506#discussioncomment-1526038
+
+### `query`
+
+go-toml v1 provided the [`go-toml/query`][query] package. It allowed running
+JSONPath-style queries on TOML files. This feature is not available in v2. For
+a replacement, check out [dasel][dasel].
+
+This package has been removed because it was essentially not supported anymore
+(last commit May 2020), increased the complexity of the code base, and more
+complete solutions exist out there.
+
+[query]: /~https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query
+[dasel]: /~https://github.com/TomWright/dasel
+
+## Versioning
+
+Except for parts explicitly marked otherwise, go-toml follows [Semantic
+Versioning](https://semver.org). The supported version of
+[TOML](/~https://github.com/toml-lang/toml) is indicated at the beginning of this
+document. The last two major versions of Go are supported (see [Go Release
+Policy](https://golang.org/doc/devel/release.html#policy)).
+
+## License
+
+The MIT License (MIT). Read [LICENSE](LICENSE).
diff --git a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
new file mode 100644
index 000000000000..d4d554fda9d1
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
@@ -0,0 +1,16 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported |
+| ---------- | ------------------ |
+| Latest 2.x | :white_check_mark: |
+| All 1.x | :x: |
+| All 0.x | :x: |
+
+## Reporting a Vulnerability
+
+Email a vulnerability report to `security@pelletier.codes`. Make sure to include
+as many details as possible to reproduce the vulnerability. This is a
+side-project: I will try to get back to you as quickly as possible, time
+permitting in my personal life. Providing a working patch helps very much!
diff --git a/vendor/github.com/pelletier/go-toml/v2/ci.sh b/vendor/github.com/pelletier/go-toml/v2/ci.sh
new file mode 100644
index 000000000000..86217a9b0979
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/ci.sh
@@ -0,0 +1,284 @@
+#!/usr/bin/env bash
+
+
+stderr() {
+ echo "$@" 1>&2
+}
+
+usage() {
+ b=$(basename "$0")
+ echo $b: ERROR: "$@" 1>&2
+
+ cat 1>&2 <<EOF
+usage: $b [coverage|benchmark] [-d BRANCH] [-a [-html]] [BRANCH]
+EOF
+ exit 1
+}
+
+cover() {
+ branch="${1}"
+ dir="$(mktemp -d)"
+
+ stderr "Executing coverage for ${branch} at ${dir}"
+
+ if [ "${branch}" = "HEAD" ]; then
+ cp -r . "${dir}/"
+ else
+ git worktree add "$dir" "$branch"
+ fi
+
+ pushd "$dir"
+ go test ./... -covermode=atomic -coverprofile=coverage.out
+ go tool cover -func=coverage.out
+ echo "Coverage profile for ${branch}: ${dir}/coverage.out" >&2
+ popd
+
+ if [ "${branch}" != "HEAD" ]; then
+ git worktree remove --force "$dir"
+ fi
+}
+
+coverage() {
+ case "$1" in
+ -d)
+ shift
+ target="${1?Need to provide a target branch argument}"
+
+ output_dir="$(mktemp -d)"
+ target_out="${output_dir}/target.txt"
+ head_out="${output_dir}/head.txt"
+
+ cover "${target}" > "${target_out}"
+ cover "HEAD" > "${head_out}"
+
+ cat "${target_out}"
+ cat "${head_out}"
+
+ echo ""
+
+ target_pct="$(tail -n2 ${target_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%.*/\1/')"
+ head_pct="$(tail -n2 ${head_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%.*/\1/')"
+ echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%"
+
+ delta_pct=$(echo "$head_pct - $target_pct" | bc -l)
+ echo "Delta: ${delta_pct}"
+
+ if [[ $delta_pct = \-* ]]; then
+ echo "Regression!";
+
+ target_diff="${output_dir}/target.diff.txt"
+ head_diff="${output_dir}/head.diff.txt"
+ cat "${target_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${target_diff}"
+ cat "${head_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${head_diff}"
+
+ diff --side-by-side --suppress-common-lines "${target_diff}" "${head_diff}"
+ return 1
+ fi
+ return 0
+ ;;
+ esac
+
+ cover "${1-HEAD}"
+}
+
+bench() {
+ branch="${1}"
+ out="${2}"
+ replace="${3}"
+ dir="$(mktemp -d)"
+
+ stderr "Executing benchmark for ${branch} at ${dir}"
+
+ if [ "${branch}" = "HEAD" ]; then
+ cp -r . "${dir}/"
+ else
+ git worktree add "$dir" "$branch"
+ fi
+
+ pushd "$dir"
+
+ if [ "${replace}" != "" ]; then
+ find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \;
+ go get "${replace}"
+ fi
+
+ export GOMAXPROCS=2
+ go test '-bench=^Benchmark(Un)?[mM]arshal' -count=10 -run=Nothing ./... | tee "${out}"
+ popd
+
+ if [ "${branch}" != "HEAD" ]; then
+ git worktree remove --force "$dir"
+ fi
+}
+
+fmktemp() {
+ if mktemp --version &> /dev/null; then
+ # GNU
+ mktemp --suffix=-$1
+ else
+ # BSD
+ mktemp -t $1
+ fi
+}
+
+benchstathtml() {
+python3 - $1 <<'EOF'
+import sys
+
+lines = []
+stop = False
+
+with open(sys.argv[1]) as f:
+    for line in f.readlines():
+        line = line.strip()
+        if line == "":
+            stop = True
+        if not stop:
+            lines.append(line.split(','))
+
+results = []
+for line in reversed(lines[2:]):
+    if len(line) < 8 or line[0] == "":
+        continue
+    v2 = float(line[1])
+    results.append([
+        line[0].replace("-32", ""),
+        "%.1fx" % (float(line[3])/v2),  # v1
+        "%.1fx" % (float(line[7])/v2),  # bs
+    ])
+# move geomean to the end
+results.append(results[0])
+del results[0]
+
+
+def printtable(data):
+    print("""<table>
+    <thead>
+        <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
+    </thead>
+    <tbody>""")
+
+    for r in data:
+        print("        <tr><td>{}</td><td>{}</td><td>{}</td></tr>".format(*r))
+
+    print("""    </tbody>
+</table>""")
+
+
+def match(x):
+    return "ReferenceFile" in x[0] or "HugoFrontMatter" in x[0]
+
+above = [x for x in results if match(x)]
+below = [x for x in results if not match(x)]
+
+printtable(above)
+print("<details><summary>See more</summary>")
+print("""<p>The table above has the results of the most common use-cases. The table below
+contains the results of all benchmarks, including unrealistic ones. It is
+provided for completeness.</p>""")
+printtable(below)
+print('<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p>')
+print("</details>")
+
+EOF
+}
+
+benchmark() {
+ case "$1" in
+ -d)
+ shift
+ target="${1?Need to provide a target branch argument}"
+
+ old=`fmktemp ${target}`
+ bench "${target}" "${old}"
+
+ new=`fmktemp HEAD`
+ bench HEAD "${new}"
+
+ benchstat "${old}" "${new}"
+ return 0
+ ;;
+ -a)
+ shift
+
+ v2stats=`fmktemp go-toml-v2`
+ bench HEAD "${v2stats}" "github.com/pelletier/go-toml/v2"
+ v1stats=`fmktemp go-toml-v1`
+ bench HEAD "${v1stats}" "github.com/pelletier/go-toml"
+ bsstats=`fmktemp bs-toml`
+ bench HEAD "${bsstats}" "github.com/BurntSushi/toml"
+
+ cp "${v2stats}" go-toml-v2.txt
+ cp "${v1stats}" go-toml-v1.txt
+ cp "${bsstats}" bs-toml.txt
+
+ if [ "$1" = "-html" ]; then
+ tmpcsv=`fmktemp csv`
+ benchstat -format csv go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv
+ benchstathtml $tmpcsv
+ else
+ benchstat go-toml-v2.txt go-toml-v1.txt bs-toml.txt
+ fi
+
+ rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt
+ return $?
+ esac
+
+ bench "${1-HEAD}" `mktemp`
+}
+
+case "$1" in
+ coverage) shift; coverage $@;;
+ benchmark) shift; benchmark $@;;
+ *) usage "bad argument $1";;
+esac
diff --git a/vendor/github.com/pelletier/go-toml/v2/decode.go b/vendor/github.com/pelletier/go-toml/v2/decode.go
new file mode 100644
index 000000000000..f0ec3b17054d
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/decode.go
@@ -0,0 +1,550 @@
+package toml
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "time"
+
+ "github.com/pelletier/go-toml/v2/unstable"
+)
+
+func parseInteger(b []byte) (int64, error) {
+ if len(b) > 2 && b[0] == '0' {
+ switch b[1] {
+ case 'x':
+ return parseIntHex(b)
+ case 'b':
+ return parseIntBin(b)
+ case 'o':
+ return parseIntOct(b)
+ default:
+ panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1]))
+ }
+ }
+
+ return parseIntDec(b)
+}
+
+func parseLocalDate(b []byte) (LocalDate, error) {
+ // full-date = date-fullyear "-" date-month "-" date-mday
+ // date-fullyear = 4DIGIT
+ // date-month = 2DIGIT ; 01-12
+ // date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year
+ var date LocalDate
+
+ if len(b) != 10 || b[4] != '-' || b[7] != '-' {
+ return date, unstable.NewParserError(b, "dates are expected to have the format YYYY-MM-DD")
+ }
+
+ var err error
+
+ date.Year, err = parseDecimalDigits(b[0:4])
+ if err != nil {
+ return LocalDate{}, err
+ }
+
+ date.Month, err = parseDecimalDigits(b[5:7])
+ if err != nil {
+ return LocalDate{}, err
+ }
+
+ date.Day, err = parseDecimalDigits(b[8:10])
+ if err != nil {
+ return LocalDate{}, err
+ }
+
+ if !isValidDate(date.Year, date.Month, date.Day) {
+ return LocalDate{}, unstable.NewParserError(b, "impossible date")
+ }
+
+ return date, nil
+}
+
+func parseDecimalDigits(b []byte) (int, error) {
+ v := 0
+
+ for i, c := range b {
+ if c < '0' || c > '9' {
+ return 0, unstable.NewParserError(b[i:i+1], "expected digit (0-9)")
+ }
+ v *= 10
+ v += int(c - '0')
+ }
+
+ return v, nil
+}
+
+func parseDateTime(b []byte) (time.Time, error) {
+ // offset-date-time = full-date time-delim full-time
+ // full-time = partial-time time-offset
+ // time-offset = "Z" / time-numoffset
+ // time-numoffset = ( "+" / "-" ) time-hour ":" time-minute
+
+ dt, b, err := parseLocalDateTime(b)
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ var zone *time.Location
+
+ if len(b) == 0 {
+ // parser should have checked that when assigning the date time node
+ panic("date time should have a timezone")
+ }
+
+ if b[0] == 'Z' || b[0] == 'z' {
+ b = b[1:]
+ zone = time.UTC
+ } else {
+ const dateTimeByteLen = 6
+ if len(b) != dateTimeByteLen {
+ return time.Time{}, unstable.NewParserError(b, "invalid date-time timezone")
+ }
+ var direction int
+ switch b[0] {
+ case '-':
+ direction = -1
+ case '+':
+ direction = +1
+ default:
+ return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset character")
+ }
+
+ if b[3] != ':' {
+ return time.Time{}, unstable.NewParserError(b[3:4], "expected a : separator")
+ }
+
+ hours, err := parseDecimalDigits(b[1:3])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if hours > 23 {
+ return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset hours")
+ }
+
+ minutes, err := parseDecimalDigits(b[4:6])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if minutes > 59 {
+ return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset minutes")
+ }
+
+ seconds := direction * (hours*3600 + minutes*60)
+ if seconds == 0 {
+ zone = time.UTC
+ } else {
+ zone = time.FixedZone("", seconds)
+ }
+ b = b[dateTimeByteLen:]
+ }
+
+ if len(b) > 0 {
+ return time.Time{}, unstable.NewParserError(b, "extra bytes at the end of the timezone")
+ }
+
+ t := time.Date(
+ dt.Year,
+ time.Month(dt.Month),
+ dt.Day,
+ dt.Hour,
+ dt.Minute,
+ dt.Second,
+ dt.Nanosecond,
+ zone)
+
+ return t, nil
+}
+
+func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) {
+ var dt LocalDateTime
+
+ const localDateTimeByteMinLen = 11
+ if len(b) < localDateTimeByteMinLen {
+ return dt, nil, unstable.NewParserError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]")
+ }
+
+ date, err := parseLocalDate(b[:10])
+ if err != nil {
+ return dt, nil, err
+ }
+ dt.LocalDate = date
+
+ sep := b[10]
+ if sep != 'T' && sep != ' ' && sep != 't' {
+ return dt, nil, unstable.NewParserError(b[10:11], "datetime separator is expected to be T or a space")
+ }
+
+ t, rest, err := parseLocalTime(b[11:])
+ if err != nil {
+ return dt, nil, err
+ }
+ dt.LocalTime = t
+
+ return dt, rest, nil
+}
+
+// parseLocalTime is a bit different because it also returns the remaining
+// []byte that it didn't need. This is to allow parseDateTime to parse those
+// remaining bytes as a timezone.
+func parseLocalTime(b []byte) (LocalTime, []byte, error) {
+ var (
+ nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0}
+ t LocalTime
+ )
+
+ // check if b matches to have expected format HH:MM:SS[.NNNNNN]
+ const localTimeByteLen = 8
+ if len(b) < localTimeByteLen {
+ return t, nil, unstable.NewParserError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]")
+ }
+
+ var err error
+
+ t.Hour, err = parseDecimalDigits(b[0:2])
+ if err != nil {
+ return t, nil, err
+ }
+
+ if t.Hour > 23 {
+ return t, nil, unstable.NewParserError(b[0:2], "hour cannot be greater 23")
+ }
+ if b[2] != ':' {
+ return t, nil, unstable.NewParserError(b[2:3], "expecting colon between hours and minutes")
+ }
+
+ t.Minute, err = parseDecimalDigits(b[3:5])
+ if err != nil {
+ return t, nil, err
+ }
+ if t.Minute > 59 {
+ return t, nil, unstable.NewParserError(b[3:5], "minutes cannot be greater 59")
+ }
+ if b[5] != ':' {
+ return t, nil, unstable.NewParserError(b[5:6], "expecting colon between minutes and seconds")
+ }
+
+ t.Second, err = parseDecimalDigits(b[6:8])
+ if err != nil {
+ return t, nil, err
+ }
+
+ if t.Second > 60 {
+ return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater 60")
+ }
+
+ b = b[8:]
+
+ if len(b) >= 1 && b[0] == '.' {
+ frac := 0
+ precision := 0
+ digits := 0
+
+ for i, c := range b[1:] {
+ if !isDigit(c) {
+ if i == 0 {
+ return t, nil, unstable.NewParserError(b[0:1], "need at least one digit after fraction point")
+ }
+ break
+ }
+ digits++
+
+ const maxFracPrecision = 9
+ if i >= maxFracPrecision {
+ // go-toml allows decoding fractional seconds
+ // beyond the supported precision of 9
+ // digits. It truncates the fractional component
+ // to the supported precision and ignores the
+ // remaining digits.
+ //
+ // /~https://github.com/pelletier/go-toml/discussions/707
+ continue
+ }
+
+ frac *= 10
+ frac += int(c - '0')
+ precision++
+ }
+
+ if precision == 0 {
+ return t, nil, unstable.NewParserError(b[:1], "nanoseconds need at least one digit")
+ }
+
+ t.Nanosecond = frac * nspow[precision]
+ t.Precision = precision
+
+ return t, b[1+digits:], nil
+ }
+ return t, b, nil
+}
+
+//nolint:cyclop
+func parseFloat(b []byte) (float64, error) {
+ if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' {
+ return math.NaN(), nil
+ }
+
+ cleaned, err := checkAndRemoveUnderscoresFloats(b)
+ if err != nil {
+ return 0, err
+ }
+
+ if cleaned[0] == '.' {
+ return 0, unstable.NewParserError(b, "float cannot start with a dot")
+ }
+
+ if cleaned[len(cleaned)-1] == '.' {
+ return 0, unstable.NewParserError(b, "float cannot end with a dot")
+ }
+
+ dotAlreadySeen := false
+ for i, c := range cleaned {
+ if c == '.' {
+ if dotAlreadySeen {
+ return 0, unstable.NewParserError(b[i:i+1], "float can have at most one decimal point")
+ }
+ if !isDigit(cleaned[i-1]) {
+ return 0, unstable.NewParserError(b[i-1:i+1], "float decimal point must be preceded by a digit")
+ }
+ if !isDigit(cleaned[i+1]) {
+ return 0, unstable.NewParserError(b[i:i+2], "float decimal point must be followed by a digit")
+ }
+ dotAlreadySeen = true
+ }
+ }
+
+ start := 0
+ if cleaned[0] == '+' || cleaned[0] == '-' {
+ start = 1
+ }
+ if cleaned[start] == '0' && len(cleaned) > start+1 && isDigit(cleaned[start+1]) {
+ return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes")
+ }
+
+ f, err := strconv.ParseFloat(string(cleaned), 64)
+ if err != nil {
+ return 0, unstable.NewParserError(b, "unable to parse float: %w", err)
+ }
+
+ return f, nil
+}
+
+func parseIntHex(b []byte) (int64, error) {
+ cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
+ if err != nil {
+ return 0, err
+ }
+
+ i, err := strconv.ParseInt(string(cleaned), 16, 64)
+ if err != nil {
+ return 0, unstable.NewParserError(b, "couldn't parse hexadecimal number: %w", err)
+ }
+
+ return i, nil
+}
+
+func parseIntOct(b []byte) (int64, error) {
+ cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
+ if err != nil {
+ return 0, err
+ }
+
+ i, err := strconv.ParseInt(string(cleaned), 8, 64)
+ if err != nil {
+ return 0, unstable.NewParserError(b, "couldn't parse octal number: %w", err)
+ }
+
+ return i, nil
+}
+
+func parseIntBin(b []byte) (int64, error) {
+ cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
+ if err != nil {
+ return 0, err
+ }
+
+ i, err := strconv.ParseInt(string(cleaned), 2, 64)
+ if err != nil {
+ return 0, unstable.NewParserError(b, "couldn't parse binary number: %w", err)
+ }
+
+ return i, nil
+}
+
+func isSign(b byte) bool {
+ return b == '+' || b == '-'
+}
+
+func parseIntDec(b []byte) (int64, error) {
+ cleaned, err := checkAndRemoveUnderscoresIntegers(b)
+ if err != nil {
+ return 0, err
+ }
+
+ startIdx := 0
+
+ if isSign(cleaned[0]) {
+ startIdx++
+ }
+
+ if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' {
+ return 0, unstable.NewParserError(b, "leading zero not allowed on decimal number")
+ }
+
+ i, err := strconv.ParseInt(string(cleaned), 10, 64)
+ if err != nil {
+ return 0, unstable.NewParserError(b, "couldn't parse decimal number: %w", err)
+ }
+
+ return i, nil
+}
+
+func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) {
+ start := 0
+ if b[start] == '+' || b[start] == '-' {
+ start++
+ }
+
+ if len(b) == start {
+ return b, nil
+ }
+
+ if b[start] == '_' {
+ return nil, unstable.NewParserError(b[start:start+1], "number cannot start with underscore")
+ }
+
+ if b[len(b)-1] == '_' {
+ return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore")
+ }
+
+ // fast path
+ i := 0
+ for ; i < len(b); i++ {
+ if b[i] == '_' {
+ break
+ }
+ }
+ if i == len(b) {
+ return b, nil
+ }
+
+ before := false
+ cleaned := make([]byte, i, len(b))
+ copy(cleaned, b)
+
+ for i++; i < len(b); i++ {
+ c := b[i]
+ if c == '_' {
+ if !before {
+ return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores")
+ }
+ before = false
+ } else {
+ before = true
+ cleaned = append(cleaned, c)
+ }
+ }
+
+ return cleaned, nil
+}
+
+func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) {
+ if b[0] == '_' {
+ return nil, unstable.NewParserError(b[0:1], "number cannot start with underscore")
+ }
+
+ if b[len(b)-1] == '_' {
+ return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore")
+ }
+
+ // fast path
+ i := 0
+ for ; i < len(b); i++ {
+ if b[i] == '_' {
+ break
+ }
+ }
+ if i == len(b) {
+ return b, nil
+ }
+
+ before := false
+ cleaned := make([]byte, 0, len(b))
+
+ for i := 0; i < len(b); i++ {
+ c := b[i]
+
+ switch c {
+ case '_':
+ if !before {
+ return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores")
+ }
+ if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') {
+ return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore before exponent")
+ }
+ before = false
+ case '+', '-':
+ // signed exponents
+ cleaned = append(cleaned, c)
+ before = false
+ case 'e', 'E':
+ if i < len(b)-1 && b[i+1] == '_' {
+ return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after exponent")
+ }
+ cleaned = append(cleaned, c)
+ case '.':
+ if i < len(b)-1 && b[i+1] == '_' {
+ return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after decimal point")
+ }
+ if i > 0 && b[i-1] == '_' {
+ return nil, unstable.NewParserError(b[i-1:i], "cannot have underscore before decimal point")
+ }
+ cleaned = append(cleaned, c)
+ default:
+ before = true
+ cleaned = append(cleaned, c)
+ }
+ }
+
+ return cleaned, nil
+}
+
+// isValidDate checks if a provided date is a date that exists.
+func isValidDate(year int, month int, day int) bool {
+ return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year)
+}
+
+// daysBefore[m] counts the number of days in a non-leap year
+// before month m begins. There is an entry for m=12, counting
+// the number of days before January of next year (365).
+var daysBefore = [...]int32{
+ 0,
+ 31,
+ 31 + 28,
+ 31 + 28 + 31,
+ 31 + 28 + 31 + 30,
+ 31 + 28 + 31 + 30 + 31,
+ 31 + 28 + 31 + 30 + 31 + 30,
+ 31 + 28 + 31 + 30 + 31 + 30 + 31,
+ 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31,
+ 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
+ 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
+ 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
+ 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31,
+}
+
+func daysIn(m int, year int) int {
+ if m == 2 && isLeap(year) {
+ return 29
+ }
+ return int(daysBefore[m] - daysBefore[m-1])
+}
+
+func isLeap(year int) bool {
+ return year%4 == 0 && (year%100 != 0 || year%400 == 0)
+}
+
+func isDigit(r byte) bool {
+ return r >= '0' && r <= '9'
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/doc.go b/vendor/github.com/pelletier/go-toml/v2/doc.go
new file mode 100644
index 000000000000..b7bc599bde45
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/doc.go
@@ -0,0 +1,2 @@
+// Package toml is a library to read and write TOML documents.
+package toml
diff --git a/vendor/github.com/pelletier/go-toml/v2/errors.go b/vendor/github.com/pelletier/go-toml/v2/errors.go
new file mode 100644
index 000000000000..309733f1f9f4
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/errors.go
@@ -0,0 +1,252 @@
+package toml
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/pelletier/go-toml/v2/internal/danger"
+ "github.com/pelletier/go-toml/v2/unstable"
+)
+
+// DecodeError represents an error encountered during the parsing or decoding
+// of a TOML document.
+//
+// In addition to the error message, it contains the position in the document
+// where it happened, as well as a human-readable representation that shows
+// where the error occurred in the document.
+type DecodeError struct {
+ message string
+ line int
+ column int
+ key Key
+
+ human string
+}
+
+// StrictMissingError occurs in a TOML document that does not have a
+// corresponding field in the target value. It contains all the missing fields
+// in Errors.
+//
+// Emitted by Decoder when DisallowUnknownFields() was called.
+type StrictMissingError struct {
+ // One error per field that could not be found.
+ Errors []DecodeError
+}
+
+// Error returns the canonical string for this error.
+func (s *StrictMissingError) Error() string {
+ return "strict mode: fields in the document are missing in the target struct"
+}
+
+// String returns a human readable description of all errors.
+func (s *StrictMissingError) String() string {
+ var buf strings.Builder
+
+ for i, e := range s.Errors {
+ if i > 0 {
+ buf.WriteString("\n---\n")
+ }
+
+ buf.WriteString(e.String())
+ }
+
+ return buf.String()
+}
+
+type Key []string
+
+// Error returns the error message contained in the DecodeError.
+func (e *DecodeError) Error() string {
+ return "toml: " + e.message
+}
+
+// String returns the human-readable contextualized error. This string is multi-line.
+func (e *DecodeError) String() string {
+ return e.human
+}
+
+// Position returns the (line, column) pair indicating where the error
+// occurred in the document. Positions are 1-indexed.
+func (e *DecodeError) Position() (row int, column int) {
+ return e.line, e.column
+}
+
+// Key that was being processed when the error occurred. The key is present only
+// if this DecodeError is part of a StrictMissingError.
+func (e *DecodeError) Key() Key {
+ return e.key
+}
+
+// wrapDecodeError creates a DecodeError referencing a highlighted
+// range of bytes from document.
+//
+// highlight needs to be a sub-slice of document, or this function panics.
+//
+// The function copies all bytes used in DecodeError, so that document and
+// highlight can be freely deallocated.
+//
+//nolint:funlen
+func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError {
+ offset := danger.SubsliceOffset(document, de.Highlight)
+
+ errMessage := de.Error()
+ errLine, errColumn := positionAtEnd(document[:offset])
+ before, after := linesOfContext(document, de.Highlight, offset, 3)
+
+ var buf strings.Builder
+
+ maxLine := errLine + len(after) - 1
+ lineColumnWidth := len(strconv.Itoa(maxLine))
+
+ // Write the lines of context strictly before the error.
+ for i := len(before) - 1; i > 0; i-- {
+ line := errLine - i
+ buf.WriteString(formatLineNumber(line, lineColumnWidth))
+ buf.WriteString("|")
+
+ if len(before[i]) > 0 {
+ buf.WriteString(" ")
+ buf.Write(before[i])
+ }
+
+ buf.WriteRune('\n')
+ }
+
+ // Write the document line that contains the error.
+
+ buf.WriteString(formatLineNumber(errLine, lineColumnWidth))
+ buf.WriteString("| ")
+
+ if len(before) > 0 {
+ buf.Write(before[0])
+ }
+
+ buf.Write(de.Highlight)
+
+ if len(after) > 0 {
+ buf.Write(after[0])
+ }
+
+ buf.WriteRune('\n')
+
+ // Write the line with the error message itself (so it does not have a line
+ // number).
+
+ buf.WriteString(strings.Repeat(" ", lineColumnWidth))
+ buf.WriteString("| ")
+
+ if len(before) > 0 {
+ buf.WriteString(strings.Repeat(" ", len(before[0])))
+ }
+
+ buf.WriteString(strings.Repeat("~", len(de.Highlight)))
+
+ if len(errMessage) > 0 {
+ buf.WriteString(" ")
+ buf.WriteString(errMessage)
+ }
+
+ // Write the lines of context strictly after the error.
+
+ for i := 1; i < len(after); i++ {
+ buf.WriteRune('\n')
+ line := errLine + i
+ buf.WriteString(formatLineNumber(line, lineColumnWidth))
+ buf.WriteString("|")
+
+ if len(after[i]) > 0 {
+ buf.WriteString(" ")
+ buf.Write(after[i])
+ }
+ }
+
+ return &DecodeError{
+ message: errMessage,
+ line: errLine,
+ column: errColumn,
+ key: de.Key,
+ human: buf.String(),
+ }
+}
+
+func formatLineNumber(line int, width int) string {
+ format := "%" + strconv.Itoa(width) + "d"
+
+ return fmt.Sprintf(format, line)
+}
+
+func linesOfContext(document []byte, highlight []byte, offset int, linesAround int) ([][]byte, [][]byte) {
+ return beforeLines(document, offset, linesAround), afterLines(document, highlight, offset, linesAround)
+}
+
+func beforeLines(document []byte, offset int, linesAround int) [][]byte {
+ var beforeLines [][]byte
+
+ // Walk the document backward from the highlight to find previous lines
+ // of context.
+ rest := document[:offset]
+backward:
+ for o := len(rest) - 1; o >= 0 && len(beforeLines) <= linesAround && len(rest) > 0; {
+ switch {
+ case rest[o] == '\n':
+ // handle individual lines
+ beforeLines = append(beforeLines, rest[o+1:])
+ rest = rest[:o]
+ o = len(rest) - 1
+ case o == 0:
+ // add the first line only if it's non-empty
+ beforeLines = append(beforeLines, rest)
+
+ break backward
+ default:
+ o--
+ }
+ }
+
+ return beforeLines
+}
+
+func afterLines(document []byte, highlight []byte, offset int, linesAround int) [][]byte {
+ var afterLines [][]byte
+
+ // Walk the document forward from the highlight to find the following
+ // lines of context.
+ rest := document[offset+len(highlight):]
+forward:
+ for o := 0; o < len(rest) && len(afterLines) <= linesAround; {
+ switch {
+ case rest[o] == '\n':
+ // handle individual lines
+ afterLines = append(afterLines, rest[:o])
+ rest = rest[o+1:]
+ o = 0
+
+ case o == len(rest)-1:
+ // add last line only if it's non-empty
+ afterLines = append(afterLines, rest)
+
+ break forward
+ default:
+ o++
+ }
+ }
+
+ return afterLines
+}
+
+func positionAtEnd(b []byte) (row int, column int) {
+ row = 1
+ column = 1
+
+ for _, c := range b {
+ if c == '\n' {
+ row++
+ column = 1
+ } else {
+ column++
+ }
+ }
+
+ return
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go b/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go
new file mode 100644
index 000000000000..80f698db4b08
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go
@@ -0,0 +1,42 @@
+package characters
+
+var invalidAsciiTable = [256]bool{
+ 0x00: true,
+ 0x01: true,
+ 0x02: true,
+ 0x03: true,
+ 0x04: true,
+ 0x05: true,
+ 0x06: true,
+ 0x07: true,
+ 0x08: true,
+ // 0x09 TAB
+ // 0x0A LF
+ 0x0B: true,
+ 0x0C: true,
+ // 0x0D CR
+ 0x0E: true,
+ 0x0F: true,
+ 0x10: true,
+ 0x11: true,
+ 0x12: true,
+ 0x13: true,
+ 0x14: true,
+ 0x15: true,
+ 0x16: true,
+ 0x17: true,
+ 0x18: true,
+ 0x19: true,
+ 0x1A: true,
+ 0x1B: true,
+ 0x1C: true,
+ 0x1D: true,
+ 0x1E: true,
+ 0x1F: true,
+ // 0x20 - 0x7E Printable ASCII characters
+ 0x7F: true,
+}
+
+func InvalidAscii(b byte) bool {
+ return invalidAsciiTable[b]
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go b/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go
new file mode 100644
index 000000000000..db4f45acbf00
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go
@@ -0,0 +1,199 @@
+package characters
+
+import (
+ "unicode/utf8"
+)
+
+type utf8Err struct {
+ Index int
+ Size int
+}
+
+func (u utf8Err) Zero() bool {
+ return u.Size == 0
+}
+
+// Utf8TomlValidAlreadyEscaped verifies that a given string is only made of valid UTF-8 characters allowed
+// by the TOML spec:
+//
+// Any Unicode character may be used except those that must be escaped:
+// quotation mark, backslash, and the control characters other than tab (U+0000
+// to U+0008, U+000A to U+001F, U+007F).
+//
+// It is a copy of the Go 1.17 utf8.Valid implementation, tweaked to exit early
+// when a character is not allowed.
+//
+// The returned utf8Err is Zero() if the string is valid, or contains the byte
+// index and size of the invalid character.
+//
+// quotation mark => already checked
+// backslash => already checked
+// 0-0x8 => invalid
+// 0x9 => tab, ok
+// 0xA - 0x1F => invalid
+// 0x7F => invalid
+func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) {
+ // Fast path. Check for and skip 8 bytes of ASCII characters per iteration.
+ offset := 0
+ for len(p) >= 8 {
+ // Combining two 32 bit loads allows the same code to be used
+ // for 32 and 64 bit platforms.
+ // The compiler can generate a 32bit load for first32 and second32
+ // on many platforms. See test/codegen/memcombine.go.
+ first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+ second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24
+ if (first32|second32)&0x80808080 != 0 {
+ // Found a non ASCII byte (>= RuneSelf).
+ break
+ }
+
+ for i, b := range p[:8] {
+ if InvalidAscii(b) {
+ err.Index = offset + i
+ err.Size = 1
+ return
+ }
+ }
+
+ p = p[8:]
+ offset += 8
+ }
+ n := len(p)
+ for i := 0; i < n; {
+ pi := p[i]
+ if pi < utf8.RuneSelf {
+ if InvalidAscii(pi) {
+ err.Index = offset + i
+ err.Size = 1
+ return
+ }
+ i++
+ continue
+ }
+ x := first[pi]
+ if x == xx {
+ // Illegal starter byte.
+ err.Index = offset + i
+ err.Size = 1
+ return
+ }
+ size := int(x & 7)
+ if i+size > n {
+ // Short or invalid.
+ err.Index = offset + i
+ err.Size = n - i
+ return
+ }
+ accept := acceptRanges[x>>4]
+ if c := p[i+1]; c < accept.lo || accept.hi < c {
+ err.Index = offset + i
+ err.Size = 2
+ return
+ } else if size == 2 {
+ } else if c := p[i+2]; c < locb || hicb < c {
+ err.Index = offset + i
+ err.Size = 3
+ return
+ } else if size == 3 {
+ } else if c := p[i+3]; c < locb || hicb < c {
+ err.Index = offset + i
+ err.Size = 4
+ return
+ }
+ i += size
+ }
+ return
+}
+
+// Utf8ValidNext returns the size of the next rune if valid, 0 otherwise.
+func Utf8ValidNext(p []byte) int {
+ c := p[0]
+
+ if c < utf8.RuneSelf {
+ if InvalidAscii(c) {
+ return 0
+ }
+ return 1
+ }
+
+ x := first[c]
+ if x == xx {
+ // Illegal starter byte.
+ return 0
+ }
+ size := int(x & 7)
+ if size > len(p) {
+ // Short or invalid.
+ return 0
+ }
+ accept := acceptRanges[x>>4]
+ if c := p[1]; c < accept.lo || accept.hi < c {
+ return 0
+ } else if size == 2 {
+ } else if c := p[2]; c < locb || hicb < c {
+ return 0
+ } else if size == 3 {
+ } else if c := p[3]; c < locb || hicb < c {
+ return 0
+ }
+
+ return size
+}
+
+// acceptRange gives the range of valid values for the second byte in a UTF-8
+// sequence.
+type acceptRange struct {
+ lo uint8 // lowest value for second byte.
+ hi uint8 // highest value for second byte.
+}
+
+// acceptRanges has size 16 to avoid bounds checks in the code that uses it.
+var acceptRanges = [16]acceptRange{
+ 0: {locb, hicb},
+ 1: {0xA0, hicb},
+ 2: {locb, 0x9F},
+ 3: {0x90, hicb},
+ 4: {locb, 0x8F},
+}
+
+// first is information about the first byte in a UTF-8 sequence.
+var first = [256]uint8{
+ // 1 2 3 4 5 6 7 8 9 A B C D E F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
+ // 1 2 3 4 5 6 7 8 9 A B C D E F
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
+ xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
+ s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
+ s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
+ s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
+}
+
+const (
+ // The default lowest and highest continuation byte.
+ locb = 0b10000000
+ hicb = 0b10111111
+
+ // These names of these constants are chosen to give nice alignment in the
+ // table below. The first nibble is an index into acceptRanges or F for
+ // special one-byte cases. The second nibble is the Rune length or the
+ // Status for the special one-byte case.
+ xx = 0xF1 // invalid: size 1
+ as = 0xF0 // ASCII: size 1
+ s1 = 0x02 // accept 0, size 2
+ s2 = 0x13 // accept 1, size 3
+ s3 = 0x03 // accept 0, size 3
+ s4 = 0x23 // accept 2, size 3
+ s5 = 0x34 // accept 3, size 4
+ s6 = 0x04 // accept 0, size 4
+ s7 = 0x44 // accept 4, size 4
+)
diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go
new file mode 100644
index 000000000000..e38e1131b88f
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go
@@ -0,0 +1,65 @@
+package danger
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+const maxInt = uintptr(int(^uint(0) >> 1))
+
+func SubsliceOffset(data []byte, subslice []byte) int {
+ datap := (*reflect.SliceHeader)(unsafe.Pointer(&data))
+ hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice))
+
+ if hlp.Data < datap.Data {
+ panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data))
+ }
+ offset := hlp.Data - datap.Data
+
+ if offset > maxInt {
+ panic(fmt.Errorf("slice offset larger than int (%d)", offset))
+ }
+
+ intoffset := int(offset)
+
+ if intoffset > datap.Len {
+ panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len))
+ }
+
+ if intoffset+hlp.Len > datap.Len {
+ panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len))
+ }
+
+ return intoffset
+}
+
+func BytesRange(start []byte, end []byte) []byte {
+ if start == nil || end == nil {
+ panic("cannot call BytesRange with nil")
+ }
+ startp := (*reflect.SliceHeader)(unsafe.Pointer(&start))
+ endp := (*reflect.SliceHeader)(unsafe.Pointer(&end))
+
+ if startp.Data > endp.Data {
+ panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data))
+ }
+
+ l := startp.Len
+ endLen := int(endp.Data-startp.Data) + endp.Len
+ if endLen > l {
+ l = endLen
+ }
+
+ if l > startp.Cap {
+ panic(fmt.Errorf("range length is larger than capacity"))
+ }
+
+ return start[:l]
+}
+
+func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer {
+ // TODO: replace with unsafe.Add when Go 1.17 is released
+ // /~https://github.com/golang/go/issues/40481
+ return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset))
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go
new file mode 100644
index 000000000000..9d41c28a2f21
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go
@@ -0,0 +1,23 @@
+package danger
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// TypeID is used as a key in encoder and decoder caches to enable using
+// the optimized runtime.mapaccess2_fast64 function instead of the more
+// expensive lookup that using reflect.Type as a map key would require.
+//
+// typeID holds the pointer to the reflect.Type value, which is unique
+// in the program.
+//
+// /~https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61
+type TypeID unsafe.Pointer
+
+func MakeTypeID(t reflect.Type) TypeID {
+ // reflect.Type has the fields:
+ // typ unsafe.Pointer
+ // ptr unsafe.Pointer
+ return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1])
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go
new file mode 100644
index 000000000000..149b17f538b7
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go
@@ -0,0 +1,48 @@
+package tracker
+
+import "github.com/pelletier/go-toml/v2/unstable"
+
+// KeyTracker is a tracker that keeps track of the current Key as the AST is
+// walked.
+type KeyTracker struct {
+ k []string
+}
+
+// UpdateTable sets the state of the tracker with the AST table node.
+func (t *KeyTracker) UpdateTable(node *unstable.Node) {
+ t.reset()
+ t.Push(node)
+}
+
+// UpdateArrayTable sets the state of the tracker with the AST array table node.
+func (t *KeyTracker) UpdateArrayTable(node *unstable.Node) {
+ t.reset()
+ t.Push(node)
+}
+
+// Push the given key on the stack.
+func (t *KeyTracker) Push(node *unstable.Node) {
+ it := node.Key()
+ for it.Next() {
+ t.k = append(t.k, string(it.Node().Data))
+ }
+}
+
+// Pop key from stack.
+func (t *KeyTracker) Pop(node *unstable.Node) {
+ it := node.Key()
+ for it.Next() {
+ t.k = t.k[:len(t.k)-1]
+ }
+}
+
+// Key returns a copy of the current key.
+func (t *KeyTracker) Key() []string {
+ k := make([]string, len(t.k))
+ copy(k, t.k)
+ return k
+}
+
+func (t *KeyTracker) reset() {
+ t.k = t.k[:0]
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go
new file mode 100644
index 000000000000..76df2d5b6a95
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go
@@ -0,0 +1,358 @@
+package tracker
+
+import (
+ "bytes"
+ "fmt"
+ "sync"
+
+ "github.com/pelletier/go-toml/v2/unstable"
+)
+
+type keyKind uint8
+
+const (
+ invalidKind keyKind = iota
+ valueKind
+ tableKind
+ arrayTableKind
+)
+
+func (k keyKind) String() string {
+ switch k {
+ case invalidKind:
+ return "invalid"
+ case valueKind:
+ return "value"
+ case tableKind:
+ return "table"
+ case arrayTableKind:
+ return "array table"
+ }
+ panic("missing keyKind string mapping")
+}
+
+// SeenTracker tracks which keys have been seen with which TOML type to flag
+// duplicates and mismatches according to the spec.
+//
+// Each node in the visited tree is represented by an entry. Each entry has an
+// identifier, which is provided by a counter. Entries are stored in the array
+// entries. As new nodes are discovered (referenced for the first time in the
+// TOML document), entries are created and appended to the array. An entry
+// points to its parent using its id.
+//
+// To find whether a given key (sequence of []byte) has already been visited,
+// the entries are linearly searched, looking for one with the right name and
+// parent id.
+//
+// Given that all keys appear in the document after their parent, it is
+// guaranteed that all descendants of a node are stored after the node, which
+// speeds up the search process.
+//
+// When encountering [[array tables]], the descendants of that node are removed
+// to allow that branch of the tree to be "rediscovered". To maintain the
+// invariant above, the deletion process needs to keep the order of entries.
+// This results in more copies in that case.
+type SeenTracker struct {
+ entries []entry
+ currentIdx int
+}
+
+var pool = sync.Pool{
+ New: func() interface{} {
+ return &SeenTracker{}
+ },
+}
+
+func (s *SeenTracker) reset() {
+ // Always contains a root element at index 0.
+ s.currentIdx = 0
+ if len(s.entries) == 0 {
+ s.entries = make([]entry, 1, 2)
+ } else {
+ s.entries = s.entries[:1]
+ }
+ s.entries[0].child = -1
+ s.entries[0].next = -1
+}
+
+type entry struct {
+ // Use -1 to indicate no child or no sibling.
+ child int
+ next int
+
+ name []byte
+ kind keyKind
+ explicit bool
+ kv bool
+}
+
+// Find the index of the child of parentIdx with key k. Returns -1 if
+// it does not exist.
+func (s *SeenTracker) find(parentIdx int, k []byte) int {
+ for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next {
+ if bytes.Equal(s.entries[i].name, k) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Remove all descendants of node at position idx.
+func (s *SeenTracker) clear(idx int) {
+ if idx >= len(s.entries) {
+ return
+ }
+
+ for i := s.entries[idx].child; i >= 0; {
+ next := s.entries[i].next
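+ // Detach entry i and push it onto the free list threaded through
+ // entries[0].next so that create() can reuse the slot.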
+ n := s.entries[0].next
+ s.entries[0].next = i
+ s.entries[i].next = n
+ s.entries[i].name = nil
+ s.clear(i)
+ i = next
+ }
+
+ s.entries[idx].child = -1
+}
+
+func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool, kv bool) int {
+ e := entry{
+ child: -1,
+ next: s.entries[parentIdx].child,
+
+ name: name,
+ kind: kind,
+ explicit: explicit,
+ kv: kv,
+ }
+ var idx int
+ if s.entries[0].next >= 0 {
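+ // Reuse a slot from the free list populated by clear().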
+ idx = s.entries[0].next
+ s.entries[0].next = s.entries[idx].next
+ s.entries[idx] = e
+ } else {
+ idx = len(s.entries)
+ s.entries = append(s.entries, e)
+ }
+
+ s.entries[parentIdx].child = idx
+
+ return idx
+}
+
+func (s *SeenTracker) setExplicitFlag(parentIdx int) {
+ for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next {
+ if s.entries[i].kv {
+ s.entries[i].explicit = true
+ s.entries[i].kv = false
+ }
+ s.setExplicitFlag(i)
+ }
+}
+
+// CheckExpression takes a top-level node and checks that it does not contain
+// keys that have been seen in previous calls, and validates that types are
+// consistent. It returns true if it is the first time this node's key is seen.
+// Useful to clear array tables on first use.
+func (s *SeenTracker) CheckExpression(node *unstable.Node) (bool, error) {
+ if s.entries == nil {
+ s.reset()
+ }
+ switch node.Kind {
+ case unstable.KeyValue:
+ return s.checkKeyValue(node)
+ case unstable.Table:
+ return s.checkTable(node)
+ case unstable.ArrayTable:
+ return s.checkArrayTable(node)
+ default:
+ panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind))
+ }
+}
+
+func (s *SeenTracker) checkTable(node *unstable.Node) (bool, error) {
+ if s.currentIdx >= 0 {
+ s.setExplicitFlag(s.currentIdx)
+ }
+
+ it := node.Key()
+
+ parentIdx := 0
+
+ // This code is duplicated in checkArrayTable. This is because factoring
+ // it in a function requires to copy the iterator, or allocate it to the
+ // heap, which is not cheap.
+ for it.Next() {
+ if it.IsLast() {
+ break
+ }
+
+ k := it.Node().Data
+
+ idx := s.find(parentIdx, k)
+
+ if idx < 0 {
+ idx = s.create(parentIdx, k, tableKind, false, false)
+ } else {
+ entry := s.entries[idx]
+ if entry.kind == valueKind {
+ return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
+ }
+ }
+ parentIdx = idx
+ }
+
+ k := it.Node().Data
+ idx := s.find(parentIdx, k)
+
+ first := false
+ if idx >= 0 {
+ kind := s.entries[idx].kind
+ if kind != tableKind {
+ return false, fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind)
+ }
+ if s.entries[idx].explicit {
+ return false, fmt.Errorf("toml: table %s already exists", string(k))
+ }
+ s.entries[idx].explicit = true
+ } else {
+ idx = s.create(parentIdx, k, tableKind, true, false)
+ first = true
+ }
+
+ s.currentIdx = idx
+
+ return first, nil
+}
+
+func (s *SeenTracker) checkArrayTable(node *unstable.Node) (bool, error) {
+ if s.currentIdx >= 0 {
+ s.setExplicitFlag(s.currentIdx)
+ }
+
+ it := node.Key()
+
+ parentIdx := 0
+
+ for it.Next() {
+ if it.IsLast() {
+ break
+ }
+
+ k := it.Node().Data
+
+ idx := s.find(parentIdx, k)
+
+ if idx < 0 {
+ idx = s.create(parentIdx, k, tableKind, false, false)
+ } else {
+ entry := s.entries[idx]
+ if entry.kind == valueKind {
+ return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
+ }
+ }
+
+ parentIdx = idx
+ }
+
+ k := it.Node().Data
+ idx := s.find(parentIdx, k)
+
+ firstTime := idx < 0
+ if firstTime {
+ idx = s.create(parentIdx, k, arrayTableKind, true, false)
+ } else {
+ kind := s.entries[idx].kind
+ if kind != arrayTableKind {
+ return false, fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k))
+ }
+ s.clear(idx)
+ }
+
+ s.currentIdx = idx
+
+ return firstTime, nil
+}
+
+func (s *SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) {
+ parentIdx := s.currentIdx
+ it := node.Key()
+
+ for it.Next() {
+ k := it.Node().Data
+
+ idx := s.find(parentIdx, k)
+
+ if idx < 0 {
+ idx = s.create(parentIdx, k, tableKind, false, true)
+ } else {
+ entry := s.entries[idx]
+ if it.IsLast() {
+ return false, fmt.Errorf("toml: key %s is already defined", string(k))
+ } else if entry.kind != tableKind {
+ return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
+ } else if entry.explicit {
+ return false, fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k))
+ }
+ }
+
+ parentIdx = idx
+ }
+
+ s.entries[parentIdx].kind = valueKind
+
+ value := node.Value()
+
+ switch value.Kind {
+ case unstable.InlineTable:
+ return s.checkInlineTable(value)
+ case unstable.Array:
+ return s.checkArray(value)
+ }
+
+ return false, nil
+}
+
+func (s *SeenTracker) checkArray(node *unstable.Node) (first bool, err error) {
+ it := node.Children()
+ for it.Next() {
+ n := it.Node()
+ switch n.Kind {
+ case unstable.InlineTable:
+ first, err = s.checkInlineTable(n)
+ if err != nil {
+ return false, err
+ }
+ case unstable.Array:
+ first, err = s.checkArray(n)
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ return first, nil
+}
+
+func (s *SeenTracker) checkInlineTable(node *unstable.Node) (first bool, err error) {
+ s = pool.Get().(*SeenTracker)
+ s.reset()
+
+ it := node.Children()
+ for it.Next() {
+ n := it.Node()
+ first, err = s.checkKeyValue(n)
+ if err != nil {
+ return false, err
+ }
+ }
+
+ // As inline tables are self-contained, the tracker does not
+ // need to retain the details of what they contain. The
+ // keyValue element that creates the inline table is kept to
+ // mark the presence of the inline table and prevent
+ // redefinition of its keys: check* functions cannot walk into
+ // a value.
+ pool.Put(s)
+ return first, nil
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go
new file mode 100644
index 000000000000..bf0317392f49
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go
@@ -0,0 +1 @@
+package tracker
diff --git a/vendor/github.com/pelletier/go-toml/v2/localtime.go b/vendor/github.com/pelletier/go-toml/v2/localtime.go
new file mode 100644
index 000000000000..a856bfdb0d1c
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/localtime.go
@@ -0,0 +1,122 @@
+package toml
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/pelletier/go-toml/v2/unstable"
+)
+
+// LocalDate represents a calendar day in no specific timezone.
+type LocalDate struct {
+ Year int
+ Month int
+ Day int
+}
+
+// AsTime converts d into a specific time instance at midnight in zone.
+func (d LocalDate) AsTime(zone *time.Location) time.Time {
+ return time.Date(d.Year, time.Month(d.Month), d.Day, 0, 0, 0, 0, zone)
+}
+
+// String returns RFC 3339 representation of d.
+func (d LocalDate) String() string {
+ return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
+}
+
+// MarshalText returns RFC 3339 representation of d.
+func (d LocalDate) MarshalText() ([]byte, error) {
+ return []byte(d.String()), nil
+}
+
+// UnmarshalText parses b using RFC 3339 to fill d.
+func (d *LocalDate) UnmarshalText(b []byte) error {
+ res, err := parseLocalDate(b)
+ if err != nil {
+ return err
+ }
+ *d = res
+ return nil
+}
+
+// LocalTime represents a time of day of no specific day in no specific
+// timezone.
+type LocalTime struct {
+ Hour int // Hour of the day: [0; 24[
+ Minute int // Minute of the hour: [0; 60[
+ Second int // Second of the minute: [0; 60[
+ Nanosecond int // Nanoseconds within the second: [0, 1000000000[
+ Precision int // Number of digits to display for Nanosecond.
+}
+
+// String returns RFC 3339 representation of d.
+// If d.Nanosecond and d.Precision are zero, the time won't have a nanosecond
+// component. If d.Nanosecond > 0 but d.Precision = 0, then the minimum number
+// of digits for nanoseconds is provided.
+func (d LocalTime) String() string {
+ s := fmt.Sprintf("%02d:%02d:%02d", d.Hour, d.Minute, d.Second)
+
+ if d.Precision > 0 {
+ s += fmt.Sprintf(".%09d", d.Nanosecond)[:d.Precision+1]
+ } else if d.Nanosecond > 0 {
+ // Nanoseconds are specified, but precision is not provided. Use the
+ // minimum.
+ s += strings.Trim(fmt.Sprintf(".%09d", d.Nanosecond), "0")
+ }
+
+ return s
+}
+
+// MarshalText returns RFC 3339 representation of d.
+func (d LocalTime) MarshalText() ([]byte, error) {
+ return []byte(d.String()), nil
+}
+
+// UnmarshalText parses b using RFC 3339 to fill d.
+func (d *LocalTime) UnmarshalText(b []byte) error {
+ res, left, err := parseLocalTime(b)
+ if err == nil && len(left) != 0 {
+ err = unstable.NewParserError(left, "extra characters")
+ }
+ if err != nil {
+ return err
+ }
+ *d = res
+ return nil
+}
+
+// LocalDateTime represents a time of a specific day in no specific timezone.
+type LocalDateTime struct {
+ LocalDate
+ LocalTime
+}
+
+// AsTime converts d into a specific time instance in zone.
+func (d LocalDateTime) AsTime(zone *time.Location) time.Time {
+ return time.Date(d.Year, time.Month(d.Month), d.Day, d.Hour, d.Minute, d.Second, d.Nanosecond, zone)
+}
+
+// String returns RFC 3339 representation of d.
+func (d LocalDateTime) String() string {
+ return d.LocalDate.String() + "T" + d.LocalTime.String()
+}
+
+// MarshalText returns RFC 3339 representation of d.
+func (d LocalDateTime) MarshalText() ([]byte, error) {
+ return []byte(d.String()), nil
+}
+
+// UnmarshalText parses b using RFC 3339 to fill d.
+func (d *LocalDateTime) UnmarshalText(data []byte) error {
+ res, left, err := parseLocalDateTime(data)
+ if err == nil && len(left) != 0 {
+ err = unstable.NewParserError(left, "extra characters")
+ }
+ if err != nil {
+ return err
+ }
+
+ *d = res
+ return nil
+}
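+
+// Editor's note (illustrative sketch, not part of the upstream file): these
+// types round-trip through their RFC 3339 text forms, e.g.:
+//
+//	var t LocalTime
+//	_ = t.UnmarshalText([]byte("12:34:56.789")) // Hour 12, Minute 34, ...
+//	d := LocalDate{Year: 2024, Month: 5, Day: 1}
+//	fmt.Println(d.String()) // 2024-05-01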
diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go
new file mode 100644
index 000000000000..161acd934397
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go
@@ -0,0 +1,1133 @@
+package toml
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "slices"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+
+ "github.com/pelletier/go-toml/v2/internal/characters"
+)
+
+// Marshal serializes a Go value as a TOML document.
+//
+// It is a shortcut for Encoder.Encode() with the default options.
+func Marshal(v interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+
+ err := enc.Encode(v)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+// Encoder writes a TOML document to an output stream.
+type Encoder struct {
+ // output
+ w io.Writer
+
+ // global settings
+ tablesInline bool
+ arraysMultiline bool
+ indentSymbol string
+ indentTables bool
+ marshalJsonNumbers bool
+}
+
+// NewEncoder returns a new Encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: w,
+ indentSymbol: " ",
+ }
+}
+
+// SetTablesInline forces the encoder to emit all tables inline.
+//
+// This behavior can be controlled on an individual struct field basis with the
+// inline tag:
+//
+// MyField `toml:",inline"`
+func (enc *Encoder) SetTablesInline(inline bool) *Encoder {
+ enc.tablesInline = inline
+ return enc
+}
+
+// SetArraysMultiline forces the encoder to emit all arrays with one element per
+// line.
+//
+// This behavior can be controlled on an individual struct field basis with the multiline tag:
+//
+// MyField `multiline:"true"`
+func (enc *Encoder) SetArraysMultiline(multiline bool) *Encoder {
+ enc.arraysMultiline = multiline
+ return enc
+}
+
+// SetIndentSymbol defines the string that should be used for indentation. The
+// provided string is repeated for each indentation level. Defaults to two
+// spaces.
+func (enc *Encoder) SetIndentSymbol(s string) *Encoder {
+ enc.indentSymbol = s
+ return enc
+}
+
+// SetIndentTables forces the encoder to indent tables and array tables.
+func (enc *Encoder) SetIndentTables(indent bool) *Encoder {
+ enc.indentTables = indent
+ return enc
+}
+
+// SetMarshalJsonNumbers forces the encoder to serialize `json.Number` as a
+// float or integer instead of relying on TextMarshaler to emit a string.
+//
+// *Unstable:* This method does not follow the compatibility guarantees of
+// semver. It can be changed or removed without a new major version being
+// issued.
+func (enc *Encoder) SetMarshalJsonNumbers(enabled bool) *Encoder {
+ enc.marshalJsonNumbers = enabled
+ return enc
+}
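+
+// Editor's note (illustrative sketch, not part of the upstream file): the
+// setters above return the Encoder, so options chain:
+//
+//	var buf bytes.Buffer
+//	enc := NewEncoder(&buf).
+//		SetArraysMultiline(true).
+//		SetIndentTables(true).
+//		SetIndentSymbol("\t")
+//	_ = enc.Encode(v)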
+
+// Encode writes a TOML representation of v to the stream.
+//
+// If v cannot be represented to TOML it returns an error.
+//
+// # Encoding rules
+//
+// 1. A top level slice containing only maps or structs is encoded as [[table
+// array]].
+//
+// 2. All slices not matching rule 1 are encoded as [array]. As a result, any
+// map or struct they contain is encoded as an {inline table}.
+//
+// 3. Nil interfaces and nil pointers are not supported.
+//
+// 4. Keys in key-values always have one part.
+//
+// 5. Intermediate tables are always printed.
+//
+// By default, strings are encoded as literal strings, unless they contain either
+// a newline character or a single quote. In that case they are emitted as
+// quoted strings.
+//
+// Unsigned integers larger than math.MaxInt64 cannot be encoded. Doing so
+// results in an error. This rule exists because the TOML specification only
+// requires parsers to support at least the 64 bits integer range. Allowing
+// larger numbers would create non-standard TOML documents, which may not be
+// readable (at best) by other implementations. To encode such numbers, a
+// solution is a custom type that implements encoding.TextMarshaler.
+//
+// When encoding structs, fields are encoded in order of definition, with their
+// exact name.
+//
+// Tables and array tables are separated by empty lines. However, consecutive
+// subtables definitions are not. For example:
+//
+// [top1]
+//
+// [top2]
+// [top2.child1]
+//
+// [[array]]
+//
+// [[array]]
+// [array.child2]
+//
+// # Struct tags
+//
+// The encoding of each public struct field can be customized by the format
+// string in the "toml" key of the struct field's tag. This follows
+// encoding/json's convention. The format string starts with the name of the
+// field, optionally followed by a comma-separated list of options. The name may
+// be empty in order to provide options without overriding the default name.
+//
+// The "multiline" option emits strings as quoted multi-line TOML strings. It
+// has no effect on fields that would not be encoded as strings.
+//
+// The "inline" option turns fields that would be emitted as tables into inline
+// tables instead. It has no effect on other fields.
+//
+// The "omitempty" option prevents empty values or groups from being emitted.
+//
+// The "commented" option prefixes the value and all its children with a comment
+// symbol.
+//
+// In addition to the "toml" tag struct tag, a "comment" tag can be used to emit
+// a TOML comment before the value being annotated. Comments are ignored inside
+// inline tables. For array tables, the comment is only present before the first
+// element of the array.
+func (enc *Encoder) Encode(v interface{}) error {
+ var (
+ b []byte
+ ctx encoderCtx
+ )
+
+ ctx.inline = enc.tablesInline
+
+ if v == nil {
+ return fmt.Errorf("toml: cannot encode a nil interface")
+ }
+
+ b, err := enc.encode(b, ctx, reflect.ValueOf(v))
+ if err != nil {
+ return err
+ }
+
+ _, err = enc.w.Write(b)
+ if err != nil {
+ return fmt.Errorf("toml: cannot write: %w", err)
+ }
+
+ return nil
+}
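+
+// Editor's note (illustrative sketch, not part of the upstream file; the type
+// and field names are made up). The struct tags documented above combine as:
+//
+//	type config struct {
+//		Name  string            `toml:"name" comment:"service name"`
+//		Notes string            `toml:"notes,multiline,omitempty"`
+//		Env   map[string]string `toml:"env,inline"`
+//	}
+//
+// Env is emitted as an inline table, Notes as a multi-line string only when
+// non-empty, and name is preceded by the line "# service name".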
+
+type valueOptions struct {
+ multiline bool
+ omitempty bool
+ commented bool
+ comment string
+}
+
+type encoderCtx struct {
+ // Current top-level key.
+ parentKey []string
+
+ // Key that should be used for a KV.
+ key string
+ // Extra flag to account for the empty string
+ hasKey bool
+
+ // Set to true to indicate that the encoder is inside a KV, so that all
+ // tables need to be inlined.
+ insideKv bool
+
+ // Set to true to skip the first table header in an array table.
+ skipTableHeader bool
+
+ // Should the next table be encoded as inline
+ inline bool
+
+ // Indentation level
+ indent int
+
+ // Prefix the current value with a comment.
+ commented bool
+
+ // Options coming from struct tags
+ options valueOptions
+}
+
+func (ctx *encoderCtx) shiftKey() {
+ if ctx.hasKey {
+ ctx.parentKey = append(ctx.parentKey, ctx.key)
+ ctx.clearKey()
+ }
+}
+
+func (ctx *encoderCtx) setKey(k string) {
+ ctx.key = k
+ ctx.hasKey = true
+}
+
+func (ctx *encoderCtx) clearKey() {
+ ctx.key = ""
+ ctx.hasKey = false
+}
+
+func (ctx *encoderCtx) isRoot() bool {
+ return len(ctx.parentKey) == 0 && !ctx.hasKey
+}
+
+func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
+ i := v.Interface()
+
+ switch x := i.(type) {
+ case time.Time:
+ if x.Nanosecond() > 0 {
+ return x.AppendFormat(b, time.RFC3339Nano), nil
+ }
+ return x.AppendFormat(b, time.RFC3339), nil
+ case LocalTime:
+ return append(b, x.String()...), nil
+ case LocalDate:
+ return append(b, x.String()...), nil
+ case LocalDateTime:
+ return append(b, x.String()...), nil
+ case json.Number:
+ if enc.marshalJsonNumbers {
+ if x == "" { /// Useful zero value.
+ return append(b, "0"...), nil
+ } else if v, err := x.Int64(); err == nil {
+ return enc.encode(b, ctx, reflect.ValueOf(v))
+ } else if f, err := x.Float64(); err == nil {
+ return enc.encode(b, ctx, reflect.ValueOf(f))
+ } else {
+ return nil, fmt.Errorf("toml: unable to convert %q to int64 or float64", x)
+ }
+ }
+ }
+
+ hasTextMarshaler := v.Type().Implements(textMarshalerType)
+ if hasTextMarshaler || (v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) {
+ if !hasTextMarshaler {
+ v = v.Addr()
+ }
+
+ if ctx.isRoot() {
+ return nil, fmt.Errorf("toml: type %s implementing the TextMarshaler interface cannot be a root element", v.Type())
+ }
+
+ text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return nil, err
+ }
+
+ b = enc.encodeString(b, string(text), ctx.options)
+
+ return b, nil
+ }
+
+ switch v.Kind() {
+ // containers
+ case reflect.Map:
+ return enc.encodeMap(b, ctx, v)
+ case reflect.Struct:
+ return enc.encodeStruct(b, ctx, v)
+ case reflect.Slice, reflect.Array:
+ return enc.encodeSlice(b, ctx, v)
+ case reflect.Interface:
+ if v.IsNil() {
+ return nil, fmt.Errorf("toml: encoding a nil interface is not supported")
+ }
+
+ return enc.encode(b, ctx, v.Elem())
+ case reflect.Ptr:
+ if v.IsNil() {
+ return enc.encode(b, ctx, reflect.Zero(v.Type().Elem()))
+ }
+
+ return enc.encode(b, ctx, v.Elem())
+
+ // values
+ case reflect.String:
+ b = enc.encodeString(b, v.String(), ctx.options)
+ case reflect.Float32:
+ f := v.Float()
+
+ if math.IsNaN(f) {
+ b = append(b, "nan"...)
+ } else if f > math.MaxFloat32 {
+ b = append(b, "inf"...)
+ } else if f < -math.MaxFloat32 {
+ b = append(b, "-inf"...)
+ } else if math.Trunc(f) == f {
+ b = strconv.AppendFloat(b, f, 'f', 1, 32)
+ } else {
+ b = strconv.AppendFloat(b, f, 'f', -1, 32)
+ }
+ case reflect.Float64:
+ f := v.Float()
+ if math.IsNaN(f) {
+ b = append(b, "nan"...)
+ } else if f > math.MaxFloat64 {
+ b = append(b, "inf"...)
+ } else if f < -math.MaxFloat64 {
+ b = append(b, "-inf"...)
+ } else if math.Trunc(f) == f {
+ b = strconv.AppendFloat(b, f, 'f', 1, 64)
+ } else {
+ b = strconv.AppendFloat(b, f, 'f', -1, 64)
+ }
+ case reflect.Bool:
+ if v.Bool() {
+ b = append(b, "true"...)
+ } else {
+ b = append(b, "false"...)
+ }
+ case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint:
+ x := v.Uint()
+ if x > uint64(math.MaxInt64) {
+ return nil, fmt.Errorf("toml: not encoding uint (%d) greater than max int64 (%d)", x, int64(math.MaxInt64))
+ }
+ b = strconv.AppendUint(b, x, 10)
+ case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int:
+ b = strconv.AppendInt(b, v.Int(), 10)
+ default:
+ return nil, fmt.Errorf("toml: cannot encode value of type %s", v.Kind())
+ }
+
+ return b, nil
+}
+
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface, reflect.Map:
+ return v.IsNil()
+ default:
+ return false
+ }
+}
+
+func shouldOmitEmpty(options valueOptions, v reflect.Value) bool {
+ return options.omitempty && isEmptyValue(v)
+}
+
+func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) {
+ var err error
+
+ if !ctx.inline {
+ b = enc.encodeComment(ctx.indent, options.comment, b)
+ b = enc.commented(ctx.commented, b)
+ b = enc.indent(ctx.indent, b)
+ }
+
+ b = enc.encodeKey(b, ctx.key)
+ b = append(b, " = "...)
+
+ // create a copy of the context because the value of a KV shouldn't
+ // modify the global context.
+ subctx := ctx
+ subctx.insideKv = true
+ subctx.shiftKey()
+ subctx.options = options
+
+ b, err = enc.encode(b, subctx, v)
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+func (enc *Encoder) commented(commented bool, b []byte) []byte {
+ if commented {
+ return append(b, "# "...)
+ }
+ return b
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Struct:
+ return isEmptyStruct(v)
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func isEmptyStruct(v reflect.Value) bool {
+ // TODO: merge with walkStruct and cache.
+ typ := v.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ fieldType := typ.Field(i)
+
+ // only consider exported fields
+ if fieldType.PkgPath != "" {
+ continue
+ }
+
+ tag := fieldType.Tag.Get("toml")
+
+ // special field name to skip field
+ if tag == "-" {
+ continue
+ }
+
+ f := v.Field(i)
+
+ if !isEmptyValue(f) {
+ return false
+ }
+ }
+
+ return true
+}
+
+const literalQuote = '\''
+
+func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byte {
+ if needsQuoting(v) {
+ return enc.encodeQuotedString(options.multiline, b, v)
+ }
+
+ return enc.encodeLiteralString(b, v)
+}
+
+func needsQuoting(v string) bool {
+ // TODO: vectorize
+ for _, b := range []byte(v) {
+ if b == '\'' || b == '\r' || b == '\n' || characters.InvalidAscii(b) {
+ return true
+ }
+ }
+ return false
+}
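+
+// Editor's note (illustrative sketch, not part of the upstream file): as a
+// consequence of the check above,
+//
+//	Marshal(map[string]string{"k": "plain"}) // k = 'plain'
+//	Marshal(map[string]string{"k": "it's"})  // k = "it's"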
+
+// caller should have checked that the string does not contain new lines or ' .
+func (enc *Encoder) encodeLiteralString(b []byte, v string) []byte {
+ b = append(b, literalQuote)
+ b = append(b, v...)
+ b = append(b, literalQuote)
+
+ return b
+}
+
+func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byte {
+ stringQuote := `"`
+
+ if multiline {
+ stringQuote = `"""`
+ }
+
+ b = append(b, stringQuote...)
+ if multiline {
+ b = append(b, '\n')
+ }
+
+ const (
+ hextable = "0123456789ABCDEF"
+ // U+0000 to U+0008, U+000A to U+001F, U+007F
+ nul = 0x0
+ bs = 0x8
+ lf = 0xa
+ us = 0x1f
+ del = 0x7f
+ )
+
+ for _, r := range []byte(v) {
+ switch r {
+ case '\\':
+ b = append(b, `\\`...)
+ case '"':
+ b = append(b, `\"`...)
+ case '\b':
+ b = append(b, `\b`...)
+ case '\f':
+ b = append(b, `\f`...)
+ case '\n':
+ if multiline {
+ b = append(b, r)
+ } else {
+ b = append(b, `\n`...)
+ }
+ case '\r':
+ b = append(b, `\r`...)
+ case '\t':
+ b = append(b, `\t`...)
+ default:
+ switch {
+ case r >= nul && r <= bs, r >= lf && r <= us, r == del:
+ b = append(b, `\u00`...)
+ b = append(b, hextable[r>>4])
+ b = append(b, hextable[r&0x0f])
+ default:
+ b = append(b, r)
+ }
+ }
+ }
+
+ b = append(b, stringQuote...)
+
+ return b
+}
+
+// caller should have checked that the string is in A-Z / a-z / 0-9 / - / _ .
+func (enc *Encoder) encodeUnquotedKey(b []byte, v string) []byte {
+ return append(b, v...)
+}
+
+func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error) {
+ if len(ctx.parentKey) == 0 {
+ return b, nil
+ }
+
+ b = enc.encodeComment(ctx.indent, ctx.options.comment, b)
+
+ b = enc.commented(ctx.commented, b)
+
+ b = enc.indent(ctx.indent, b)
+
+ b = append(b, '[')
+
+ b = enc.encodeKey(b, ctx.parentKey[0])
+
+ for _, k := range ctx.parentKey[1:] {
+ b = append(b, '.')
+ b = enc.encodeKey(b, k)
+ }
+
+ b = append(b, "]\n"...)
+
+ return b, nil
+}
+
+//nolint:cyclop
+func (enc *Encoder) encodeKey(b []byte, k string) []byte {
+ needsQuotation := false
+ cannotUseLiteral := false
+
+ if len(k) == 0 {
+ return append(b, "''"...)
+ }
+
+ for _, c := range k {
+ if (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || c == '_' {
+ continue
+ }
+
+ if c == literalQuote {
+ cannotUseLiteral = true
+ }
+
+ needsQuotation = true
+ }
+
+ if needsQuotation && needsQuoting(k) {
+ cannotUseLiteral = true
+ }
+
+ switch {
+ case cannotUseLiteral:
+ return enc.encodeQuotedString(false, b, k)
+ case needsQuotation:
+ return enc.encodeLiteralString(b, k)
+ default:
+ return enc.encodeUnquotedKey(b, k)
+ }
+}
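+
+// Editor's note (illustrative sketch, not part of the upstream file): the
+// three cases above produce, for example:
+//
+//	plain_key-1    -> plain_key-1      (bare key)
+//	key with space -> 'key with space' (literal-quoted)
+//	it's           -> "it's"           (basic-quoted; ' rules out literal)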
+
+func (enc *Encoder) keyToString(k reflect.Value) (string, error) {
+ keyType := k.Type()
+ switch {
+ case keyType.Kind() == reflect.String:
+ return k.String(), nil
+
+ case keyType.Implements(textMarshalerType):
+ keyB, err := k.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err)
+ }
+ return string(keyB), nil
+
+ case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64:
+ return strconv.FormatInt(k.Int(), 10), nil
+
+ case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64:
+ return strconv.FormatUint(k.Uint(), 10), nil
+
+ case keyType.Kind() == reflect.Float32:
+ return strconv.FormatFloat(k.Float(), 'f', -1, 32), nil
+
+ case keyType.Kind() == reflect.Float64:
+ return strconv.FormatFloat(k.Float(), 'f', -1, 64), nil
+ }
+ return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind())
+}
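+
+// Editor's note (illustrative sketch, not part of the upstream file): this
+// conversion is what lets non-string map keys marshal, e.g.:
+//
+//	b, _ := Marshal(map[int64]string{1: "one"}) // 1 = 'one'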
+
+func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
+ var (
+ t table
+ emptyValueOptions valueOptions
+ )
+
+ iter := v.MapRange()
+ for iter.Next() {
+ v := iter.Value()
+
+ if isNil(v) {
+ continue
+ }
+
+ k, err := enc.keyToString(iter.Key())
+ if err != nil {
+ return nil, err
+ }
+
+ if willConvertToTableOrArrayTable(ctx, v) {
+ t.pushTable(k, v, emptyValueOptions)
+ } else {
+ t.pushKV(k, v, emptyValueOptions)
+ }
+ }
+
+ sortEntriesByKey(t.kvs)
+ sortEntriesByKey(t.tables)
+
+ return enc.encodeTable(b, ctx, t)
+}
+
+func sortEntriesByKey(e []entry) {
+ slices.SortFunc(e, func(a, b entry) int {
+ return strings.Compare(a.Key, b.Key)
+ })
+}
+
+type entry struct {
+ Key string
+ Value reflect.Value
+ Options valueOptions
+}
+
+type table struct {
+ kvs []entry
+ tables []entry
+}
+
+func (t *table) pushKV(k string, v reflect.Value, options valueOptions) {
+ for _, e := range t.kvs {
+ if e.Key == k {
+ return
+ }
+ }
+
+ t.kvs = append(t.kvs, entry{Key: k, Value: v, Options: options})
+}
+
+func (t *table) pushTable(k string, v reflect.Value, options valueOptions) {
+ for _, e := range t.tables {
+ if e.Key == k {
+ return
+ }
+ }
+ t.tables = append(t.tables, entry{Key: k, Value: v, Options: options})
+}
+
+func walkStruct(ctx encoderCtx, t *table, v reflect.Value) {
+ // TODO: cache this
+ typ := v.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ fieldType := typ.Field(i)
+
+ // only consider exported fields
+ if fieldType.PkgPath != "" {
+ continue
+ }
+
+ tag := fieldType.Tag.Get("toml")
+
+ // special field name to skip field
+ if tag == "-" {
+ continue
+ }
+
+ k, opts := parseTag(tag)
+ if !isValidName(k) {
+ k = ""
+ }
+
+ f := v.Field(i)
+
+ if k == "" {
+ if fieldType.Anonymous {
+ if fieldType.Type.Kind() == reflect.Struct {
+ walkStruct(ctx, t, f)
+ } else if fieldType.Type.Kind() == reflect.Ptr && !f.IsNil() && f.Elem().Kind() == reflect.Struct {
+ walkStruct(ctx, t, f.Elem())
+ }
+ continue
+ } else {
+ k = fieldType.Name
+ }
+ }
+
+ if isNil(f) {
+ continue
+ }
+
+ options := valueOptions{
+ multiline: opts.multiline,
+ omitempty: opts.omitempty,
+ commented: opts.commented,
+ comment: fieldType.Tag.Get("comment"),
+ }
+
+ if opts.inline || !willConvertToTableOrArrayTable(ctx, f) {
+ t.pushKV(k, f, options)
+ } else {
+ t.pushTable(k, f, options)
+ }
+ }
+}
+
+func (enc *Encoder) encodeStruct(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
+ var t table
+
+ walkStruct(ctx, &t, v)
+
+ return enc.encodeTable(b, ctx, t)
+}
+
+func (enc *Encoder) encodeComment(indent int, comment string, b []byte) []byte {
+ for len(comment) > 0 {
+ var line string
+ idx := strings.IndexByte(comment, '\n')
+ if idx >= 0 {
+ line = comment[:idx]
+ comment = comment[idx+1:]
+ } else {
+ line = comment
+ comment = ""
+ }
+ b = enc.indent(indent, b)
+ b = append(b, "# "...)
+ b = append(b, line...)
+ b = append(b, '\n')
+ }
+ return b
+}
+
+func isValidName(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ case !unicode.IsLetter(c) && !unicode.IsDigit(c):
+ return false
+ }
+ }
+ return true
+}
+
+type tagOptions struct {
+ multiline bool
+ inline bool
+ omitempty bool
+ commented bool
+}
+
+func parseTag(tag string) (string, tagOptions) {
+ opts := tagOptions{}
+
+ idx := strings.Index(tag, ",")
+ if idx == -1 {
+ return tag, opts
+ }
+
+ raw := tag[idx+1:]
+ tag = string(tag[:idx])
+ for raw != "" {
+ var o string
+ i := strings.Index(raw, ",")
+ if i >= 0 {
+ o, raw = raw[:i], raw[i+1:]
+ } else {
+ o, raw = raw, ""
+ }
+ switch o {
+ case "multiline":
+ opts.multiline = true
+ case "inline":
+ opts.inline = true
+ case "omitempty":
+ opts.omitempty = true
+ case "commented":
+ opts.commented = true
+ }
+ }
+
+ return tag, opts
+}
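+
+// Editor's note (illustrative sketch, not part of the upstream file):
+//
+//	name, opts := parseTag("host,inline,omitempty")
+//	// name == "host"; opts.inline and opts.omitempty are true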
+
+func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, error) {
+ var err error
+
+ ctx.shiftKey()
+
+ if ctx.insideKv || (ctx.inline && !ctx.isRoot()) {
+ return enc.encodeTableInline(b, ctx, t)
+ }
+
+ if !ctx.skipTableHeader {
+ b, err = enc.encodeTableHeader(ctx, b)
+ if err != nil {
+ return nil, err
+ }
+
+ if enc.indentTables && len(ctx.parentKey) > 0 {
+ ctx.indent++
+ }
+ }
+ ctx.skipTableHeader = false
+
+ hasNonEmptyKV := false
+ for _, kv := range t.kvs {
+ if shouldOmitEmpty(kv.Options, kv.Value) {
+ continue
+ }
+ hasNonEmptyKV = true
+
+ ctx.setKey(kv.Key)
+ ctx2 := ctx
+ ctx2.commented = kv.Options.commented || ctx2.commented
+
+ b, err = enc.encodeKv(b, ctx2, kv.Options, kv.Value)
+ if err != nil {
+ return nil, err
+ }
+
+ b = append(b, '\n')
+ }
+
+ first := true
+ for _, table := range t.tables {
+ if shouldOmitEmpty(table.Options, table.Value) {
+ continue
+ }
+ if first {
+ first = false
+ if hasNonEmptyKV {
+ b = append(b, '\n')
+ }
+ } else {
+ b = append(b, "\n"...)
+ }
+
+ ctx.setKey(table.Key)
+
+ ctx.options = table.Options
+ ctx2 := ctx
+ ctx2.commented = ctx2.commented || ctx.options.commented
+
+ b, err = enc.encode(b, ctx2, table.Value)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return b, nil
+}
+
+func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte, error) {
+ var err error
+
+ b = append(b, '{')
+
+ first := true
+ for _, kv := range t.kvs {
+ if shouldOmitEmpty(kv.Options, kv.Value) {
+ continue
+ }
+
+ if first {
+ first = false
+ } else {
+ b = append(b, `, `...)
+ }
+
+ ctx.setKey(kv.Key)
+
+ b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(t.tables) > 0 {
+ panic("inline table cannot contain nested tables, only key-values")
+ }
+
+ b = append(b, "}"...)
+
+ return b, nil
+}
+
+func willConvertToTable(ctx encoderCtx, v reflect.Value) bool {
+ if !v.IsValid() {
+ return false
+ }
+ if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) {
+ return false
+ }
+
+ t := v.Type()
+ switch t.Kind() {
+ case reflect.Map, reflect.Struct:
+ return !ctx.inline
+ case reflect.Interface:
+ return willConvertToTable(ctx, v.Elem())
+ case reflect.Ptr:
+ if v.IsNil() {
+ return false
+ }
+
+ return willConvertToTable(ctx, v.Elem())
+ default:
+ return false
+ }
+}
+
+func willConvertToTableOrArrayTable(ctx encoderCtx, v reflect.Value) bool {
+ if ctx.insideKv {
+ return false
+ }
+ t := v.Type()
+
+ if t.Kind() == reflect.Interface {
+ return willConvertToTableOrArrayTable(ctx, v.Elem())
+ }
+
+ if t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
+ if v.Len() == 0 {
+ // An empty slice should be a kv = [].
+ return false
+ }
+
+ for i := 0; i < v.Len(); i++ {
+ t := willConvertToTable(ctx, v.Index(i))
+
+ if !t {
+ return false
+ }
+ }
+
+ return true
+ }
+
+ return willConvertToTable(ctx, v)
+}
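+
+// Editor's note (illustrative sketch, not part of the upstream file): this
+// predicate routes a slice of structs to [[table array]] syntax while a slice
+// of scalars stays a plain array:
+//
+//	type doc struct {
+//		Items []struct{ N int } // encoded as [[Items]] blocks
+//		Ints  []int             // encoded as Ints = [1, 2, 3]
+//	}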
+
+func (enc *Encoder) encodeSlice(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
+ if v.Len() == 0 {
+ b = append(b, "[]"...)
+
+ return b, nil
+ }
+
+ if willConvertToTableOrArrayTable(ctx, v) {
+ return enc.encodeSliceAsArrayTable(b, ctx, v)
+ }
+
+ return enc.encodeSliceAsArray(b, ctx, v)
+}
+
+// caller should have checked that v is a slice that only contains values that
+// encode into tables.
+func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
+ ctx.shiftKey()
+
+ scratch := make([]byte, 0, 64)
+
+ scratch = enc.commented(ctx.commented, scratch)
+
+ if enc.indentTables {
+ scratch = enc.indent(ctx.indent, scratch)
+ }
+
+ scratch = append(scratch, "[["...)
+
+ for i, k := range ctx.parentKey {
+ if i > 0 {
+ scratch = append(scratch, '.')
+ }
+
+ scratch = enc.encodeKey(scratch, k)
+ }
+
+ scratch = append(scratch, "]]\n"...)
+ ctx.skipTableHeader = true
+
+ b = enc.encodeComment(ctx.indent, ctx.options.comment, b)
+
+ if enc.indentTables {
+ ctx.indent++
+ }
+
+ for i := 0; i < v.Len(); i++ {
+ if i != 0 {
+ b = append(b, "\n"...)
+ }
+
+ b = append(b, scratch...)
+
+ var err error
+ b, err = enc.encode(b, ctx, v.Index(i))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return b, nil
+}
+
+func (enc *Encoder) encodeSliceAsArray(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
+ multiline := ctx.options.multiline || enc.arraysMultiline
+ separator := ", "
+
+ b = append(b, '[')
+
+ subCtx := ctx
+ subCtx.options = valueOptions{}
+
+ if multiline {
+ separator = ",\n"
+
+ b = append(b, '\n')
+
+ subCtx.indent++
+ }
+
+ var err error
+ first := true
+
+ for i := 0; i < v.Len(); i++ {
+ if first {
+ first = false
+ } else {
+ b = append(b, separator...)
+ }
+
+ if multiline {
+ b = enc.indent(subCtx.indent, b)
+ }
+
+ b, err = enc.encode(b, subCtx, v.Index(i))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if multiline {
+ b = append(b, '\n')
+ b = enc.indent(ctx.indent, b)
+ }
+
+ b = append(b, ']')
+
+ return b, nil
+}
+
+func (enc *Encoder) indent(level int, b []byte) []byte {
+ for i := 0; i < level; i++ {
+ b = append(b, enc.indentSymbol...)
+ }
+
+ return b
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/strict.go b/vendor/github.com/pelletier/go-toml/v2/strict.go
new file mode 100644
index 000000000000..802e7e4d1507
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/strict.go
@@ -0,0 +1,107 @@
+package toml
+
+import (
+ "github.com/pelletier/go-toml/v2/internal/danger"
+ "github.com/pelletier/go-toml/v2/internal/tracker"
+ "github.com/pelletier/go-toml/v2/unstable"
+)
+
+type strict struct {
+ Enabled bool
+
+ // Tracks the current key being processed.
+ key tracker.KeyTracker
+
+ missing []unstable.ParserError
+}
+
+func (s *strict) EnterTable(node *unstable.Node) {
+ if !s.Enabled {
+ return
+ }
+
+ s.key.UpdateTable(node)
+}
+
+func (s *strict) EnterArrayTable(node *unstable.Node) {
+ if !s.Enabled {
+ return
+ }
+
+ s.key.UpdateArrayTable(node)
+}
+
+func (s *strict) EnterKeyValue(node *unstable.Node) {
+ if !s.Enabled {
+ return
+ }
+
+ s.key.Push(node)
+}
+
+func (s *strict) ExitKeyValue(node *unstable.Node) {
+ if !s.Enabled {
+ return
+ }
+
+ s.key.Pop(node)
+}
+
+func (s *strict) MissingTable(node *unstable.Node) {
+ if !s.Enabled {
+ return
+ }
+
+ s.missing = append(s.missing, unstable.ParserError{
+ Highlight: keyLocation(node),
+ Message: "missing table",
+ Key: s.key.Key(),
+ })
+}
+
+func (s *strict) MissingField(node *unstable.Node) {
+ if !s.Enabled {
+ return
+ }
+
+ s.missing = append(s.missing, unstable.ParserError{
+ Highlight: keyLocation(node),
+ Message: "missing field",
+ Key: s.key.Key(),
+ })
+}
+
+func (s *strict) Error(doc []byte) error {
+ if !s.Enabled || len(s.missing) == 0 {
+ return nil
+ }
+
+ err := &StrictMissingError{
+ Errors: make([]DecodeError, 0, len(s.missing)),
+ }
+
+ for _, derr := range s.missing {
+ derr := derr
+ err.Errors = append(err.Errors, *wrapDecodeError(doc, &derr))
+ }
+
+ return err
+}
+
+func keyLocation(node *unstable.Node) []byte {
+ k := node.Key()
+
+ hasOne := k.Next()
+ if !hasOne {
+ panic("should not be called with empty key")
+ }
+
+ start := k.Node().Data
+ end := k.Node().Data
+
+ for k.Next() {
+ end = k.Node().Data
+ }
+
+ return danger.BytesRange(start, end)
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/toml.abnf b/vendor/github.com/pelletier/go-toml/v2/toml.abnf
new file mode 100644
index 000000000000..473f3749e8b0
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/toml.abnf
@@ -0,0 +1,243 @@
+;; This document describes TOML's syntax, using the ABNF format (defined in
+;; RFC 5234 -- https://www.ietf.org/rfc/rfc5234.txt).
+;;
+;; All valid TOML documents will match this description, however certain
+;; invalid documents would need to be rejected as per the semantics described
+;; in the supporting text description.
+
+;; It is possible to try this grammar interactively, using instaparse.
+;; http://instaparse.mojombo.com/
+;;
+;; To do so, in the lower right, click on Options and change `:input-format` to
+;; ':abnf'. Then paste this entire ABNF document into the grammar entry box
+;; (above the options). Then you can type or paste a sample TOML document into
+;; the beige box on the left. Tada!
+
+;; Overall Structure
+
+toml = expression *( newline expression )
+
+expression = ws [ comment ]
+expression =/ ws keyval ws [ comment ]
+expression =/ ws table ws [ comment ]
+
+;; Whitespace
+
+ws = *wschar
+wschar = %x20 ; Space
+wschar =/ %x09 ; Horizontal tab
+
+;; Newline
+
+newline = %x0A ; LF
+newline =/ %x0D.0A ; CRLF
+
+;; Comment
+
+comment-start-symbol = %x23 ; #
+non-ascii = %x80-D7FF / %xE000-10FFFF
+non-eol = %x09 / %x20-7F / non-ascii
+
+comment = comment-start-symbol *non-eol
+
+;; Key-Value pairs
+
+keyval = key keyval-sep val
+
+key = simple-key / dotted-key
+simple-key = quoted-key / unquoted-key
+
+unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _
+quoted-key = basic-string / literal-string
+dotted-key = simple-key 1*( dot-sep simple-key )
+
+dot-sep = ws %x2E ws ; . Period
+keyval-sep = ws %x3D ws ; =
+
+val = string / boolean / array / inline-table / date-time / float / integer
+
+;; String
+
+string = ml-basic-string / basic-string / ml-literal-string / literal-string
+
+;; Basic String
+
+basic-string = quotation-mark *basic-char quotation-mark
+
+quotation-mark = %x22 ; "
+
+basic-char = basic-unescaped / escaped
+basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
+escaped = escape escape-seq-char
+
+escape = %x5C ; \
+escape-seq-char = %x22 ; " quotation mark U+0022
+escape-seq-char =/ %x5C ; \ reverse solidus U+005C
+escape-seq-char =/ %x62 ; b backspace U+0008
+escape-seq-char =/ %x66 ; f form feed U+000C
+escape-seq-char =/ %x6E ; n line feed U+000A
+escape-seq-char =/ %x72 ; r carriage return U+000D
+escape-seq-char =/ %x74 ; t tab U+0009
+escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX
+escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX
+
+;; Multiline Basic String
+
+ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body
+ ml-basic-string-delim
+ml-basic-string-delim = 3quotation-mark
+ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ]
+
+mlb-content = mlb-char / newline / mlb-escaped-nl
+mlb-char = mlb-unescaped / escaped
+mlb-quotes = 1*2quotation-mark
+mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
+mlb-escaped-nl = escape ws newline *( wschar / newline )
+
+;; Literal String
+
+literal-string = apostrophe *literal-char apostrophe
+
+apostrophe = %x27 ; ' apostrophe
+
+literal-char = %x09 / %x20-26 / %x28-7E / non-ascii
+
+;; Multiline Literal String
+
+ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body
+ ml-literal-string-delim
+ml-literal-string-delim = 3apostrophe
+ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ]
+
+mll-content = mll-char / newline
+mll-char = %x09 / %x20-26 / %x28-7E / non-ascii
+mll-quotes = 1*2apostrophe
+
+;; Integer
+
+integer = dec-int / hex-int / oct-int / bin-int
+
+minus = %x2D ; -
+plus = %x2B ; +
+underscore = %x5F ; _
+digit1-9 = %x31-39 ; 1-9
+digit0-7 = %x30-37 ; 0-7
+digit0-1 = %x30-31 ; 0-1
+
+hex-prefix = %x30.78 ; 0x
+oct-prefix = %x30.6F ; 0o
+bin-prefix = %x30.62 ; 0b
+
+dec-int = [ minus / plus ] unsigned-dec-int
+unsigned-dec-int = DIGIT / digit1-9 1*( DIGIT / underscore DIGIT )
+
+hex-int = hex-prefix HEXDIG *( HEXDIG / underscore HEXDIG )
+oct-int = oct-prefix digit0-7 *( digit0-7 / underscore digit0-7 )
+bin-int = bin-prefix digit0-1 *( digit0-1 / underscore digit0-1 )
+
+;; Float
+
+float = float-int-part ( exp / frac [ exp ] )
+float =/ special-float
+
+float-int-part = dec-int
+frac = decimal-point zero-prefixable-int
+decimal-point = %x2E ; .
+zero-prefixable-int = DIGIT *( DIGIT / underscore DIGIT )
+
+exp = "e" float-exp-part
+float-exp-part = [ minus / plus ] zero-prefixable-int
+
+special-float = [ minus / plus ] ( inf / nan )
+inf = %x69.6e.66 ; inf
+nan = %x6e.61.6e ; nan
+
+;; Boolean
+
+boolean = true / false
+
+true = %x74.72.75.65 ; true
+false = %x66.61.6C.73.65 ; false
+
+;; Date and Time (as defined in RFC 3339)
+
+date-time = offset-date-time / local-date-time / local-date / local-time
+
+date-fullyear = 4DIGIT
+date-month = 2DIGIT ; 01-12
+date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year
+time-delim = "T" / %x20 ; T, t, or space
+time-hour = 2DIGIT ; 00-23
+time-minute = 2DIGIT ; 00-59
+time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second rules
+time-secfrac = "." 1*DIGIT
+time-numoffset = ( "+" / "-" ) time-hour ":" time-minute
+time-offset = "Z" / time-numoffset
+
+partial-time = time-hour ":" time-minute ":" time-second [ time-secfrac ]
+full-date = date-fullyear "-" date-month "-" date-mday
+full-time = partial-time time-offset
+
+;; Offset Date-Time
+
+offset-date-time = full-date time-delim full-time
+
+;; Local Date-Time
+
+local-date-time = full-date time-delim partial-time
+
+;; Local Date
+
+local-date = full-date
+
+;; Local Time
+
+local-time = partial-time
+
+;; Array
+
+array = array-open [ array-values ] ws-comment-newline array-close
+
+array-open = %x5B ; [
+array-close = %x5D ; ]
+
+array-values = ws-comment-newline val ws-comment-newline array-sep array-values
+array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ]
+
+array-sep = %x2C ; , Comma
+
+ws-comment-newline = *( wschar / [ comment ] newline )
+
+;; Table
+
+table = std-table / array-table
+
+;; Standard Table
+
+std-table = std-table-open key std-table-close
+
+std-table-open = %x5B ws ; [ Left square bracket
+std-table-close = ws %x5D ; ] Right square bracket
+
+;; Inline Table
+
+inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close
+
+inline-table-open = %x7B ws ; {
+inline-table-close = ws %x7D ; }
+inline-table-sep = ws %x2C ws ; , Comma
+
+inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ]
+
+;; Array Table
+
+array-table = array-table-open key array-table-close
+
+array-table-open = %x5B.5B ws ; [[ Double left square bracket
+array-table-close = ws %x5D.5D ; ]] Double right square bracket
+
+;; Built-in ABNF terms, reproduced here for clarity
+
+ALPHA = %x41-5A / %x61-7A ; A-Z / a-z
+DIGIT = %x30-39 ; 0-9
+HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F"
diff --git a/vendor/github.com/pelletier/go-toml/v2/types.go b/vendor/github.com/pelletier/go-toml/v2/types.go
new file mode 100644
index 000000000000..3c6b8fe57021
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/types.go
@@ -0,0 +1,14 @@
+package toml
+
+import (
+ "encoding"
+ "reflect"
+ "time"
+)
+
+var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil))
+var sliceInterfaceType = reflect.TypeOf([]interface{}(nil))
+var stringType = reflect.TypeOf("")
diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
new file mode 100644
index 000000000000..c3df8bee1cfb
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
@@ -0,0 +1,1334 @@
+package toml
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "github.com/pelletier/go-toml/v2/internal/danger"
+ "github.com/pelletier/go-toml/v2/internal/tracker"
+ "github.com/pelletier/go-toml/v2/unstable"
+)
+
+// Unmarshal deserializes a TOML document into a Go value.
+//
+// It is a shortcut for Decoder.Decode() with the default options.
+func Unmarshal(data []byte, v interface{}) error {
+ d := decoder{}
+ d.p.Reset(data)
+ return d.FromParser(v)
+}
+
+// Decoder reads and decodes a TOML document from an input stream.
+type Decoder struct {
+ // input
+ r io.Reader
+
+ // global settings
+ strict bool
+
+ // toggles unmarshaler interface
+ unmarshalerInterface bool
+}
+
+// NewDecoder creates a new Decoder that will read from r.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+// DisallowUnknownFields causes the Decoder to return an error when the
+// destination is a struct and the input contains a key that does not match a
+// non-ignored field.
+//
+// In that case, the Decoder returns a StrictMissingError that can be used to
+// retrieve the individual errors as well as generate a human readable
+// description of the missing fields.
+func (d *Decoder) DisallowUnknownFields() *Decoder {
+ d.strict = true
+ return d
+}
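+
+// Editor's note (illustrative sketch, not part of the upstream file; document
+// contents are made up):
+//
+//	var cfg struct{ Name string }
+//	d := NewDecoder(strings.NewReader("name = 'x'\ntypo = 1"))
+//	err := d.DisallowUnknownFields().Decode(&cfg)
+//	var sme *StrictMissingError
+//	if errors.As(err, &sme) {
+//		fmt.Println(sme.String()) // human-readable list of unknown keys
+//	}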
+
+// EnableUnmarshalerInterface enables decoding through the unmarshaler
+// interface.
+//
+// With this feature enabled, types implementing the unstable/Unmarshaler
+// interface can be decoded from any structure of the document. It allows types
+// that don't have a straightforward TOML representation to provide their own
+// decoding logic.
+//
+// Currently, types can only decode from a single value. Tables and array tables
+// are not supported.
+//
+// *Unstable:* This method does not follow the compatibility guarantees of
+// semver. It can be changed or removed without a new major version being
+// issued.
+func (d *Decoder) EnableUnmarshalerInterface() *Decoder {
+ d.unmarshalerInterface = true
+ return d
+}
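+
+// Editor's note (illustrative sketch, not part of the upstream file; the type
+// is made up). A type opting in implements UnmarshalTOML against the unstable
+// AST:
+//
+//	type semver struct{ raw string }
+//
+//	func (s *semver) UnmarshalTOML(n *unstable.Node) error {
+//		if n.Kind != unstable.String {
+//			return fmt.Errorf("semver: expected string, got %s", n.Kind)
+//		}
+//		s.raw = string(n.Data)
+//		return nil
+//	}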
+
+// Decode the whole content of r into v.
+//
+// By default, values in the document that don't exist in the target Go value
+// are ignored. See Decoder.DisallowUnknownFields() to change this behavior.
+//
+// When a TOML local date, time, or date-time is decoded into a time.Time, its
+// value is represented in time.Local timezone. Otherwise the appropriate Local*
+// structure is used. For time values, precision up to the nanosecond is
+// supported by truncating extra digits.
+//
+// Empty tables decoded in an interface{} create an empty initialized
+// map[string]interface{}.
+//
+// Types implementing the encoding.TextUnmarshaler interface are decoded from a
+// TOML string.
+//
+// When decoding a number, go-toml will return an error if the number is out of
+// bounds for the target type (which includes negative numbers when decoding
+// into an unsigned int).
+//
+// If an error occurs while decoding the content of the document, this function
+// returns a toml.DecodeError, providing context about the issue. When using
+// strict mode and a field is missing, a `toml.StrictMissingError` is
+// returned. In any other case, this function returns a standard Go error.
+//
+// # Type mapping
+//
+// List of supported TOML types and their associated accepted Go types:
+//
+// String -> string
+// Integer -> uint*, int*, depending on size
+// Float -> float*, depending on size
+// Boolean -> bool
+// Offset Date-Time -> time.Time
+// Local Date-time -> LocalDateTime, time.Time
+// Local Date -> LocalDate, time.Time
+// Local Time -> LocalTime, time.Time
+// Array -> slice and array, depending on elements types
+// Table -> map and struct
+// Inline Table -> same as Table
+// Array of Tables -> same as Array and Table
+func (d *Decoder) Decode(v interface{}) error {
+ b, err := io.ReadAll(d.r)
+ if err != nil {
+ return fmt.Errorf("toml: %w", err)
+ }
+
+ dec := decoder{
+ strict: strict{
+ Enabled: d.strict,
+ },
+ unmarshalerInterface: d.unmarshalerInterface,
+ }
+ dec.p.Reset(b)
+
+ return dec.FromParser(v)
+}
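+
+// Editor's note (illustrative sketch, not part of the upstream file; the file
+// name is made up):
+//
+//	f, _ := os.Open("config.toml") // error handling elided for brevity
+//	defer f.Close()
+//	var cfg map[string]interface{}
+//	if err := NewDecoder(f).Decode(&cfg); err != nil {
+//		var derr *DecodeError
+//		if errors.As(err, &derr) {
+//			fmt.Println(derr.String()) // error with document position
+//		}
+//	}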
+
+type decoder struct {
+ // Which parser instance is in use for this decoding session.
+ p unstable.Parser
+
+ // Flag indicating that the current expression is stashed.
+ // If set to true, calling nextExpr will not actually pull a new expression
+ // but turn off the flag instead.
+ stashedExpr bool
+
+ // Skip expressions until a table is found. This is set to true when a
+ // table could not be created (missing field in map), so all KV expressions
+ // need to be skipped.
+ skipUntilTable bool
+
+ // Flag indicating that the current array/slice table should be cleared because
+ // it is the first encounter of an array table.
+ clearArrayTable bool
+
+ // Tracks position in Go arrays.
+ // This is used when decoding [[array tables]] into Go arrays. Given array
+ // tables are separate TOML expressions, we need to keep track of where we
+ // are in the Go array, as we can't just introspect its size.
+ arrayIndexes map[reflect.Value]int
+
+ // Tracks keys that have been seen, with which type.
+ seen tracker.SeenTracker
+
+ // Strict mode
+ strict strict
+
+ // Flag that enables/disables unmarshaler interface.
+ unmarshalerInterface bool
+
+ // Current context for the error.
+ errorContext *errorContext
+}
+
+type errorContext struct {
+ Struct reflect.Type
+ Field []int
+}
+
+func (d *decoder) typeMismatchError(toml string, target reflect.Type) error {
+ return fmt.Errorf("toml: %s", d.typeMismatchString(toml, target))
+}
+
+func (d *decoder) typeMismatchString(toml string, target reflect.Type) string {
+ if d.errorContext != nil && d.errorContext.Struct != nil {
+ ctx := d.errorContext
+ f := ctx.Struct.FieldByIndex(ctx.Field)
+ return fmt.Sprintf("cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type)
+ }
+ return fmt.Sprintf("cannot decode TOML %s into a Go value of type %s", toml, target)
+}
+
+func (d *decoder) expr() *unstable.Node {
+ return d.p.Expression()
+}
+
+func (d *decoder) nextExpr() bool {
+ if d.stashedExpr {
+ d.stashedExpr = false
+ return true
+ }
+ return d.p.NextExpression()
+}
+
+func (d *decoder) stashExpr() {
+ d.stashedExpr = true
+}
+
+func (d *decoder) arrayIndex(shouldAppend bool, v reflect.Value) int {
+ if d.arrayIndexes == nil {
+ d.arrayIndexes = make(map[reflect.Value]int, 1)
+ }
+
+ idx, ok := d.arrayIndexes[v]
+
+ if !ok {
+ d.arrayIndexes[v] = 0
+ } else if shouldAppend {
+ idx++
+ d.arrayIndexes[v] = idx
+ }
+
+ return idx
+}
+
+func (d *decoder) FromParser(v interface{}) error {
+ r := reflect.ValueOf(v)
+ if r.Kind() != reflect.Ptr {
+ return fmt.Errorf("toml: decoding can only be performed into a pointer, not %s", r.Kind())
+ }
+
+ if r.IsNil() {
+ return fmt.Errorf("toml: decoding pointer target cannot be nil")
+ }
+
+ r = r.Elem()
+ if r.Kind() == reflect.Interface && r.IsNil() {
+ newMap := map[string]interface{}{}
+ r.Set(reflect.ValueOf(newMap))
+ }
+
+ err := d.fromParser(r)
+ if err == nil {
+ return d.strict.Error(d.p.Data())
+ }
+
+ var e *unstable.ParserError
+ if errors.As(err, &e) {
+ return wrapDecodeError(d.p.Data(), e)
+ }
+
+ return err
+}
+
+func (d *decoder) fromParser(root reflect.Value) error {
+ for d.nextExpr() {
+ err := d.handleRootExpression(d.expr(), root)
+ if err != nil {
+ return err
+ }
+ }
+
+ return d.p.Error()
+}
+
+/*
+Rules for the unmarshal code:
+
+- The stack is used to keep track of which values need to be set where.
+- handle* functions <=> switch on a given unstable.Kind.
+- unmarshalX* functions need to unmarshal a node of kind X.
+- An "object" is either a struct or a map.
+*/
+
+func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error {
+ var x reflect.Value
+ var err error
+ var first bool // used to clear array tables on first use
+
+ if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) {
+ first, err = d.seen.CheckExpression(expr)
+ if err != nil {
+ return err
+ }
+ }
+
+ switch expr.Kind {
+ case unstable.KeyValue:
+ if d.skipUntilTable {
+ return nil
+ }
+ x, err = d.handleKeyValue(expr, v)
+ case unstable.Table:
+ d.skipUntilTable = false
+ d.strict.EnterTable(expr)
+ x, err = d.handleTable(expr.Key(), v)
+ case unstable.ArrayTable:
+ d.skipUntilTable = false
+ d.strict.EnterArrayTable(expr)
+ d.clearArrayTable = first
+ x, err = d.handleArrayTable(expr.Key(), v)
+ default:
+ panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind))
+ }
+
+ if d.skipUntilTable {
+ if expr.Kind == unstable.Table || expr.Kind == unstable.ArrayTable {
+ d.strict.MissingTable(expr)
+ }
+ } else if err == nil && x.IsValid() {
+ v.Set(x)
+ }
+
+ return err
+}
+
+func (d *decoder) handleArrayTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
+ if key.Next() {
+ return d.handleArrayTablePart(key, v)
+ }
+ return d.handleKeyValues(v)
+}
+
+func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
+ switch v.Kind() {
+ case reflect.Interface:
+ elem := v.Elem()
+ if !elem.IsValid() {
+ elem = reflect.New(sliceInterfaceType).Elem()
+ elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
+ } else if elem.Kind() == reflect.Slice {
+ if elem.Type() != sliceInterfaceType {
+ elem = reflect.New(sliceInterfaceType).Elem()
+ elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
+ } else if !elem.CanSet() {
+ nelem := reflect.New(sliceInterfaceType).Elem()
+ nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap()))
+ reflect.Copy(nelem, elem)
+ elem = nelem
+ }
+ if d.clearArrayTable && elem.Len() > 0 {
+ elem.SetLen(0)
+ d.clearArrayTable = false
+ }
+ }
+ return d.handleArrayTableCollectionLast(key, elem)
+ case reflect.Ptr:
+ elem := v.Elem()
+ if !elem.IsValid() {
+ ptr := reflect.New(v.Type().Elem())
+ v.Set(ptr)
+ elem = ptr.Elem()
+ }
+
+ elem, err := d.handleArrayTableCollectionLast(key, elem)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ v.Elem().Set(elem)
+
+ return v, nil
+ case reflect.Slice:
+ if d.clearArrayTable && v.Len() > 0 {
+ v.SetLen(0)
+ d.clearArrayTable = false
+ }
+ elemType := v.Type().Elem()
+ var elem reflect.Value
+ if elemType.Kind() == reflect.Interface {
+ elem = makeMapStringInterface()
+ } else {
+ elem = reflect.New(elemType).Elem()
+ }
+ elem2, err := d.handleArrayTable(key, elem)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ if elem2.IsValid() {
+ elem = elem2
+ }
+ return reflect.Append(v, elem), nil
+ case reflect.Array:
+ idx := d.arrayIndex(true, v)
+ if idx >= v.Len() {
+ return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx)
+ }
+ elem := v.Index(idx)
+ _, err := d.handleArrayTable(key, elem)
+ return v, err
+ default:
+ return reflect.Value{}, d.typeMismatchError("array table", v.Type())
+ }
+}
+
+// When parsing an array table expression, each part of the key needs to be
+// evaluated like a normal key, but if it returns a collection, it also needs to
+// point to the last element of the collection. Unless it is the last part of
+// the key, then it needs to create a new element at the end.
+func (d *decoder) handleArrayTableCollection(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
+ if key.IsLast() {
+ return d.handleArrayTableCollectionLast(key, v)
+ }
+
+ switch v.Kind() {
+ case reflect.Ptr:
+ elem := v.Elem()
+ if !elem.IsValid() {
+ ptr := reflect.New(v.Type().Elem())
+ v.Set(ptr)
+ elem = ptr.Elem()
+ }
+
+ elem, err := d.handleArrayTableCollection(key, elem)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ if elem.IsValid() {
+ v.Elem().Set(elem)
+ }
+
+ return v, nil
+ case reflect.Slice:
+ elem := v.Index(v.Len() - 1)
+ x, err := d.handleArrayTable(key, elem)
+ if err != nil || d.skipUntilTable {
+ return reflect.Value{}, err
+ }
+ if x.IsValid() {
+ elem.Set(x)
+ }
+
+ return v, err
+ case reflect.Array:
+ idx := d.arrayIndex(false, v)
+ if idx >= v.Len() {
+ return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx)
+ }
+ elem := v.Index(idx)
+ _, err := d.handleArrayTable(key, elem)
+ return v, err
+ }
+
+ return d.handleArrayTable(key, v)
+}
+
+func (d *decoder) handleKeyPart(key unstable.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) {
+ var rv reflect.Value
+
+ // First, dispatch over v to make sure it is a valid object.
+ // There is no guarantee over what it could be.
+ switch v.Kind() {
+ case reflect.Ptr:
+ elem := v.Elem()
+ if !elem.IsValid() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ elem = v.Elem()
+ return d.handleKeyPart(key, elem, nextFn, makeFn)
+ case reflect.Map:
+ vt := v.Type()
+
+ // Create the key for the map element. Convert to key type.
+ mk, err := d.keyFromData(vt.Key(), key.Node().Data)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ // If the map does not exist, create it.
+ if v.IsNil() {
+ vt := v.Type()
+ v = reflect.MakeMap(vt)
+ rv = v
+ }
+
+ mv := v.MapIndex(mk)
+ set := false
+ if !mv.IsValid() {
+ // If there is no value in the map, create a new one according to
+ // the map type. If the element type is interface, create either a
+ // map[string]interface{} or a []interface{} depending on whether
+ // this is the last part of the array table key.
+
+ t := vt.Elem()
+ if t.Kind() == reflect.Interface {
+ mv = makeFn()
+ } else {
+ mv = reflect.New(t).Elem()
+ }
+ set = true
+ } else if mv.Kind() == reflect.Interface {
+ mv = mv.Elem()
+ if !mv.IsValid() {
+ mv = makeFn()
+ }
+ set = true
+ } else if !mv.CanAddr() {
+ vt := v.Type()
+ t := vt.Elem()
+ oldmv := mv
+ mv = reflect.New(t).Elem()
+ mv.Set(oldmv)
+ set = true
+ }
+
+ x, err := nextFn(key, mv)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ if x.IsValid() {
+ mv = x
+ set = true
+ }
+
+ if set {
+ v.SetMapIndex(mk, mv)
+ }
+ case reflect.Struct:
+ path, found := structFieldPath(v, string(key.Node().Data))
+ if !found {
+ d.skipUntilTable = true
+ return reflect.Value{}, nil
+ }
+
+ if d.errorContext == nil {
+ d.errorContext = new(errorContext)
+ }
+ t := v.Type()
+ d.errorContext.Struct = t
+ d.errorContext.Field = path
+
+ f := fieldByIndex(v, path)
+ x, err := nextFn(key, f)
+ if err != nil || d.skipUntilTable {
+ return reflect.Value{}, err
+ }
+ if x.IsValid() {
+ f.Set(x)
+ }
+ d.errorContext.Field = nil
+ d.errorContext.Struct = nil
+ case reflect.Interface:
+ if v.Elem().IsValid() {
+ v = v.Elem()
+ } else {
+ v = makeMapStringInterface()
+ }
+
+ x, err := d.handleKeyPart(key, v, nextFn, makeFn)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ if x.IsValid() {
+ v = x
+ }
+ rv = v
+ default:
+ panic(fmt.Errorf("unhandled part: %s", v.Kind()))
+ }
+
+ return rv, nil
+}
+
+// handleArrayTablePart navigates the Go structure v using the key iterator.
+// It is only used for the prefix (non-last) parts of an array-table. When
+// encountering a collection, it should go to the last element.
+func (d *decoder) handleArrayTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
+ var makeFn valueMakerFn
+ if key.IsLast() {
+ makeFn = makeSliceInterface
+ } else {
+ makeFn = makeMapStringInterface
+ }
+ return d.handleKeyPart(key, v, d.handleArrayTableCollection, makeFn)
+}
+
+// handleTable returns a reference when it has checked the next expression but
+// cannot handle it.
+func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
+ if v.Kind() == reflect.Slice {
+ if v.Len() == 0 {
+ return reflect.Value{}, unstable.NewParserError(key.Node().Data, "cannot store a table in a slice")
+ }
+ elem := v.Index(v.Len() - 1)
+ x, err := d.handleTable(key, elem)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ if x.IsValid() {
+ elem.Set(x)
+ }
+ return reflect.Value{}, nil
+ }
+ if key.Next() {
+ // Still scoping the key
+ return d.handleTablePart(key, v)
+ }
+ // Done scoping the key.
+ // Now handle all the key-value expressions in this table.
+ return d.handleKeyValues(v)
+}
+
+// Handle root expressions until the end of the document or the next
+// non-key-value.
+func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) {
+ var rv reflect.Value
+ for d.nextExpr() {
+ expr := d.expr()
+ if expr.Kind != unstable.KeyValue {
+ // Stash the expression so that fromParser can just loop and use
+ // the right handler.
+ // We could just recurse ourselves here, but at least this gives a
+ // chance to pop the stack a bit.
+ d.stashExpr()
+ break
+ }
+
+ _, err := d.seen.CheckExpression(expr)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ x, err := d.handleKeyValue(expr, v)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ if x.IsValid() {
+ v = x
+ rv = x
+ }
+ }
+ return rv, nil
+}
+
+type (
+ handlerFn func(key unstable.Iterator, v reflect.Value) (reflect.Value, error)
+ valueMakerFn func() reflect.Value
+)
+
+func makeMapStringInterface() reflect.Value {
+ return reflect.MakeMap(mapStringInterfaceType)
+}
+
+func makeSliceInterface() reflect.Value {
+ return reflect.MakeSlice(sliceInterfaceType, 0, 16)
+}
+
+func (d *decoder) handleTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
+ return d.handleKeyPart(key, v, d.handleTable, makeMapStringInterface)
+}
+
+func (d *decoder) tryTextUnmarshaler(node *unstable.Node, v reflect.Value) (bool, error) {
+ // Special case for time, because we allow unmarshaling into it from
+ // different kinds of AST nodes.
+ if v.Type() == timeType {
+ return false, nil
+ }
+
+ if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) {
+ err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data)
+ if err != nil {
+ return false, unstable.NewParserError(d.p.Raw(node.Raw), "%w", err)
+ }
+
+ return true, nil
+ }
+
+ return false, nil
+}
+
+func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error {
+ for v.Kind() == reflect.Ptr {
+ v = initAndDereferencePointer(v)
+ }
+
+ if d.unmarshalerInterface {
+ if v.CanAddr() && v.Addr().CanInterface() {
+ if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok {
+ return outi.UnmarshalTOML(value)
+ }
+ }
+ }
+
+ ok, err := d.tryTextUnmarshaler(value, v)
+ if ok || err != nil {
+ return err
+ }
+
+ switch value.Kind {
+ case unstable.String:
+ return d.unmarshalString(value, v)
+ case unstable.Integer:
+ return d.unmarshalInteger(value, v)
+ case unstable.Float:
+ return d.unmarshalFloat(value, v)
+ case unstable.Bool:
+ return d.unmarshalBool(value, v)
+ case unstable.DateTime:
+ return d.unmarshalDateTime(value, v)
+ case unstable.LocalDate:
+ return d.unmarshalLocalDate(value, v)
+ case unstable.LocalTime:
+ return d.unmarshalLocalTime(value, v)
+ case unstable.LocalDateTime:
+ return d.unmarshalLocalDateTime(value, v)
+ case unstable.InlineTable:
+ return d.unmarshalInlineTable(value, v)
+ case unstable.Array:
+ return d.unmarshalArray(value, v)
+ default:
+ panic(fmt.Errorf("handleValue not implemented for %s", value.Kind))
+ }
+}
+
+func (d *decoder) unmarshalArray(array *unstable.Node, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Slice:
+ if v.IsNil() {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 16))
+ } else {
+ v.SetLen(0)
+ }
+ case reflect.Array:
+ // arrays are always initialized
+ case reflect.Interface:
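+ // An interface may already hold a slice from a previous
+ // expression. Reuse it only when it is exactly a settable
+ // []interface{}; otherwise start from a fresh (or copied)
+ // slice so the recursive call below can safely mutate it.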
+ elem := v.Elem()
+ if !elem.IsValid() {
+ elem = reflect.New(sliceInterfaceType).Elem()
+ elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
+ } else if elem.Kind() == reflect.Slice {
+ if elem.Type() != sliceInterfaceType {
+ elem = reflect.New(sliceInterfaceType).Elem()
+ elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
+ } else if !elem.CanSet() {
+ nelem := reflect.New(sliceInterfaceType).Elem()
+ nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap()))
+ reflect.Copy(nelem, elem)
+ elem = nelem
+ }
+ }
+ err := d.unmarshalArray(array, elem)
+ if err != nil {
+ return err
+ }
+ v.Set(elem)
+ return nil
+ default:
+ // TODO: use newDecodeError, but first the parser needs to fill
+ // array.Data.
+ return d.typeMismatchError("array", v.Type())
+ }
+
+ elemType := v.Type().Elem()
+
+ it := array.Children()
+ idx := 0
+ for it.Next() {
+ n := it.Node()
+
+ // TODO: optimize
+ if v.Kind() == reflect.Slice {
+ elem := reflect.New(elemType).Elem()
+
+ err := d.handleValue(n, elem)
+ if err != nil {
+ return err
+ }
+
+ v.Set(reflect.Append(v, elem))
+ } else { // array
+ if idx >= v.Len() {
+ return nil
+ }
+ elem := v.Index(idx)
+ err := d.handleValue(n, elem)
+ if err != nil {
+ return err
+ }
+ idx++
+ }
+ }
+
+ return nil
+}
+
+func (d *decoder) unmarshalInlineTable(itable *unstable.Node, v reflect.Value) error {
+ // Make sure v is an initialized object.
+ switch v.Kind() {
+ case reflect.Map:
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(v.Type()))
+ }
+ case reflect.Struct:
+ // structs are always initialized.
+ case reflect.Interface:
+ elem := v.Elem()
+ if !elem.IsValid() {
+ elem = makeMapStringInterface()
+ v.Set(elem)
+ }
+ return d.unmarshalInlineTable(itable, elem)
+ default:
+ return unstable.NewParserError(d.p.Raw(itable.Raw), "cannot store inline table in Go type %s", v.Kind())
+ }
+
+ it := itable.Children()
+ for it.Next() {
+ n := it.Node()
+
+ x, err := d.handleKeyValue(n, v)
+ if err != nil {
+ return err
+ }
+ if x.IsValid() {
+ v = x
+ }
+ }
+
+ return nil
+}
+
+func (d *decoder) unmarshalDateTime(value *unstable.Node, v reflect.Value) error {
+ dt, err := parseDateTime(value.Data)
+ if err != nil {
+ return err
+ }
+
+ v.Set(reflect.ValueOf(dt))
+ return nil
+}
+
+func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) error {
+ ld, err := parseLocalDate(value.Data)
+ if err != nil {
+ return err
+ }
+
+ if v.Type() == timeType {
+ cast := ld.AsTime(time.Local)
+ v.Set(reflect.ValueOf(cast))
+ return nil
+ }
+
+ v.Set(reflect.ValueOf(ld))
+
+ return nil
+}
+
+func (d *decoder) unmarshalLocalTime(value *unstable.Node, v reflect.Value) error {
+ lt, rest, err := parseLocalTime(value.Data)
+ if err != nil {
+ return err
+ }
+
+ if len(rest) > 0 {
+ return unstable.NewParserError(rest, "extra characters at the end of a local time")
+ }
+
+ v.Set(reflect.ValueOf(lt))
+ return nil
+}
+
+func (d *decoder) unmarshalLocalDateTime(value *unstable.Node, v reflect.Value) error {
+ ldt, rest, err := parseLocalDateTime(value.Data)
+ if err != nil {
+ return err
+ }
+
+ if len(rest) > 0 {
+ return unstable.NewParserError(rest, "extra characters at the end of a local date time")
+ }
+
+ if v.Type() == timeType {
+ cast := ldt.AsTime(time.Local)
+
+ v.Set(reflect.ValueOf(cast))
+ return nil
+ }
+
+ v.Set(reflect.ValueOf(ldt))
+
+ return nil
+}
+
+func (d *decoder) unmarshalBool(value *unstable.Node, v reflect.Value) error {
+ b := value.Data[0] == 't'
+
+ switch v.Kind() {
+ case reflect.Bool:
+ v.SetBool(b)
+ case reflect.Interface:
+ v.Set(reflect.ValueOf(b))
+ default:
+ return unstable.NewParserError(value.Data, "cannot assign boolean to a %s", v.Kind())
+ }
+
+ return nil
+}
+
+func (d *decoder) unmarshalFloat(value *unstable.Node, v reflect.Value) error {
+ f, err := parseFloat(value.Data)
+ if err != nil {
+ return err
+ }
+
+ switch v.Kind() {
+ case reflect.Float64:
+ v.SetFloat(f)
+ case reflect.Float32:
+ if f > math.MaxFloat32 || f < -math.MaxFloat32 {
+ return unstable.NewParserError(value.Data, "number %f does not fit in a float32", f)
+ }
+ v.SetFloat(f)
+ case reflect.Interface:
+ v.Set(reflect.ValueOf(f))
+ default:
+ return unstable.NewParserError(value.Data, "float cannot be assigned to %s", v.Kind())
+ }
+
+ return nil
+}
+
+const (
+ maxInt = int64(^uint(0) >> 1)
+ minInt = -maxInt - 1
+)
+
+// Maximum value of uint for decoding. Currently the decoder parses the integer
+// into an int64. As a result, on architectures where uint is 64 bits, the
+// effective maximum uint we can decode is the maximum of int64. On
+// architectures where uint is 32 bits, the maximum value we can decode is
+// lower: the maximum of uint32. I didn't find a way to figure out this value at
+// compile time, so it is computed during initialization.
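+// For example, on a 64-bit platform maxUint stays math.MaxInt64, while on a
+// 32-bit platform the init below lowers it to math.MaxUint32.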
+var maxUint int64 = math.MaxInt64
+
+func init() {
+ m := uint64(^uint(0))
+ if m < uint64(maxUint) {
+ maxUint = int64(m)
+ }
+}
+
+func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error {
+ kind := v.Kind()
+ if kind == reflect.Float32 || kind == reflect.Float64 {
+ return d.unmarshalFloat(value, v)
+ }
+
+ i, err := parseInteger(value.Data)
+ if err != nil {
+ return err
+ }
+
+ var r reflect.Value
+
+ switch kind {
+ case reflect.Int64:
+ v.SetInt(i)
+ return nil
+ case reflect.Int32:
+ if i < math.MinInt32 || i > math.MaxInt32 {
+ return fmt.Errorf("toml: number %d does not fit in an int32", i)
+ }
+
+ r = reflect.ValueOf(int32(i))
+ case reflect.Int16:
+ if i < math.MinInt16 || i > math.MaxInt16 {
+ return fmt.Errorf("toml: number %d does not fit in an int16", i)
+ }
+
+ r = reflect.ValueOf(int16(i))
+ case reflect.Int8:
+ if i < math.MinInt8 || i > math.MaxInt8 {
+ return fmt.Errorf("toml: number %d does not fit in an int8", i)
+ }
+
+ r = reflect.ValueOf(int8(i))
+ case reflect.Int:
+ if i < minInt || i > maxInt {
+ return fmt.Errorf("toml: number %d does not fit in an int", i)
+ }
+
+ r = reflect.ValueOf(int(i))
+ case reflect.Uint64:
+ if i < 0 {
+ return fmt.Errorf("toml: negative number %d does not fit in an uint64", i)
+ }
+
+ r = reflect.ValueOf(uint64(i))
+ case reflect.Uint32:
+ if i < 0 || i > math.MaxUint32 {
+ return fmt.Errorf("toml: negative number %d does not fit in an uint32", i)
+ }
+
+ r = reflect.ValueOf(uint32(i))
+ case reflect.Uint16:
+ if i < 0 || i > math.MaxUint16 {
+ return fmt.Errorf("toml: negative number %d does not fit in an uint16", i)
+ }
+
+ r = reflect.ValueOf(uint16(i))
+ case reflect.Uint8:
+ if i < 0 || i > math.MaxUint8 {
+ return fmt.Errorf("toml: negative number %d does not fit in an uint8", i)
+ }
+
+ r = reflect.ValueOf(uint8(i))
+ case reflect.Uint:
+ if i < 0 || i > maxUint {
+ return fmt.Errorf("toml: negative number %d does not fit in an uint", i)
+ }
+
+ r = reflect.ValueOf(uint(i))
+ case reflect.Interface:
+ r = reflect.ValueOf(i)
+ default:
+ return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("integer", v.Type()))
+ }
+
+ if !r.Type().AssignableTo(v.Type()) {
+ r = r.Convert(v.Type())
+ }
+
+ v.Set(r)
+
+ return nil
+}
+
+func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.String:
+ v.SetString(string(value.Data))
+ case reflect.Interface:
+ v.Set(reflect.ValueOf(string(value.Data)))
+ default:
+ return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("string", v.Type()))
+ }
+
+ return nil
+}
+
+func (d *decoder) handleKeyValue(expr *unstable.Node, v reflect.Value) (reflect.Value, error) {
+ d.strict.EnterKeyValue(expr)
+
+ v, err := d.handleKeyValueInner(expr.Key(), expr.Value(), v)
+ if d.skipUntilTable {
+ d.strict.MissingField(expr)
+ d.skipUntilTable = false
+ }
+
+ d.strict.ExitKeyValue(expr)
+
+ return v, err
+}
+
+func (d *decoder) handleKeyValueInner(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) {
+ if key.Next() {
+ // Still scoping the key
+ return d.handleKeyValuePart(key, value, v)
+ }
+ // Done scoping the key.
+ // v is whatever Go value we need to fill.
+ return reflect.Value{}, d.handleValue(value, v)
+}
+
+func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, error) {
+ switch {
+ case stringType.AssignableTo(keyType):
+ return reflect.ValueOf(string(data)), nil
+
+ case stringType.ConvertibleTo(keyType):
+ return reflect.ValueOf(string(data)).Convert(keyType), nil
+
+ case keyType.Implements(textUnmarshalerType):
+ mk := reflect.New(keyType.Elem())
+ if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
+ return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err)
+ }
+ return mk, nil
+
+ case reflect.PointerTo(keyType).Implements(textUnmarshalerType):
+ mk := reflect.New(keyType)
+ if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
+ return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err)
+ }
+ return mk.Elem(), nil
+
+ case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64:
+ key, err := strconv.ParseInt(string(data), 10, 64)
+ if err != nil {
+ return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from integer: %w", stringType, err)
+ }
+ return reflect.ValueOf(key).Convert(keyType), nil
+ case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64:
+ key, err := strconv.ParseUint(string(data), 10, 64)
+ if err != nil {
+ return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from unsigned integer: %w", stringType, err)
+ }
+ return reflect.ValueOf(key).Convert(keyType), nil
+
+ case keyType.Kind() == reflect.Float32:
+ key, err := strconv.ParseFloat(string(data), 32)
+ if err != nil {
+ return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err)
+ }
+ return reflect.ValueOf(float32(key)).Convert(keyType), nil
+
+ case keyType.Kind() == reflect.Float64:
+ key, err := strconv.ParseFloat(string(data), 64)
+ if err != nil {
+ return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err)
+ }
+ return reflect.ValueOf(key).Convert(keyType), nil
+ }
+ return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType)
+}
+
+func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) {
+ // contains the replacement for v
+ var rv reflect.Value
+
+ // First, dispatch over v to make sure it is a valid object.
+ // There is no guarantee over what it could be.
+ switch v.Kind() {
+ case reflect.Map:
+ vt := v.Type()
+
+ mk, err := d.keyFromData(vt.Key(), key.Node().Data)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ // If the map does not exist, create it.
+ if v.IsNil() {
+ v = reflect.MakeMap(vt)
+ rv = v
+ }
+
+ mv := v.MapIndex(mk)
+ set := false
+ if !mv.IsValid() || key.IsLast() {
+ set = true
+ mv = reflect.New(v.Type().Elem()).Elem()
+ }
+
+ nv, err := d.handleKeyValueInner(key, value, mv)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ if nv.IsValid() {
+ mv = nv
+ set = true
+ }
+
+ if set {
+ v.SetMapIndex(mk, mv)
+ }
+ case reflect.Struct:
+ path, found := structFieldPath(v, string(key.Node().Data))
+ if !found {
+ d.skipUntilTable = true
+ break
+ }
+
+ if d.errorContext == nil {
+ d.errorContext = new(errorContext)
+ }
+ t := v.Type()
+ d.errorContext.Struct = t
+ d.errorContext.Field = path
+
+ f := fieldByIndex(v, path)
+
+ if !f.CanAddr() {
+ // If the field is not addressable, need to take a slower path and
+ // make a copy of the struct itself to a new location.
+ nvp := reflect.New(v.Type())
+ nvp.Elem().Set(v)
+ v = nvp.Elem()
+ _, err := d.handleKeyValuePart(key, value, v)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return nvp.Elem(), nil
+ }
+ x, err := d.handleKeyValueInner(key, value, f)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ if x.IsValid() {
+ f.Set(x)
+ }
+ d.errorContext.Struct = nil
+ d.errorContext.Field = nil
+ case reflect.Interface:
+ v = v.Elem()
+
+ // Following encoding/json: when decoding an object into an
+ // interface{}, it must always hold a
+ // map[string]interface{}. This keeps the resulting type
+ // consistent whether or not a previous value was set.
+ if !v.IsValid() || v.Type() != mapStringInterfaceType {
+ v = makeMapStringInterface()
+ }
+
+ x, err := d.handleKeyValuePart(key, value, v)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ if x.IsValid() {
+ v = x
+ }
+ rv = v
+ case reflect.Ptr:
+ elem := v.Elem()
+ if !elem.IsValid() {
+ ptr := reflect.New(v.Type().Elem())
+ v.Set(ptr)
+ rv = v
+ elem = ptr.Elem()
+ }
+
+ elem2, err := d.handleKeyValuePart(key, value, elem)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ if elem2.IsValid() {
+ elem = elem2
+ }
+ v.Elem().Set(elem)
+ default:
+ return reflect.Value{}, fmt.Errorf("unhandled kv part: %s", v.Kind())
+ }
+
+ return rv, nil
+}
+
+func initAndDereferencePointer(v reflect.Value) reflect.Value {
+ var elem reflect.Value
+ if v.IsNil() {
+ ptr := reflect.New(v.Type().Elem())
+ v.Set(ptr)
+ }
+ elem = v.Elem()
+ return elem
+}
+
+// Same as reflect.Value.FieldByIndex, but creates pointers if needed.
+func fieldByIndex(v reflect.Value, path []int) reflect.Value {
+ for _, x := range path {
+ v = v.Field(x)
+
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+type fieldPathsMap = map[string][]int
+
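+// globalFieldPathsCache is read-mostly: lookups do a lock-free Load, and a
+// miss rebuilds the whole map as a copy before Storing it back
+// (copy-on-write). Two goroutines racing on a miss at worst duplicate the
+// work; a stored map itself is never mutated.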
+var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap
+
+func structFieldPath(v reflect.Value, name string) ([]int, bool) {
+ t := v.Type()
+
+ cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap)
+ fieldPaths, ok := cache[danger.MakeTypeID(t)]
+
+ if !ok {
+ fieldPaths = map[string][]int{}
+
+ forEachField(t, nil, func(name string, path []int) {
+ fieldPaths[name] = path
+ // extra copy for the case-insensitive match
+ fieldPaths[strings.ToLower(name)] = path
+ })
+
+ newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1)
+ newCache[danger.MakeTypeID(t)] = fieldPaths
+ for k, v := range cache {
+ newCache[k] = v
+ }
+ globalFieldPathsCache.Store(newCache)
+ }
+
+ path, ok := fieldPaths[name]
+ if !ok {
+ path, ok = fieldPaths[strings.ToLower(name)]
+ }
+ return path, ok
+}
+
+func forEachField(t reflect.Type, path []int, do func(name string, path []int)) {
+ n := t.NumField()
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+
+ if !f.Anonymous && f.PkgPath != "" {
+ // only consider exported fields.
+ continue
+ }
+
+ fieldPath := append(path, i)
+ fieldPath = fieldPath[:len(fieldPath):len(fieldPath)]
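+ // The full slice expression clips capacity, so appends for
+ // sibling or nested fields allocate a new backing array
+ // instead of overwriting this path.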
+
+ name := f.Tag.Get("toml")
+ if name == "-" {
+ continue
+ }
+
+ if i := strings.IndexByte(name, ','); i >= 0 {
+ name = name[:i]
+ }
+
+ if f.Anonymous && name == "" {
+ t2 := f.Type
+ if t2.Kind() == reflect.Ptr {
+ t2 = t2.Elem()
+ }
+
+ if t2.Kind() == reflect.Struct {
+ forEachField(t2, fieldPath, do)
+ }
+ continue
+ }
+
+ if name == "" {
+ name = f.Name
+ }
+
+ do(name, fieldPath)
+ }
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go b/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go
new file mode 100644
index 000000000000..f526bf2c090f
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go
@@ -0,0 +1,136 @@
+package unstable
+
+import (
+ "fmt"
+ "unsafe"
+
+ "github.com/pelletier/go-toml/v2/internal/danger"
+)
+
+// Iterator over a sequence of nodes.
+//
+// Starts uninitialized; you need to call Next() first.
+//
+// For example:
+//
+// it := n.Children()
+// for it.Next() {
+// n := it.Node()
+// // do something with n
+// }
+type Iterator struct {
+ started bool
+ node *Node
+}
+
+// Next moves the iterator forward and returns true if it points to a
+// node, false otherwise.
+func (c *Iterator) Next() bool {
+ if !c.started {
+ c.started = true
+ } else if c.node.Valid() {
+ c.node = c.node.Next()
+ }
+ return c.node.Valid()
+}
+
+// IsLast returns true if the current node of the iterator is the last
+// one. Subsequent calls to Next() will return false.
+func (c *Iterator) IsLast() bool {
+ return c.node.next == 0
+}
+
+// Node returns a pointer to the node pointed at by the iterator.
+func (c *Iterator) Node() *Node {
+ return c.node
+}
+
+// Node in a TOML expression AST.
+//
+// Depending on Kind, its sequence of children should be interpreted
+// differently.
+//
+// - Array nodes have one child per element in the array.
+// - InlineTable nodes have one child per key-value in the table (each of
+// Kind KeyValue).
+// - KeyValue nodes have at least two children. The first one is the value.
+// The rest make a potentially dotted key.
+// - Table and ArrayTable nodes' children represent a dotted key (same as
+// KeyValue, but without the first node being the value).
+//
+// When relevant, Raw describes the range of bytes this node is referring to in
+// the input document. Use Parser.Raw() to retrieve the actual bytes.
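+//
+// For example (a sketch), the expression `a.b = 1` produces a KeyValue node
+// with three chained children: Integer (1), Key (a), Key (b).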
+type Node struct {
+ Kind Kind
+ Raw Range // Raw bytes from the input.
+ Data []byte // Node value (either allocated or referencing the input).
+
+ // References to other nodes, as offsets in the backing array
+ // from this node. References can go backward, so those can be
+ // negative.
+ next int // 0 if last element
+ child int // 0 if no child
+}
+
+// Range of bytes in the document.
+type Range struct {
+ Offset uint32
+ Length uint32
+}
+
+// Next returns a pointer to the next node, or nil if there is no next node.
+func (n *Node) Next() *Node {
+ if n.next == 0 {
+ return nil
+ }
+ ptr := unsafe.Pointer(n)
+ size := unsafe.Sizeof(Node{})
+ return (*Node)(danger.Stride(ptr, size, n.next))
+}
+
+// Child returns a pointer to the first child node of this node. Other
+// children can be accessed by calling Next on the first child. Returns nil
+// if this Node has no child.
+func (n *Node) Child() *Node {
+ if n.child == 0 {
+ return nil
+ }
+ ptr := unsafe.Pointer(n)
+ size := unsafe.Sizeof(Node{})
+ return (*Node)(danger.Stride(ptr, size, n.child))
+}
+
+// Valid returns true if the node's kind is set (not to Invalid).
+func (n *Node) Valid() bool {
+ return n != nil
+}
+
+// Key returns an iterator over the children nodes making up the Key of a
+// supported node. Panics otherwise. They are guaranteed to all be of Kind
+// Key. A simple key yields just one element.
+func (n *Node) Key() Iterator {
+ switch n.Kind {
+ case KeyValue:
+ value := n.Child()
+ if !value.Valid() {
+ panic(fmt.Errorf("KeyValue should have at least two children"))
+ }
+ return Iterator{node: value.Next()}
+ case Table, ArrayTable:
+ return Iterator{node: n.Child()}
+ default:
+ panic(fmt.Errorf("Key() is not supported on a %s", n.Kind))
+ }
+}
+
+// Value returns a pointer to the value node of a KeyValue.
+// Guaranteed to be non-nil. Panics if not called on a KeyValue node,
+// or if the Children are malformed.
+func (n *Node) Value() *Node {
+ return n.Child()
+}
+
+// Children returns an iterator over a node's children.
+func (n *Node) Children() Iterator {
+ return Iterator{node: n.Child()}
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go b/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
new file mode 100644
index 000000000000..9538e30df93a
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
@@ -0,0 +1,71 @@
+package unstable
+
+// root contains a full AST.
+//
+// It is immutable once constructed with Builder.
+type root struct {
+ nodes []Node
+}
+
+// Iterator over the top level nodes.
+func (r *root) Iterator() Iterator {
+ it := Iterator{}
+ if len(r.nodes) > 0 {
+ it.node = &r.nodes[0]
+ }
+ return it
+}
+
+func (r *root) at(idx reference) *Node {
+ return &r.nodes[idx]
+}
+
+type reference int
+
+const invalidReference reference = -1
+
+func (r reference) Valid() bool {
+ return r != invalidReference
+}
+
+type builder struct {
+ tree root
+ lastIdx int
+}
+
+func (b *builder) Tree() *root {
+ return &b.tree
+}
+
+func (b *builder) NodeAt(ref reference) *Node {
+ return b.tree.at(ref)
+}
+
+func (b *builder) Reset() {
+ b.tree.nodes = b.tree.nodes[:0]
+ b.lastIdx = 0
+}
+
+func (b *builder) Push(n Node) reference {
+ b.lastIdx = len(b.tree.nodes)
+ b.tree.nodes = append(b.tree.nodes, n)
+ return reference(b.lastIdx)
+}
+
+func (b *builder) PushAndChain(n Node) reference {
+ newIdx := len(b.tree.nodes)
+ b.tree.nodes = append(b.tree.nodes, n)
+ if b.lastIdx >= 0 {
+ b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx
+ }
+ b.lastIdx = newIdx
+ return reference(b.lastIdx)
+}
+
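+// AttachChild and Chain store child/next links as offsets relative to the
+// source node. This lets a Node reach its siblings and children with pointer
+// arithmetic alone (see Node.Next), without a handle on the tree.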
+func (b *builder) AttachChild(parent reference, child reference) {
+ b.tree.nodes[parent].child = int(child) - int(parent)
+}
+
+func (b *builder) Chain(from reference, to reference) {
+ b.tree.nodes[from].next = int(to) - int(from)
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go b/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go
new file mode 100644
index 000000000000..7ff26c53c7b4
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go
@@ -0,0 +1,3 @@
+// Package unstable provides APIs that do not meet the backward compatibility
+// guarantees yet.
+package unstable
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go b/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go
new file mode 100644
index 000000000000..ff9df1bef84f
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go
@@ -0,0 +1,71 @@
+package unstable
+
+import "fmt"
+
+// Kind represents the type of TOML structure contained in a given Node.
+type Kind int
+
+const (
+ // Meta
+ Invalid Kind = iota
+ Comment
+ Key
+
+ // Top level structures
+ Table
+ ArrayTable
+ KeyValue
+
+ // Container values
+ Array
+ InlineTable
+
+ // Values
+ String
+ Bool
+ Float
+ Integer
+ LocalDate
+ LocalTime
+ LocalDateTime
+ DateTime
+)
+
+// String implementation of fmt.Stringer.
+func (k Kind) String() string {
+ switch k {
+ case Invalid:
+ return "Invalid"
+ case Comment:
+ return "Comment"
+ case Key:
+ return "Key"
+ case Table:
+ return "Table"
+ case ArrayTable:
+ return "ArrayTable"
+ case KeyValue:
+ return "KeyValue"
+ case Array:
+ return "Array"
+ case InlineTable:
+ return "InlineTable"
+ case String:
+ return "String"
+ case Bool:
+ return "Bool"
+ case Float:
+ return "Float"
+ case Integer:
+ return "Integer"
+ case LocalDate:
+ return "LocalDate"
+ case LocalTime:
+ return "LocalTime"
+ case LocalDateTime:
+ return "LocalDateTime"
+ case DateTime:
+ return "DateTime"
+ }
+ panic(fmt.Errorf("Kind.String() not implemented for '%d'", k))
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
new file mode 100644
index 000000000000..50358a44ffd6
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
@@ -0,0 +1,1245 @@
+package unstable
+
+import (
+ "bytes"
+ "fmt"
+ "unicode"
+
+ "github.com/pelletier/go-toml/v2/internal/characters"
+ "github.com/pelletier/go-toml/v2/internal/danger"
+)
+
+// ParserError describes an error relative to the content of the document.
+//
+// It cannot outlive the instance of Parser it refers to, and may cause panics
+// if the parser is reset.
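+//
+// A sketch of recovering a position from a ParserError, assuming the error
+// came from parser p:
+//
+// var pe *ParserError
+// if errors.As(err, &pe) {
+// shape := p.Shape(p.Range(pe.Highlight))
+// fmt.Println(shape.Start.Line, shape.Start.Column)
+// }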
+type ParserError struct {
+ Highlight []byte
+ Message string
+ Key []string // optional
+}
+
+// Error is the implementation of the error interface.
+func (e *ParserError) Error() string {
+ return e.Message
+}
+
+// NewParserError is a convenience function to create a ParserError
+//
+// Warning: Highlight needs to be a subslice of Parser.data, so only slices
+// returned by Parser.Raw are valid candidates.
+func NewParserError(highlight []byte, format string, args ...interface{}) error {
+ return &ParserError{
+ Highlight: highlight,
+ Message: fmt.Errorf(format, args...).Error(),
+ }
+}
+
+// Parser scans over a TOML-encoded document and generates an iterative AST.
+//
+// To prime the Parser, first reset it with the contents of a TOML document.
+// Then, process all top-level expressions sequentially. See Example.
+//
+// Don't forget to check Error() after you're done parsing.
+//
+// Each top-level expression needs to be fully processed before calling
+// NextExpression() again. Otherwise, calls to various Node methods may panic if
+// the parser has moved on to the next expression.
+//
+// For performance reasons, go-toml doesn't make a copy of the input bytes to
+// the parser. Make sure to copy all the bytes you need to outlive the slice
+// given to the parser.
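+//
+// A minimal usage sketch (error handling abbreviated):
+//
+// p := &Parser{}
+// p.Reset([]byte(`a = 1`))
+// for p.NextExpression() {
+// e := p.Expression()
+// fmt.Println(e.Kind) // KeyValue
+// }
+// if err := p.Error(); err != nil {
+// // handle parse error
+// }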
+type Parser struct {
+ data []byte
+ builder builder
+ ref reference
+ left []byte
+ err error
+ first bool
+
+ KeepComments bool
+}
+
+// Data returns the slice provided to the last call to Reset.
+func (p *Parser) Data() []byte {
+ return p.data
+}
+
+// Range returns a range description that corresponds to a given slice of the
+// input. If the argument is not a subslice of the parser input, this function
+// panics.
+func (p *Parser) Range(b []byte) Range {
+ return Range{
+ Offset: uint32(danger.SubsliceOffset(p.data, b)),
+ Length: uint32(len(b)),
+ }
+}
+
+// Raw returns the slice corresponding to the bytes in the given range.
+func (p *Parser) Raw(raw Range) []byte {
+ return p.data[raw.Offset : raw.Offset+raw.Length]
+}
+
+// Reset brings the parser to its initial state for a given input. It wipes
+// and reuses internal storage to reduce allocations.
+func (p *Parser) Reset(b []byte) {
+ p.builder.Reset()
+ p.ref = invalidReference
+ p.data = b
+ p.left = b
+ p.err = nil
+ p.first = true
+}
+
+// NextExpression parses the next top-level expression. If an expression was
+// successfully parsed, it returns true. If the parser is at the end of the
+// document or an error occurred, it returns false.
+//
+// Retrieve the parsed expression with Expression().
+func (p *Parser) NextExpression() bool {
+ if len(p.left) == 0 || p.err != nil {
+ return false
+ }
+
+ p.builder.Reset()
+ p.ref = invalidReference
+
+ for {
+ if len(p.left) == 0 || p.err != nil {
+ return false
+ }
+
+ if !p.first {
+ p.left, p.err = p.parseNewline(p.left)
+ }
+
+ if len(p.left) == 0 || p.err != nil {
+ return false
+ }
+
+ p.ref, p.left, p.err = p.parseExpression(p.left)
+
+ if p.err != nil {
+ return false
+ }
+
+ p.first = false
+
+ if p.ref.Valid() {
+ return true
+ }
+ }
+}
+
+// Expression returns a pointer to the node representing the last successfully
+// parsed expression.
+func (p *Parser) Expression() *Node {
+ return p.builder.NodeAt(p.ref)
+}
+
+// Error returns any error that has occurred during parsing.
+func (p *Parser) Error() error {
+ return p.err
+}
+
+// Position describes a position in the input.
+type Position struct {
+ // Number of bytes from the beginning of the input.
+ Offset int
+ // Line number, starting at 1.
+ Line int
+ // Column number, starting at 1.
+ Column int
+}
+
+// Shape describes the position of a range in the input.
+type Shape struct {
+ Start Position
+ End Position
+}
+
+func (p *Parser) position(b []byte) Position {
+ offset := danger.SubsliceOffset(p.data, b)
+
+ lead := p.data[:offset]
+
+ return Position{
+ Offset: offset,
+ Line: bytes.Count(lead, []byte{'\n'}) + 1,
+ Column: len(lead) - bytes.LastIndex(lead, []byte{'\n'}),
+ }
+}
+
+// Shape returns the shape of the given range in the input. Will
+// panic if the range is not a subslice of the input.
+func (p *Parser) Shape(r Range) Shape {
+ raw := p.Raw(r)
+ return Shape{
+ Start: p.position(raw),
+ End: p.position(raw[r.Length:]),
+ }
+}
+
+func (p *Parser) parseNewline(b []byte) ([]byte, error) {
+ if b[0] == '\n' {
+ return b[1:], nil
+ }
+
+ if b[0] == '\r' {
+ _, rest, err := scanWindowsNewline(b)
+ return rest, err
+ }
+
+ return nil, NewParserError(b[0:1], "expected newline but got %#U", b[0])
+}
+
+func (p *Parser) parseComment(b []byte) (reference, []byte, error) {
+ ref := invalidReference
+ data, rest, err := scanComment(b)
+ if p.KeepComments && err == nil {
+ ref = p.builder.Push(Node{
+ Kind: Comment,
+ Raw: p.Range(data),
+ Data: data,
+ })
+ }
+ return ref, rest, err
+}
+
+func (p *Parser) parseExpression(b []byte) (reference, []byte, error) {
+ // expression = ws [ comment ]
+ // expression =/ ws keyval ws [ comment ]
+ // expression =/ ws table ws [ comment ]
+ ref := invalidReference
+
+ b = p.parseWhitespace(b)
+
+ if len(b) == 0 {
+ return ref, b, nil
+ }
+
+ if b[0] == '#' {
+ ref, rest, err := p.parseComment(b)
+ return ref, rest, err
+ }
+
+ if b[0] == '\n' || b[0] == '\r' {
+ return ref, b, nil
+ }
+
+ var err error
+ if b[0] == '[' {
+ ref, b, err = p.parseTable(b)
+ } else {
+ ref, b, err = p.parseKeyval(b)
+ }
+
+ if err != nil {
+ return ref, nil, err
+ }
+
+ b = p.parseWhitespace(b)
+
+ if len(b) > 0 && b[0] == '#' {
+ cref, rest, err := p.parseComment(b)
+ if cref != invalidReference {
+ p.builder.Chain(ref, cref)
+ }
+ return ref, rest, err
+ }
+
+ return ref, b, nil
+}
+
+func (p *Parser) parseTable(b []byte) (reference, []byte, error) {
+ // table = std-table / array-table
+ if len(b) > 1 && b[1] == '[' {
+ return p.parseArrayTable(b)
+ }
+
+ return p.parseStdTable(b)
+}
+
+func (p *Parser) parseArrayTable(b []byte) (reference, []byte, error) {
+ // array-table = array-table-open key array-table-close
+ // array-table-open = %x5B.5B ws ; [[ Double left square bracket
+ // array-table-close = ws %x5D.5D ; ]] Double right square bracket
+ ref := p.builder.Push(Node{
+ Kind: ArrayTable,
+ })
+
+ b = b[2:]
+ b = p.parseWhitespace(b)
+
+ k, b, err := p.parseKey(b)
+ if err != nil {
+ return ref, nil, err
+ }
+
+ p.builder.AttachChild(ref, k)
+ b = p.parseWhitespace(b)
+
+ b, err = expect(']', b)
+ if err != nil {
+ return ref, nil, err
+ }
+
+ b, err = expect(']', b)
+
+ return ref, b, err
+}
+
+func (p *Parser) parseStdTable(b []byte) (reference, []byte, error) {
+ // std-table = std-table-open key std-table-close
+ // std-table-open = %x5B ws ; [ Left square bracket
+ // std-table-close = ws %x5D ; ] Right square bracket
+ ref := p.builder.Push(Node{
+ Kind: Table,
+ })
+
+ b = b[1:]
+ b = p.parseWhitespace(b)
+
+ key, b, err := p.parseKey(b)
+ if err != nil {
+ return ref, nil, err
+ }
+
+ p.builder.AttachChild(ref, key)
+
+ b = p.parseWhitespace(b)
+
+ b, err = expect(']', b)
+
+ return ref, b, err
+}
+
+func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) {
+ // keyval = key keyval-sep val
+ ref := p.builder.Push(Node{
+ Kind: KeyValue,
+ })
+
+ key, b, err := p.parseKey(b)
+ if err != nil {
+ return invalidReference, nil, err
+ }
+
+ // keyval-sep = ws %x3D ws ; =
+
+ b = p.parseWhitespace(b)
+
+ if len(b) == 0 {
+ return invalidReference, nil, NewParserError(b, "expected = after a key, but the document ends there")
+ }
+
+ b, err = expect('=', b)
+ if err != nil {
+ return invalidReference, nil, err
+ }
+
+ b = p.parseWhitespace(b)
+
+ valRef, b, err := p.parseVal(b)
+ if err != nil {
+ return ref, b, err
+ }
+
+ p.builder.Chain(valRef, key)
+ p.builder.AttachChild(ref, valRef)
+
+ return ref, b, err
+}
+
+//nolint:cyclop,funlen
+func (p *Parser) parseVal(b []byte) (reference, []byte, error) {
+ // val = string / boolean / array / inline-table / date-time / float / integer
+ ref := invalidReference
+
+ if len(b) == 0 {
+ return ref, nil, NewParserError(b, "expected value, not eof")
+ }
+
+ var err error
+ c := b[0]
+
+ switch c {
+ case '"':
+ var raw []byte
+ var v []byte
+ if scanFollowsMultilineBasicStringDelimiter(b) {
+ raw, v, b, err = p.parseMultilineBasicString(b)
+ } else {
+ raw, v, b, err = p.parseBasicString(b)
+ }
+
+ if err == nil {
+ ref = p.builder.Push(Node{
+ Kind: String,
+ Raw: p.Range(raw),
+ Data: v,
+ })
+ }
+
+ return ref, b, err
+ case '\'':
+ var raw []byte
+ var v []byte
+ if scanFollowsMultilineLiteralStringDelimiter(b) {
+ raw, v, b, err = p.parseMultilineLiteralString(b)
+ } else {
+ raw, v, b, err = p.parseLiteralString(b)
+ }
+
+ if err == nil {
+ ref = p.builder.Push(Node{
+ Kind: String,
+ Raw: p.Range(raw),
+ Data: v,
+ })
+ }
+
+ return ref, b, err
+ case 't':
+ if !scanFollowsTrue(b) {
+ return ref, nil, NewParserError(atmost(b, 4), "expected 'true'")
+ }
+
+ ref = p.builder.Push(Node{
+ Kind: Bool,
+ Data: b[:4],
+ })
+
+ return ref, b[4:], nil
+ case 'f':
+ if !scanFollowsFalse(b) {
+ return ref, nil, NewParserError(atmost(b, 5), "expected 'false'")
+ }
+
+ ref = p.builder.Push(Node{
+ Kind: Bool,
+ Data: b[:5],
+ })
+
+ return ref, b[5:], nil
+ case '[':
+ return p.parseValArray(b)
+ case '{':
+ return p.parseInlineTable(b)
+ default:
+ return p.parseIntOrFloatOrDateTime(b)
+ }
+}
+
+func atmost(b []byte, n int) []byte {
+ if n >= len(b) {
+ return b
+ }
+
+ return b[:n]
+}
+
+func (p *Parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) {
+ v, rest, err := scanLiteralString(b)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ return v, v[1 : len(v)-1], rest, nil
+}
+
+func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) {
+ // inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close
+ // inline-table-open = %x7B ws ; {
+ // inline-table-close = ws %x7D ; }
+ // inline-table-sep = ws %x2C ws ; , Comma
+ // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ]
+ parent := p.builder.Push(Node{
+ Kind: InlineTable,
+ Raw: p.Range(b[:1]),
+ })
+
+ first := true
+
+ var child reference
+
+ b = b[1:]
+
+ var err error
+
+ for len(b) > 0 {
+ previousB := b
+ b = p.parseWhitespace(b)
+
+ if len(b) == 0 {
+ return parent, nil, NewParserError(previousB[:1], "inline table is incomplete")
+ }
+
+ if b[0] == '}' {
+ break
+ }
+
+ if !first {
+ b, err = expect(',', b)
+ if err != nil {
+ return parent, nil, err
+ }
+ b = p.parseWhitespace(b)
+ }
+
+ var kv reference
+
+ kv, b, err = p.parseKeyval(b)
+ if err != nil {
+ return parent, nil, err
+ }
+
+ if first {
+ p.builder.AttachChild(parent, kv)
+ } else {
+ p.builder.Chain(child, kv)
+ }
+ child = kv
+
+ first = false
+ }
+
+ rest, err := expect('}', b)
+
+ return parent, rest, err
+}
+
+//nolint:funlen,cyclop
+func (p *Parser) parseValArray(b []byte) (reference, []byte, error) {
+ // array = array-open [ array-values ] ws-comment-newline array-close
+ // array-open = %x5B ; [
+ // array-close = %x5D ; ]
+ // array-values = ws-comment-newline val ws-comment-newline array-sep array-values
+ // array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ]
+ // array-sep = %x2C ; , Comma
+ // ws-comment-newline = *( wschar / [ comment ] newline )
+ arrayStart := b
+ b = b[1:]
+
+ parent := p.builder.Push(Node{
+ Kind: Array,
+ })
+
+ // First indicates whether the parser is looking for the first element
+ // (non-comment) of the array.
+ first := true
+
+ lastChild := invalidReference
+
+ addChild := func(valueRef reference) {
+ if lastChild == invalidReference {
+ p.builder.AttachChild(parent, valueRef)
+ } else {
+ p.builder.Chain(lastChild, valueRef)
+ }
+ lastChild = valueRef
+ }
+
+ var err error
+ for len(b) > 0 {
+ cref := invalidReference
+ cref, b, err = p.parseOptionalWhitespaceCommentNewline(b)
+ if err != nil {
+ return parent, nil, err
+ }
+
+ if cref != invalidReference {
+ addChild(cref)
+ }
+
+ if len(b) == 0 {
+ return parent, nil, NewParserError(arrayStart[:1], "array is incomplete")
+ }
+
+ if b[0] == ']' {
+ break
+ }
+
+ if b[0] == ',' {
+ if first {
+ return parent, nil, NewParserError(b[0:1], "array cannot start with comma")
+ }
+ b = b[1:]
+
+ cref, b, err = p.parseOptionalWhitespaceCommentNewline(b)
+ if err != nil {
+ return parent, nil, err
+ }
+ if cref != invalidReference {
+ addChild(cref)
+ }
+ } else if !first {
+ return parent, nil, NewParserError(b[0:1], "array elements must be separated by commas")
+ }
+
+ // TOML allows trailing commas in arrays.
+ if len(b) > 0 && b[0] == ']' {
+ break
+ }
+
+ var valueRef reference
+ valueRef, b, err = p.parseVal(b)
+ if err != nil {
+ return parent, nil, err
+ }
+
+ addChild(valueRef)
+
+ cref, b, err = p.parseOptionalWhitespaceCommentNewline(b)
+ if err != nil {
+ return parent, nil, err
+ }
+ if cref != invalidReference {
+ addChild(cref)
+ }
+
+ first = false
+ }
+
+ rest, err := expect(']', b)
+
+ return parent, rest, err
+}
+
+func (p *Parser) parseOptionalWhitespaceCommentNewline(b []byte) (reference, []byte, error) {
+ rootCommentRef := invalidReference
+ latestCommentRef := invalidReference
+
+ addComment := func(ref reference) {
+ if rootCommentRef == invalidReference {
+ rootCommentRef = ref
+ } else if latestCommentRef == invalidReference {
+ p.builder.AttachChild(rootCommentRef, ref)
+ latestCommentRef = ref
+ } else {
+ p.builder.Chain(latestCommentRef, ref)
+ latestCommentRef = ref
+ }
+ }
+
+ for len(b) > 0 {
+ var err error
+ b = p.parseWhitespace(b)
+
+ if len(b) > 0 && b[0] == '#' {
+ var ref reference
+ ref, b, err = p.parseComment(b)
+ if err != nil {
+ return invalidReference, nil, err
+ }
+ if ref != invalidReference {
+ addComment(ref)
+ }
+ }
+
+ if len(b) == 0 {
+ break
+ }
+
+ if b[0] == '\n' || b[0] == '\r' {
+ b, err = p.parseNewline(b)
+ if err != nil {
+ return invalidReference, nil, err
+ }
+ } else {
+ break
+ }
+ }
+
+ return rootCommentRef, b, nil
+}
+
+func (p *Parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) {
+ token, rest, err := scanMultilineLiteralString(b)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ i := 3
+
+ // skip the immediate new line
+ if token[i] == '\n' {
+ i++
+ } else if token[i] == '\r' && token[i+1] == '\n' {
+ i += 2
+ }
+
+ return token, token[i : len(token)-3], rest, err
+}
+
+//nolint:funlen,gocognit,cyclop
+func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) {
+ // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body
+ // ml-basic-string-delim
+ // ml-basic-string-delim = 3quotation-mark
+ // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ]
+ //
+ // mlb-content = mlb-char / newline / mlb-escaped-nl
+ // mlb-char = mlb-unescaped / escaped
+ // mlb-quotes = 1*2quotation-mark
+ // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
+ // mlb-escaped-nl = escape ws newline *( wschar / newline )
+ token, escaped, rest, err := scanMultilineBasicString(b)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ i := 3
+
+ // skip the immediate new line
+ if token[i] == '\n' {
+ i++
+ } else if token[i] == '\r' && token[i+1] == '\n' {
+ i += 2
+ }
+
+ // fast path
+ startIdx := i
+ endIdx := len(token) - len(`"""`)
+
+ if !escaped {
+ str := token[startIdx:endIdx]
+ verr := characters.Utf8TomlValidAlreadyEscaped(str)
+ if verr.Zero() {
+ return token, str, rest, nil
+ }
+ return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8")
+ }
+
+ var builder bytes.Buffer
+
+ // The scanner ensures that the token starts and ends with quotes and that
+ // escapes are balanced.
+ for i < len(token)-3 {
+ c := token[i]
+
+ //nolint:nestif
+ if c == '\\' {
+ // When the last non-whitespace character on a line is an unescaped \,
+ // it will be trimmed along with all whitespace (including newlines) up
+ // to the next non-whitespace character or closing delimiter.
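+ // For example, a body of `a \` followed by a newline and an
+ // indented `b` decodes to "a b": the backslash eats the
+ // newline and the indentation, but not the space before it.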
+
+ isLastNonWhitespaceOnLine := false
+ j := 1
+ findEOLLoop:
+ for ; j < len(token)-3-i; j++ {
+ switch token[i+j] {
+ case ' ', '\t':
+ continue
+ case '\r':
+ if token[i+j+1] == '\n' {
+ continue
+ }
+ case '\n':
+ isLastNonWhitespaceOnLine = true
+ }
+ break findEOLLoop
+ }
+ if isLastNonWhitespaceOnLine {
+ i += j
+ for ; i < len(token)-3; i++ {
+ c := token[i]
+ if !(c == '\n' || c == '\r' || c == ' ' || c == '\t') {
+ i--
+ break
+ }
+ }
+ i++
+ continue
+ }
+
+ // handle escaping
+ i++
+ c = token[i]
+
+ switch c {
+ case '"', '\\':
+ builder.WriteByte(c)
+ case 'b':
+ builder.WriteByte('\b')
+ case 'f':
+ builder.WriteByte('\f')
+ case 'n':
+ builder.WriteByte('\n')
+ case 'r':
+ builder.WriteByte('\r')
+ case 't':
+ builder.WriteByte('\t')
+ case 'e':
+ builder.WriteByte(0x1B)
+ case 'u':
+ x, err := hexToRune(atmost(token[i+1:], 4), 4)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ builder.WriteRune(x)
+ i += 4
+ case 'U':
+ x, err := hexToRune(atmost(token[i+1:], 8), 8)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ builder.WriteRune(x)
+ i += 8
+ default:
+ return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c)
+ }
+ i++
+ } else {
+ size := characters.Utf8ValidNext(token[i:])
+ if size == 0 {
+ return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c)
+ }
+ builder.Write(token[i : i+size])
+ i += size
+ }
+ }
+
+ return token, builder.Bytes(), rest, nil
+}
+
+func (p *Parser) parseKey(b []byte) (reference, []byte, error) {
+ // key = simple-key / dotted-key
+ // simple-key = quoted-key / unquoted-key
+ //
+ // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _
+ // quoted-key = basic-string / literal-string
+ // dotted-key = simple-key 1*( dot-sep simple-key )
+ //
+ // dot-sep = ws %x2E ws ; . Period
+ raw, key, b, err := p.parseSimpleKey(b)
+ if err != nil {
+ return invalidReference, nil, err
+ }
+
+ ref := p.builder.Push(Node{
+ Kind: Key,
+ Raw: p.Range(raw),
+ Data: key,
+ })
+
+ for {
+ b = p.parseWhitespace(b)
+ if len(b) > 0 && b[0] == '.' {
+ b = p.parseWhitespace(b[1:])
+
+ raw, key, b, err = p.parseSimpleKey(b)
+ if err != nil {
+ return ref, nil, err
+ }
+
+ p.builder.PushAndChain(Node{
+ Kind: Key,
+ Raw: p.Range(raw),
+ Data: key,
+ })
+ } else {
+ break
+ }
+ }
+
+ return ref, b, nil
+}
+
+func (p *Parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) {
+ if len(b) == 0 {
+ return nil, nil, nil, NewParserError(b, "expected key but found none")
+ }
+
+ // simple-key = quoted-key / unquoted-key
+ // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _
+ // quoted-key = basic-string / literal-string
+ switch {
+ case b[0] == '\'':
+ return p.parseLiteralString(b)
+ case b[0] == '"':
+ return p.parseBasicString(b)
+ case isUnquotedKeyChar(b[0]):
+ key, rest = scanUnquotedKey(b)
+ return key, key, rest, nil
+ default:
+ return nil, nil, nil, NewParserError(b[0:1], "invalid character at start of key: %c", b[0])
+ }
+}
+
+//nolint:funlen,cyclop
+func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
+ // basic-string = quotation-mark *basic-char quotation-mark
+ // quotation-mark = %x22 ; "
+ // basic-char = basic-unescaped / escaped
+ // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
+ // escaped = escape escape-seq-char
+ // escape-seq-char = %x22 ; " quotation mark U+0022
+ // escape-seq-char =/ %x5C ; \ reverse solidus U+005C
+ // escape-seq-char =/ %x62 ; b backspace U+0008
+ // escape-seq-char =/ %x66 ; f form feed U+000C
+ // escape-seq-char =/ %x6E ; n line feed U+000A
+ // escape-seq-char =/ %x72 ; r carriage return U+000D
+ // escape-seq-char =/ %x74 ; t tab U+0009
+ // escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX
+ // escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX
+ token, escaped, rest, err := scanBasicString(b)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ startIdx := len(`"`)
+ endIdx := len(token) - len(`"`)
+
+ // Fast path. If there is no escape sequence, the string is just
+ // a UTF-8 encoded string, as in Go. In that case, validate the
+ // string and return a direct reference to the buffer.
+ if !escaped {
+ str := token[startIdx:endIdx]
+ verr := characters.Utf8TomlValidAlreadyEscaped(str)
+ if verr.Zero() {
+ return token, str, rest, nil
+ }
+ return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8")
+ }
+
+ i := startIdx
+
+ var builder bytes.Buffer
+
+ // The scanner ensures that the token starts and ends with quotes and that
+ // escapes are balanced.
+ for i < len(token)-1 {
+ c := token[i]
+ if c == '\\' {
+ i++
+ c = token[i]
+
+ switch c {
+ case '"', '\\':
+ builder.WriteByte(c)
+ case 'b':
+ builder.WriteByte('\b')
+ case 'f':
+ builder.WriteByte('\f')
+ case 'n':
+ builder.WriteByte('\n')
+ case 'r':
+ builder.WriteByte('\r')
+ case 't':
+ builder.WriteByte('\t')
+ case 'e':
+ builder.WriteByte(0x1B)
+ case 'u':
+ x, err := hexToRune(token[i+1:len(token)-1], 4)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ builder.WriteRune(x)
+ i += 4
+ case 'U':
+ x, err := hexToRune(token[i+1:len(token)-1], 8)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ builder.WriteRune(x)
+ i += 8
+ default:
+ return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c)
+ }
+ i++
+ } else {
+ size := characters.Utf8ValidNext(token[i:])
+ if size == 0 {
+ return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c)
+ }
+ builder.Write(token[i : i+size])
+ i += size
+ }
+ }
+
+ return token, builder.Bytes(), rest, nil
+}
+
+func hexToRune(b []byte, length int) (rune, error) {
+ if len(b) < length {
+ return -1, NewParserError(b, "unicode point needs %d character, not %d", length, len(b))
+ }
+ b = b[:length]
+
+ var r uint32
+ for i, c := range b {
+ d := uint32(0)
+ switch {
+ case '0' <= c && c <= '9':
+ d = uint32(c - '0')
+ case 'a' <= c && c <= 'f':
+ d = uint32(c - 'a' + 10)
+ case 'A' <= c && c <= 'F':
+ d = uint32(c - 'A' + 10)
+ default:
+ return -1, NewParserError(b[i:i+1], "non-hex character")
+ }
+ r = r*16 + d
+ }
+
+ if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 {
+ return -1, NewParserError(b, "escape sequence is invalid Unicode code point")
+ }
+
+ return rune(r), nil
+}
+
+func (p *Parser) parseWhitespace(b []byte) []byte {
+ // ws = *wschar
+ // wschar = %x20 ; Space
+ // wschar =/ %x09 ; Horizontal tab
+ _, rest := scanWhitespace(b)
+
+ return rest
+}
+
+//nolint:cyclop
+func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) {
+ switch b[0] {
+ case 'i':
+ if !scanFollowsInf(b) {
+ return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'inf'")
+ }
+
+ return p.builder.Push(Node{
+ Kind: Float,
+ Data: b[:3],
+ Raw: p.Range(b[:3]),
+ }), b[3:], nil
+ case 'n':
+ if !scanFollowsNan(b) {
+ return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'nan'")
+ }
+
+ return p.builder.Push(Node{
+ Kind: Float,
+ Data: b[:3],
+ Raw: p.Range(b[:3]),
+ }), b[3:], nil
+ case '+', '-':
+ return p.scanIntOrFloat(b)
+ }
+
+ if len(b) < 3 {
+ return p.scanIntOrFloat(b)
+ }
+
+ s := 5
+ if len(b) < s {
+ s = len(b)
+ }
+
+ for idx, c := range b[:s] {
+ if isDigit(c) {
+ continue
+ }
+
+ if idx == 2 && c == ':' || (idx == 4 && c == '-') {
+ return p.scanDateTime(b)
+ }
+
+ break
+ }
+
+ return p.scanIntOrFloat(b)
+}
+
+func (p *Parser) scanDateTime(b []byte) (reference, []byte, error) {
+ // scans for contiguous characters in [0-9T:Z.+-], and up to one space if
+ // followed by a digit.
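+ // For example: "07:32:00" only sets hasTime (LocalTime),
+ // "1979-05-27" only sets hasDate (LocalDate), and
+ // "1979-05-27T07:32:00Z" sets all three flags (DateTime).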
+ hasDate := false
+ hasTime := false
+ hasTz := false
+ seenSpace := false
+
+ i := 0
+byteLoop:
+ for ; i < len(b); i++ {
+ c := b[i]
+
+ switch {
+ case isDigit(c):
+ case c == '-':
+ hasDate = true
+ const minOffsetOfTz = 8
+ if i >= minOffsetOfTz {
+ hasTz = true
+ }
+ case c == 'T' || c == 't' || c == ':' || c == '.':
+ hasTime = true
+ case c == '+' || c == '-' || c == 'Z' || c == 'z':
+ hasTz = true
+ case c == ' ':
+ if !seenSpace && i+1 < len(b) && isDigit(b[i+1]) {
+ i += 2
+ // Avoid reaching past the end of the document in case the time
+ // is malformed. See TestIssue585.
+ if i >= len(b) {
+ i--
+ }
+ seenSpace = true
+ hasTime = true
+ } else {
+ break byteLoop
+ }
+ default:
+ break byteLoop
+ }
+ }
+
+ var kind Kind
+
+ if hasTime {
+ if hasDate {
+ if hasTz {
+ kind = DateTime
+ } else {
+ kind = LocalDateTime
+ }
+ } else {
+ kind = LocalTime
+ }
+ } else {
+ kind = LocalDate
+ }
+
+ return p.builder.Push(Node{
+ Kind: kind,
+ Data: b[:i],
+ }), b[i:], nil
+}
+
+//nolint:funlen,gocognit,cyclop
+func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) {
+ i := 0
+
+ if len(b) > 2 && b[0] == '0' && b[1] != '.' && b[1] != 'e' && b[1] != 'E' {
+ var isValidRune validRuneFn
+
+ switch b[1] {
+ case 'x':
+ isValidRune = isValidHexRune
+ case 'o':
+ isValidRune = isValidOctalRune
+ case 'b':
+ isValidRune = isValidBinaryRune
+ default:
+ i++
+ }
+
+ if isValidRune != nil {
+ i += 2
+ for ; i < len(b); i++ {
+ if !isValidRune(b[i]) {
+ break
+ }
+ }
+ }
+
+ return p.builder.Push(Node{
+ Kind: Integer,
+ Data: b[:i],
+ Raw: p.Range(b[:i]),
+ }), b[i:], nil
+ }
+
+ isFloat := false
+
+ for ; i < len(b); i++ {
+ c := b[i]
+
+ if c >= '0' && c <= '9' || c == '+' || c == '-' || c == '_' {
+ continue
+ }
+
+ if c == '.' || c == 'e' || c == 'E' {
+ isFloat = true
+
+ continue
+ }
+
+ if c == 'i' {
+ if scanFollowsInf(b[i:]) {
+ return p.builder.Push(Node{
+ Kind: Float,
+ Data: b[:i+3],
+ Raw: p.Range(b[:i+3]),
+ }), b[i+3:], nil
+ }
+
+ return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'i' while scanning for a number")
+ }
+
+ if c == 'n' {
+ if scanFollowsNan(b[i:]) {
+ return p.builder.Push(Node{
+ Kind: Float,
+ Data: b[:i+3],
+ Raw: p.Range(b[:i+3]),
+ }), b[i+3:], nil
+ }
+
+ return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'n' while scanning for a number")
+ }
+
+ break
+ }
+
+ if i == 0 {
+ return invalidReference, b, NewParserError(b, "incomplete number")
+ }
+
+ kind := Integer
+
+ if isFloat {
+ kind = Float
+ }
+
+ return p.builder.Push(Node{
+ Kind: kind,
+ Data: b[:i],
+ Raw: p.Range(b[:i]),
+ }), b[i:], nil
+}
+
+func isDigit(r byte) bool {
+ return r >= '0' && r <= '9'
+}
+
+type validRuneFn func(r byte) bool
+
+func isValidHexRune(r byte) bool {
+ return r >= 'a' && r <= 'f' ||
+ r >= 'A' && r <= 'F' ||
+ r >= '0' && r <= '9' ||
+ r == '_'
+}
+
+func isValidOctalRune(r byte) bool {
+ return r >= '0' && r <= '7' || r == '_'
+}
+
+func isValidBinaryRune(r byte) bool {
+ return r == '0' || r == '1' || r == '_'
+}
+
+func expect(x byte, b []byte) ([]byte, error) {
+ if len(b) == 0 {
+ return nil, NewParserError(b, "expected character %c but the document ended here", x)
+ }
+
+ if b[0] != x {
+ return nil, NewParserError(b[0:1], "expected character %c", x)
+ }
+
+ return b[1:], nil
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go b/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go
new file mode 100644
index 000000000000..0512181d287a
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go
@@ -0,0 +1,270 @@
+package unstable
+
+import "github.com/pelletier/go-toml/v2/internal/characters"
+
+func scanFollows(b []byte, pattern string) bool {
+ n := len(pattern)
+
+ return len(b) >= n && string(b[:n]) == pattern
+}
+
+func scanFollowsMultilineBasicStringDelimiter(b []byte) bool {
+ return scanFollows(b, `"""`)
+}
+
+func scanFollowsMultilineLiteralStringDelimiter(b []byte) bool {
+ return scanFollows(b, `'''`)
+}
+
+func scanFollowsTrue(b []byte) bool {
+ return scanFollows(b, `true`)
+}
+
+func scanFollowsFalse(b []byte) bool {
+ return scanFollows(b, `false`)
+}
+
+func scanFollowsInf(b []byte) bool {
+ return scanFollows(b, `inf`)
+}
+
+func scanFollowsNan(b []byte) bool {
+ return scanFollows(b, `nan`)
+}
+
+func scanUnquotedKey(b []byte) ([]byte, []byte) {
+ // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _
+ for i := 0; i < len(b); i++ {
+ if !isUnquotedKeyChar(b[i]) {
+ return b[:i], b[i:]
+ }
+ }
+
+ return b, b[len(b):]
+}
+
+func isUnquotedKeyChar(r byte) bool {
+ return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' || r == '_'
+}
+
+func scanLiteralString(b []byte) ([]byte, []byte, error) {
+ // literal-string = apostrophe *literal-char apostrophe
+ // apostrophe = %x27 ; ' apostrophe
+ // literal-char = %x09 / %x20-26 / %x28-7E / non-ascii
+ for i := 1; i < len(b); {
+ switch b[i] {
+ case '\'':
+ return b[:i+1], b[i+1:], nil
+ case '\n', '\r':
+ return nil, nil, NewParserError(b[i:i+1], "literal strings cannot have new lines")
+ }
+ size := characters.Utf8ValidNext(b[i:])
+ if size == 0 {
+ return nil, nil, NewParserError(b[i:i+1], "invalid character")
+ }
+ i += size
+ }
+
+ return nil, nil, NewParserError(b[len(b):], "unterminated literal string")
+}
+
+func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) {
+ // ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body
+ // ml-literal-string-delim
+ // ml-literal-string-delim = 3apostrophe
+ // ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ]
+ //
+ // mll-content = mll-char / newline
+ // mll-char = %x09 / %x20-26 / %x28-7E / non-ascii
+ // mll-quotes = 1*2apostrophe
+ for i := 3; i < len(b); {
+ switch b[i] {
+ case '\'':
+ if scanFollowsMultilineLiteralStringDelimiter(b[i:]) {
+ i += 3
+
+ // At this point we have found three apostrophes, and i is
+ // the index of the byte after the third one. The scanner
+ // needs to be eager, because up to two extra apostrophes
+ // can be accepted at the end of the string.
+
+ if i >= len(b) || b[i] != '\'' {
+ return b[:i], b[i:], nil
+ }
+ i++
+
+ if i >= len(b) || b[i] != '\'' {
+ return b[:i], b[i:], nil
+ }
+ i++
+
+ if i < len(b) && b[i] == '\'' {
+ return nil, nil, NewParserError(b[i-3:i+1], "''' not allowed in multiline literal string")
+ }
+
+ return b[:i], b[i:], nil
+ }
+ case '\r':
+ if len(b) < i+2 {
+ return nil, nil, NewParserError(b[len(b):], `need a \n after \r`)
+ }
+ if b[i+1] != '\n' {
+ return nil, nil, NewParserError(b[i:i+2], `need a \n after \r`)
+ }
+ i += 2 // skip the \n
+ continue
+ }
+ size := characters.Utf8ValidNext(b[i:])
+ if size == 0 {
+ return nil, nil, NewParserError(b[i:i+1], "invalid character")
+ }
+ i += size
+ }
+
+ return nil, nil, NewParserError(b[len(b):], `multiline literal string not terminated by '''`)
+}
+
+func scanWindowsNewline(b []byte) ([]byte, []byte, error) {
+ const lenCRLF = 2
+ if len(b) < lenCRLF {
+ return nil, nil, NewParserError(b, "windows new line expected")
+ }
+
+ if b[1] != '\n' {
+ return nil, nil, NewParserError(b, `windows new line should be \r\n`)
+ }
+
+ return b[:lenCRLF], b[lenCRLF:], nil
+}
+
+func scanWhitespace(b []byte) ([]byte, []byte) {
+ for i := 0; i < len(b); i++ {
+ switch b[i] {
+ case ' ', '\t':
+ continue
+ default:
+ return b[:i], b[i:]
+ }
+ }
+
+ return b, b[len(b):]
+}
+
+func scanComment(b []byte) ([]byte, []byte, error) {
+ // comment-start-symbol = %x23 ; #
+ // non-ascii = %x80-D7FF / %xE000-10FFFF
+ // non-eol = %x09 / %x20-7F / non-ascii
+ //
+ // comment = comment-start-symbol *non-eol
+
+ for i := 1; i < len(b); {
+ if b[i] == '\n' {
+ return b[:i], b[i:], nil
+ }
+ if b[i] == '\r' {
+ if i+1 < len(b) && b[i+1] == '\n' {
+ return b[:i+1], b[i+1:], nil
+ }
+ return nil, nil, NewParserError(b[i:i+1], "invalid character in comment")
+ }
+ size := characters.Utf8ValidNext(b[i:])
+ if size == 0 {
+ return nil, nil, NewParserError(b[i:i+1], "invalid character in comment")
+ }
+
+ i += size
+ }
+
+ return b, b[len(b):], nil
+}
+
+func scanBasicString(b []byte) ([]byte, bool, []byte, error) {
+ // basic-string = quotation-mark *basic-char quotation-mark
+ // quotation-mark = %x22 ; "
+ // basic-char = basic-unescaped / escaped
+ // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
+ // escaped = escape escape-seq-char
+ escaped := false
+ i := 1
+
+ for ; i < len(b); i++ {
+ switch b[i] {
+ case '"':
+ return b[:i+1], escaped, b[i+1:], nil
+ case '\n', '\r':
+ return nil, escaped, nil, NewParserError(b[i:i+1], "basic strings cannot have new lines")
+ case '\\':
+ if len(b) < i+2 {
+ return nil, escaped, nil, NewParserError(b[i:i+1], "need a character after \\")
+ }
+ escaped = true
+ i++ // skip the next character
+ }
+ }
+
+ return nil, escaped, nil, NewParserError(b[len(b):], `basic string not terminated by "`)
+}
+
+func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) {
+ // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body
+ //                   ml-basic-string-delim
+ // ml-basic-string-delim = 3quotation-mark
+ // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ]
+ //
+ // mlb-content = mlb-char / newline / mlb-escaped-nl
+ // mlb-char = mlb-unescaped / escaped
+ // mlb-quotes = 1*2quotation-mark
+ // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
+ // mlb-escaped-nl = escape ws newline *( wschar / newline )
+
+ escaped := false
+ i := 3
+
+ for ; i < len(b); i++ {
+ switch b[i] {
+ case '"':
+ if scanFollowsMultilineBasicStringDelimiter(b[i:]) {
+ i += 3
+
+ // At that point we found 3 quotation marks, and i is
+ // the index of the byte after the third one. The
+ // scanner needs to be eager, because up to two extra
+ // quotation marks can still be accepted at the end of
+ // the string.
+
+ if i >= len(b) || b[i] != '"' {
+ return b[:i], escaped, b[i:], nil
+ }
+ i++
+
+ if i >= len(b) || b[i] != '"' {
+ return b[:i], escaped, b[i:], nil
+ }
+ i++
+
+ if i < len(b) && b[i] == '"' {
+ return nil, escaped, nil, NewParserError(b[i-3:i+1], `""" not allowed in multiline basic string`)
+ }
+
+ return b[:i], escaped, b[i:], nil
+ }
+ case '\\':
+ if len(b) < i+2 {
+ return nil, escaped, nil, NewParserError(b[len(b):], "need a character after \\")
+ }
+ escaped = true
+ i++ // skip the next character
+ case '\r':
+ if len(b) < i+2 {
+ return nil, escaped, nil, NewParserError(b[len(b):], `need a \n after \r`)
+ }
+ if b[i+1] != '\n' {
+ return nil, escaped, nil, NewParserError(b[i:i+2], `need a \n after \r`)
+ }
+ i++ // skip the \n
+ }
+ }
+
+ return nil, escaped, nil, NewParserError(b[len(b):], `multiline basic string not terminated by """`)
+}
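
As a rough illustration of the scanners above (an informal sketch against this
package's unexported functions, with made-up inputs):

    tok, rest, _ := scanLiteralString([]byte(`'abc' # tail`))
    fmt.Printf("%q %q\n", tok, rest) // "'abc'" " # tail"

    // The multiline scanner is eager: up to two extra apostrophes are folded
    // into the closing delimiter and become part of the string body.
    tok, rest, _ = scanMultilineLiteralString([]byte(`'''a''''`))
    fmt.Printf("%q %q\n", tok, rest) // "'''a''''" "" (body is "a'")

    // A third extra apostrophe is rejected.
    _, _, err := scanMultilineLiteralString([]byte(`'''a''''''`))
    fmt.Println(err) // ''' not allowed in multiline literal string
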
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go
new file mode 100644
index 000000000000..00cfd6de4581
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go
@@ -0,0 +1,7 @@
+package unstable
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a TOML document.
+type Unmarshaler interface {
+ UnmarshalTOML(value *Node) error
+}
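
The interface hands the implementing type the raw parsed node. A minimal
sketch of an implementation (the rawKind type is hypothetical and only
illustrates the shape of the contract):

    import "github.com/pelletier/go-toml/v2/unstable"

    // rawKind records which TOML kind a value was decoded from.
    type rawKind struct {
        kind unstable.Kind
    }

    func (r *rawKind) UnmarshalTOML(value *unstable.Node) error {
        r.kind = value.Kind // e.g. unstable.String or unstable.Integer
        return nil
    }
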
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
index dd878a30ee9a..b9cc55abbb0a 100644
--- a/vendor/github.com/prometheus/client_golang/NOTICE
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format
http://github.com/golang/protobuf/
Copyright 2010 The Go Authors
See source code for license details.
-
-Support for streaming Protocol Buffer messages for the Go language (golang).
-/~https://github.com/matttproud/golang_protobuf_extensions
-Copyright 2013 Matt T. Proud
-Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
new file mode 100644
index 000000000000..65d761bc9f28
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
new file mode 100644
index 000000000000..8547c8dfd187
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
@@ -0,0 +1,145 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package header provides functions for parsing HTTP headers.
+package header
+
+import (
+ "net/http"
+ "strings"
+)
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // OCTET = <any 8-bit sequence of data>
+ // CHAR = <any US-ASCII character (octets 0 - 127)>
+ // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR = <US-ASCII CR, carriage return (13)>
+ // LF = <US-ASCII LF, linefeed (10)>
+ // SP = <US-ASCII SP, space (32)>
+ // HT = <US-ASCII HT, horizontal-tab (9)>
+ // <"> = <US-ASCII double-quote mark (34)>
+ // CRLF = CR LF
+ // LWS = [CRLF] 1*( SP | HT )
+ // TEXT = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token = 1*<any CHAR except CTLs or separators>
+ // qdtext = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+ if strings.ContainsRune(" \t\r\n", rune(c)) {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+// AcceptSpec describes an Accept* header.
+type AcceptSpec struct {
+ Value string
+ Q float64
+}
+
+// ParseAccept parses Accept* headers.
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
+loop:
+ for _, s := range header[key] {
+ for {
+ var spec AcceptSpec
+ spec.Value, s = expectTokenSlash(s)
+ if spec.Value == "" {
+ continue loop
+ }
+ spec.Q = 1.0
+ s = skipSpace(s)
+ if strings.HasPrefix(s, ";") {
+ s = skipSpace(s[1:])
+ if !strings.HasPrefix(s, "q=") {
+ continue loop
+ }
+ spec.Q, s = expectQuality(s[2:])
+ if spec.Q < 0.0 {
+ continue loop
+ }
+ }
+ specs = append(specs, spec)
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ",") {
+ continue loop
+ }
+ s = skipSpace(s[1:])
+ }
+ }
+ return
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectTokenSlash(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ b := s[i]
+ if (octetTypes[b]&isToken == 0) && b != '/' {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func expectQuality(s string) (q float64, rest string) {
+ switch {
+ case len(s) == 0:
+ return -1, ""
+ case s[0] == '0':
+ q = 0
+ case s[0] == '1':
+ q = 1
+ default:
+ return -1, ""
+ }
+ s = s[1:]
+ if !strings.HasPrefix(s, ".") {
+ return q, s
+ }
+ s = s[1:]
+ i := 0
+ n := 0
+ d := 1
+ for ; i < len(s); i++ {
+ b := s[i]
+ if b < '0' || b > '9' {
+ break
+ }
+ n = n*10 + int(b) - '0'
+ d *= 10
+ }
+ return q + float64(n)/float64(d), s[i:]
+}
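
To make the parsing concrete, a small sketch of what ParseAccept yields for a
hypothetical header (per expectQuality, q defaults to 1.0 and only values
starting with 0 or 1 are accepted):

    h := http.Header{"Accept-Encoding": {"gzip;q=0.8, br"}}
    specs := header.ParseAccept(h, "Accept-Encoding")
    // specs == []header.AcceptSpec{{Value: "gzip", Q: 0.8}, {Value: "br", Q: 1}}
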
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
new file mode 100644
index 000000000000..2e45780b74b9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
@@ -0,0 +1,36 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+package httputil
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header"
+)
+
+// NegotiateContentEncoding returns the best offered content encoding for the
+// request's Accept-Encoding header. If two offers match with equal weight,
+// then the offer earlier in the list is preferred. If no offers are
+// acceptable, then "" is returned.
+func NegotiateContentEncoding(r *http.Request, offers []string) string {
+ bestOffer := "identity"
+ bestQ := -1.0
+ specs := header.ParseAccept(r.Header, "Accept-Encoding")
+ for _, offer := range offers {
+ for _, spec := range specs {
+ if spec.Q > bestQ &&
+ (spec.Value == "*" || spec.Value == offer) {
+ bestQ = spec.Q
+ bestOffer = offer
+ }
+ }
+ }
+ if bestQ == 0 {
+ bestOffer = ""
+ }
+ return bestOffer
+}
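
A sketch of the negotiation with illustrative offers and a made-up request:

    req, _ := http.NewRequest(http.MethodGet, "/metrics", nil)
    req.Header.Set("Accept-Encoding", "zstd;q=0.5, gzip")
    enc := httputil.NegotiateContentEncoding(req, []string{"zstd", "gzip"})
    // enc == "gzip": it matches at the default q=1.0, beating zstd's q=0.5.
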
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go
index 3aa8d0590ba5..b22d862fbc63 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go
@@ -22,7 +22,7 @@ import "github.com/prometheus/client_golang/prometheus"
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the expvar Collector is inherently slower
// than native Prometheus metrics. Thus, the expvar Collector is probably great
-// for experiments and prototying, but you should seriously consider a more
+// for experiments and prototyping, but you should seriously consider a more
// direct implementation of Prometheus metrics for monitoring production
// systems.
//
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go
index 246c5ea943c8..cc4ef1077e8c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go
@@ -28,6 +28,8 @@ var (
MetricsAll = GoRuntimeMetricsRule{regexp.MustCompile("/.*")}
// MetricsGC allows only GC metrics to be collected from Go runtime.
// e.g. go_gc_cycles_automatic_gc_cycles_total
+ // NOTE: This does not include the new class of "/cpu/classes/gc/..." metrics.
+ // Use a custom metric rule to access those.
MetricsGC = GoRuntimeMetricsRule{regexp.MustCompile(`^/gc/.*`)}
// MetricsMemory allows only memory metrics to be collected from Go runtime.
// e.g. go_memory_classes_heap_free_bytes
@@ -35,6 +37,9 @@ var (
// MetricsScheduler allows only scheduler metrics to be collected from Go runtime.
// e.g. go_sched_goroutines_goroutines
MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)}
+ // MetricsDebug allows only debug metrics to be collected from Go runtime.
+ // e.g. go_godebug_non_default_behavior_gocachetest_events_total
+ MetricsDebug = GoRuntimeMetricsRule{regexp.MustCompile(`^/godebug/.*`)}
)
// WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as:
@@ -42,7 +47,6 @@ var (
// go_memstats_alloc_bytes
// go_memstats_alloc_bytes_total
// go_memstats_sys_bytes
-// go_memstats_lookups_total
// go_memstats_mallocs_total
// go_memstats_frees_total
// go_memstats_heap_alloc_bytes
@@ -130,16 +134,19 @@ type GoCollectionOption uint32
const (
// GoRuntimeMemStatsCollection represents the metrics represented by runtime.MemStats structure.
- // Deprecated. Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector.
+ //
+ // Deprecated: Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector.
GoRuntimeMemStatsCollection GoCollectionOption = 1 << iota
// GoRuntimeMetricsCollection is the new set of metrics represented by runtime/metrics package.
- // Deprecated. Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")})
+ //
+ // Deprecated: Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")})
// function to enable those metrics in the collector.
GoRuntimeMetricsCollection
)
// WithGoCollections allows enabling different collections for Go collector on top of base metrics.
-// Deprecated. Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics.
+//
+// Deprecated: Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics.
func WithGoCollections(flags GoCollectionOption) func(options *internal.GoCollectorOptions) {
return func(options *internal.GoCollectorOptions) {
if flags&GoRuntimeMemStatsCollection == 0 {
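
Since MetricsDebug is new in this bump, a sketch of opting into it alongside
the GC rules (the registry setup is illustrative):

    reg := prometheus.NewRegistry()
    reg.MustRegister(collectors.NewGoCollector(
        collectors.WithGoCollectorRuntimeMetrics(
            collectors.MetricsGC,
            collectors.MetricsDebug, // e.g. go_godebug_non_default_behavior_* metrics
        ),
    ))
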
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index a912b75a05b7..4ce84e7a80e5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -20,6 +20,7 @@ import (
"time"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
// Counter is a Metric that represents a single numerical value that only ever
@@ -59,6 +60,18 @@ type ExemplarAdder interface {
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts
+// CounterVecOpts bundles the options to create a CounterVec metric.
+// It is mandatory to set CounterOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type CounterVecOpts struct {
+ CounterOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation also implements ExemplarAdder. It is safe to
@@ -78,8 +91,12 @@ func NewCounter(opts CounterOpts) Counter {
nil,
opts.ConstLabels,
)
- result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
+ if opts.now == nil {
+ opts.now = time.Now
+ }
+ result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
result.init(result) // Init self-collection.
+ result.createdTs = timestamppb.New(opts.now())
return result
}
@@ -94,10 +111,12 @@ type counter struct {
selfCollector
desc *Desc
+ createdTs *timestamppb.Timestamp
labelPairs []*dto.LabelPair
exemplar atomic.Value // Containing nil or a *dto.Exemplar.
- now func() time.Time // To mock out time.Now() for testing.
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
}
func (c *counter) Desc() *Desc {
@@ -147,8 +166,7 @@ func (c *counter) Write(out *dto.Metric) error {
exemplar = e.(*dto.Exemplar)
}
val := c.get()
-
- return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
+ return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
}
func (c *counter) updateExemplar(v float64, l Labels) {
@@ -174,19 +192,31 @@ type CounterVec struct {
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
- desc := NewDesc(
+ return V2.NewCounterVec(CounterVecOpts{
+ CounterOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
+ if opts.now == nil {
+ opts.now = time.Now
+ }
return &CounterVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- if len(lvs) != len(desc.variableLabels) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+ if len(lvs) != len(desc.variableLabels.names) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
}
- result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
+ result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
result.init(result) // Init self-collection.
+ result.createdTs = timestamppb.New(opts.now())
return result
}),
}
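
The V2 constructor threads label constraints through to the Desc. A sketch
(the metric name and the strings.ToLower constraint are illustrative):

    requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
        CounterOpts: prometheus.CounterOpts{
            Name: "http_requests_total",
            Help: "Total HTTP requests.",
        },
        VariableLabels: prometheus.ConstrainedLabels{
            {Name: "method", Constraint: strings.ToLower},
        },
    })
    requests.WithLabelValues("GET").Inc() // counted as method="get"
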
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 8bc5e44e2fc4..68ffe3c24807 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -14,20 +14,16 @@
package prometheus
import (
- "errors"
"fmt"
"sort"
"strings"
"github.com/cespare/xxhash/v2"
-
- "github.com/prometheus/client_golang/prometheus/internal"
-
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
+ dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
+ "google.golang.org/protobuf/proto"
- dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/client_golang/prometheus/internal"
)
// Desc is the descriptor used by every Prometheus Metric. It is essentially
@@ -54,9 +50,9 @@ type Desc struct {
// constLabelPairs contains precalculated DTO label pairs based on
// the constant labels.
constLabelPairs []*dto.LabelPair
- // variableLabels contains names of labels for which the metric
- // maintains variable values.
- variableLabels []string
+ // variableLabels contains the names of the labels for which the metric
+ // maintains variable values, plus their optional normalization functions.
+ variableLabels *compiledLabels
// id is a hash of the values of the ConstLabels and fqName. This
// must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor.
@@ -80,10 +76,24 @@ type Desc struct {
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names and normalization functions. Their
+// label values are variable and therefore not part of the Desc. (They are managed
+// within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
d := &Desc{
fqName: fqName,
help: help,
- variableLabels: variableLabels,
+ variableLabels: variableLabels.compile(),
}
if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
@@ -93,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// their sorted label names) plus the fqName (at position 0).
labelValues := make([]string, 1, len(constLabels)+1)
labelValues[0] = fqName
- labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
labelNameSet := map[string]struct{}{}
// First add only the const label names and sort them...
for labelName := range constLabels {
@@ -118,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
- for _, labelName := range variableLabels {
- if !checkLabelName(labelName) {
- d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+ for _, label := range d.variableLabels.names {
+ if !checkLabelName(label) {
+ d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
return d
}
- labelNames = append(labelNames, "$"+labelName)
- labelNameSet[labelName] = struct{}{}
+ labelNames = append(labelNames, "$"+label)
+ labelNameSet[label] = struct{}{}
}
if len(labelNames) != len(labelNameSet) {
- d.err = errors.New("duplicate label names")
+ d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
return d
}
@@ -179,11 +189,19 @@ func (d *Desc) String() string {
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
)
}
+ vlStrings := make([]string, 0, len(d.variableLabels.names))
+ for _, vl := range d.variableLabels.names {
+ if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
+ vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
+ } else {
+ vlStrings = append(vlStrings, vl)
+ }
+ }
return fmt.Sprintf(
- "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
d.fqName,
d.help,
strings.Join(lpStrings, ","),
- d.variableLabels,
+ strings.Join(vlStrings, ","),
)
}
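
Continuing the CounterVec sketch above, a constrained label is marked with
c(...) in the descriptor's string form, roughly:

    fmt.Println(requests.WithLabelValues("get").Desc())
    // Desc{fqName: "http_requests_total", help: "Total HTTP requests.",
    //      constLabels: {}, variableLabels: {c(method)}}
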
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index 811072cbd54f..962608f02c65 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -37,35 +37,35 @@
//
// type metrics struct {
// cpuTemp prometheus.Gauge
-// hdFailures *prometheus.CounterVec
+// hdFailures *prometheus.CounterVec
// }
//
// func NewMetrics(reg prometheus.Registerer) *metrics {
-// m := &metrics{
-// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
-// Name: "cpu_temperature_celsius",
-// Help: "Current temperature of the CPU.",
-// }),
-// hdFailures: prometheus.NewCounterVec(
-// prometheus.CounterOpts{
-// Name: "hd_errors_total",
-// Help: "Number of hard-disk errors.",
-// },
-// []string{"device"},
-// ),
-// }
-// reg.MustRegister(m.cpuTemp)
-// reg.MustRegister(m.hdFailures)
-// return m
+// m := &metrics{
+// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// }),
+// hdFailures: prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// ),
+// }
+// reg.MustRegister(m.cpuTemp)
+// reg.MustRegister(m.hdFailures)
+// return m
// }
//
// func main() {
-// // Create a non-global registry.
-// reg := prometheus.NewRegistry()
+// // Create a non-global registry.
+// reg := prometheus.NewRegistry()
//
-// // Create new metrics and register them using the custom registry.
-// m := NewMetrics(reg)
-// // Set values for the new created metrics.
+// // Create new metrics and register them using the custom registry.
+// m := NewMetrics(reg)
+// // Set values for the newly created metrics.
// m.cpuTemp.Set(65.3)
// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
index c41ab37f3bb9..de5a85629318 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -48,7 +48,7 @@ func (e *expvarCollector) Collect(ch chan<- Metric) {
continue
}
var v interface{}
- labels := make([]string, len(desc.variableLabels))
+ labels := make([]string, len(desc.variableLabels.names))
if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
ch <- NewInvalidMetric(desc, err)
continue
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index 21271a5bb462..dd2eac940675 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -55,6 +55,18 @@ type Gauge interface {
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts
+// GaugeVecOpts bundles the options to create a GaugeVec metric.
+// It is mandatory to set GaugeOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type GaugeVecOpts struct {
+ GaugeOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// NewGauge creates a new Gauge based on the provided GaugeOpts.
//
// The returned implementation is optimized for a fast Set method. If you have a
@@ -123,7 +135,7 @@ func (g *gauge) Sub(val float64) {
func (g *gauge) Write(out *dto.Metric) error {
val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
- return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
+ return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -138,16 +150,24 @@ type GaugeVec struct {
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
- desc := NewDesc(
+ return V2.NewGaugeVec(GaugeVecOpts{
+ GaugeOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &GaugeVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- if len(lvs) != len(desc.variableLabels) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+ if len(lvs) != len(desc.variableLabels.names) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
}
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index ad9a71a5e0d4..520cbd7d418f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -22,13 +22,13 @@ import (
// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so
// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is
-// populated using runtime/metrics.
+// populated using runtime/metrics. Those are the defaults we can't alter.
func goRuntimeMemStats() memStatsMetrics {
return memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
- "Number of bytes allocated and still in use.",
+ "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
@@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("alloc_bytes_total"),
- "Total number of bytes allocated, even if freed.",
+ "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
@@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
- "Number of bytes obtained from system.",
+ "Number of bytes obtained from system. Equals to /memory/classes/total:byte.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("lookups_total"),
- "Total number of pointer lookups.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
- valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("mallocs_total"),
- "Total number of mallocs.",
+ // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback; keeping both is a waste of bytes for everybody, but for compatibility reasons we can't really rename/remove a useful metric.
+ "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
@@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("frees_total"),
- "Total number of frees.",
+ "Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
@@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_alloc_bytes"),
- "Number of heap bytes allocated and still in use.",
+ "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
@@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_sys_bytes"),
- "Number of heap bytes obtained from system.",
+ "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
@@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_idle_bytes"),
- "Number of heap bytes waiting to be used.",
+ "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
@@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_inuse_bytes"),
- "Number of heap bytes that are in use.",
+ "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
@@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_released_bytes"),
- "Number of heap bytes released to OS.",
+ "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
@@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
- "Number of allocated objects.",
+ "Number of currently allocated objects. Equals to /gc/heap/objects:objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
@@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("stack_inuse_bytes"),
- "Number of bytes in use by the stack allocator.",
+ "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
@@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("stack_sys_bytes"),
- "Number of bytes obtained from system for stack allocator.",
+ "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
@@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mspan_inuse_bytes"),
- "Number of bytes in use by mspan structures.",
+ "Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
@@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mspan_sys_bytes"),
- "Number of bytes used for mspan structures obtained from system.",
+ "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
@@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mcache_inuse_bytes"),
- "Number of bytes in use by mcache structures.",
+ "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
@@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mcache_sys_bytes"),
- "Number of bytes used for mcache structures obtained from system.",
+ "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
@@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("buck_hash_sys_bytes"),
- "Number of bytes used by the profiling bucket hash table.",
+ "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
@@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("gc_sys_bytes"),
- "Number of bytes used for garbage collection system metadata.",
+ "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
@@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("other_sys_bytes"),
- "Number of bytes used for other system allocations.",
+ "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
@@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("next_gc_bytes"),
- "Number of heap bytes when next garbage collection will take place.",
+ "Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
@@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector {
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
- "A summary of the pause duration of garbage collection cycles.",
+ "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.",
nil, nil),
gcLastTimeDesc: NewDesc(
"go_memstats_last_gc_time_seconds",
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
index 3a2d55e84b1e..511746417291 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
@@ -17,17 +17,17 @@
package prometheus
import (
+ "fmt"
"math"
"runtime"
"runtime/metrics"
"strings"
"sync"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- dto "github.com/prometheus/client_model/go"
-
"github.com/prometheus/client_golang/prometheus/internal"
+
+ dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
)
const (
@@ -154,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions {
"/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
},
RuntimeMetricRules: []internal.GoCollectorRule{
- //{Matcher: regexp.MustCompile("")},
+ // Recommended metrics we want by default from runtime/metrics.
+ {Matcher: internal.GoCollectorDefaultRuntimeMetrics},
},
}
}
@@ -204,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
continue
}
+ help := attachOriginalName(d.Description.Description, d.Name)
sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
@@ -215,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
m = newBatchHistogram(
NewDesc(
BuildFQName(namespace, subsystem, name),
- d.Description.Description,
+ help,
nil,
nil,
),
@@ -227,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
Namespace: namespace,
Subsystem: subsystem,
Name: name,
- Help: d.Description.Description,
+ Help: help,
},
)
} else {
@@ -235,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
Namespace: namespace,
Subsystem: subsystem,
Name: name,
- Help: d.Description.Description,
+ Help: help,
})
}
metricSet = append(metricSet, m)
@@ -285,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
}
}
+func attachOriginalName(desc, origName string) string {
+ return fmt.Sprintf("%s Sourced from %s", desc, origName)
+}
+
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
c.base.Describe(ch)
@@ -377,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 {
//
// This should never happen because we always populate our metric
// set from the runtime/metrics package.
- panic("unexpected unsupported metric")
+ panic("unexpected bad kind metric")
default:
// Unsupported metric kind.
//
// This should never happen because we check for this during initialization
// and flag and filter metrics whose kinds we don't understand.
- panic("unexpected unsupported metric kind")
+ panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind()))
}
}
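
For intuition, the new help wiring produces strings like the following
(package-internal sketch; the description and metric name are illustrative):

    help := attachOriginalName("Count of live goroutines.", "/sched/goroutines:goroutines")
    // help == "Count of live goroutines. Sourced from /sched/goroutines:goroutines"
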
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 4c873a01c3d3..8d35f2d8ae96 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -22,10 +22,10 @@ import (
"sync/atomic"
"time"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
-
dto "github.com/prometheus/client_model/go"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
// nativeHistogramBounds for the frac of observed values. Only relevant for
@@ -392,7 +392,7 @@ type HistogramOpts struct {
// zero, it is replaced by default buckets. The default buckets are
// DefBuckets if no buckets for a native histogram (see below) are used,
// otherwise the default is no buckets. (In other words, if you want to
- // use both reguler buckets and buckets for a native histogram, you have
+ // use both regular buckets and buckets for a native histogram, you have
// to define the regular buckets here explicitly.)
Buckets []float64
@@ -402,7 +402,7 @@ type HistogramOpts struct {
// Histogram by a Prometheus server with that feature enabled (requires
// Prometheus v2.40+). Sparse buckets are exponential buckets covering
// the whole float64 range (with the exception of the “zero” bucket, see
- // SparseBucketsZeroThreshold below). From any one bucket to the next,
+ // NativeHistogramZeroThreshold below). From any one bucket to the next,
// the width of the bucket grows by a constant
// factor. NativeHistogramBucketFactor provides an upper bound for this
// factor (exception see below). The smaller
@@ -414,8 +414,8 @@ type HistogramOpts struct {
// and 2, same as between 2 and 4, and 4 and 8, etc.).
//
// Details about the actually used factor: The factor is calculated as
- // 2^(2^n), where n is an integer number between (and including) -8 and
- // 4. n is chosen so that the resulting factor is the largest that is
+ // 2^(2^-n), where n is an integer number between (and including) -4 and
+ // 8. n is chosen so that the resulting factor is the largest that is
// still smaller or equal to NativeHistogramBucketFactor. Note that the
// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
// ). If NativeHistogramBucketFactor is greater than 1 but smaller than
@@ -429,44 +429,83 @@ type HistogramOpts struct {
// a major version bump.
NativeHistogramBucketFactor float64
// All observations with an absolute value of less or equal
- // NativeHistogramZeroThreshold are accumulated into a “zero”
- // bucket. For best results, this should be close to a bucket
- // boundary. This is usually the case if picking a power of two. If
+ // NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
+ // For best results, this should be close to a bucket boundary. This is
+ // usually the case if picking a power of two. If
// NativeHistogramZeroThreshold is left at zero,
- // DefSparseBucketsZeroThreshold is used as the threshold. To configure
- // a zero bucket with an actual threshold of zero (i.e. only
+ // DefNativeHistogramZeroThreshold is used as the threshold. To
+ // configure a zero bucket with an actual threshold of zero (i.e. only
// observations of precisely zero will go into the zero bucket), set
// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
// constant (or any negative float value).
NativeHistogramZeroThreshold float64
- // The remaining fields define a strategy to limit the number of
+ // The next three fields define a strategy to limit the number of
// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
// at zero, the number of buckets is not limited. (Note that this might
// lead to unbounded memory consumption if the values observed by the
// Histogram are sufficiently wide-spread. In particular, this could be
// used as a DoS attack vector. Where the observed values depend on
// external inputs, it is highly recommended to set a
- // NativeHistogramMaxBucketNumber.) Once the set
+ // NativeHistogramMaxBucketNumber.) Once the set
// NativeHistogramMaxBucketNumber is exceeded, the following strategy is
- // enacted: First, if the last reset (or the creation) of the histogram
- // is at least NativeHistogramMinResetDuration ago, then the whole
- // histogram is reset to its initial state (including regular
- // buckets). If less time has passed, or if
- // NativeHistogramMinResetDuration is zero, no reset is
- // performed. Instead, the zero threshold is increased sufficiently to
- // reduce the number of buckets to or below
- // NativeHistogramMaxBucketNumber, but not to more than
- // NativeHistogramMaxZeroThreshold. Thus, if
- // NativeHistogramMaxZeroThreshold is already at or below the current
- // zero threshold, nothing happens at this step. After that, if the
- // number of buckets still exceeds NativeHistogramMaxBucketNumber, the
- // resolution of the histogram is reduced by doubling the width of the
- // sparse buckets (up to a growth factor between one bucket to the next
- // of 2^(2^4) = 65536, see above).
+ // enacted:
+ // - First, if the last reset (or the creation) of the histogram is at
+ // least NativeHistogramMinResetDuration ago, then the whole
+ // histogram is reset to its initial state (including regular
+ // buckets).
+ // - If less time has passed, or if NativeHistogramMinResetDuration is
+ // zero, no reset is performed. Instead, the zero threshold is
+ // increased sufficiently to reduce the number of buckets to or below
+ // NativeHistogramMaxBucketNumber, but not to more than
+ // NativeHistogramMaxZeroThreshold. Thus, if
+ // NativeHistogramMaxZeroThreshold is already at or below the current
+ // zero threshold, nothing happens at this step.
+ // - After that, if the number of buckets still exceeds
+ // NativeHistogramMaxBucketNumber, the resolution of the histogram is
+ // reduced by doubling the width of the sparse buckets (up to a
+ // growth factor between one bucket to the next of 2^(2^4) = 65536,
+ // see above).
+ // - Any increased zero threshold or reduced resolution is reset back
+ // to their original values once NativeHistogramMinResetDuration has
+ // passed (since the last reset or the creation of the histogram).
NativeHistogramMaxBucketNumber uint32
NativeHistogramMinResetDuration time.Duration
NativeHistogramMaxZeroThreshold float64
+
+ // NativeHistogramMaxExemplars limits the number of exemplars
+ // that are kept in memory for each native histogram. If you leave it at
+ // zero, a default value of 10 is used. If no exemplars should be kept specifically
+ // for native histograms, set it to a negative value. (Scrapers can
+ // still use the exemplars exposed for classic buckets, which are managed
+ // independently.)
+ NativeHistogramMaxExemplars int
+ // NativeHistogramExemplarTTL is only checked once
+ // NativeHistogramMaxExemplars is exceeded. In that case, the
+ // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL.
+ // Otherwise, the older exemplar in the pair of exemplars that are closest
+ // together (on an exponential scale) is removed.
+ // If NativeHistogramExemplarTTL is left at its zero value, a default value of
+ // 5m is used. To always delete the oldest exemplar, set it to a negative value.
+ NativeHistogramExemplarTTL time.Duration
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
+
+ // afterFunc is for testing purposes, by default it's time.AfterFunc.
+ afterFunc func(time.Duration, func()) *time.Timer
+}
+
+// HistogramVecOpts bundles the options to create a HistogramVec metric.
+// It is mandatory to set HistogramOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type HistogramVecOpts struct {
+ HistogramOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
}
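
A minimal sketch of a histogram exercising the new native-histogram exemplar
fields (all values are illustrative):

    h := prometheus.NewHistogram(prometheus.HistogramOpts{
        Name:                            "request_duration_seconds",
        Help:                            "Request latency.",
        NativeHistogramBucketFactor:     1.1,
        NativeHistogramMaxBucketNumber:  160,
        NativeHistogramMinResetDuration: time.Hour,
        NativeHistogramMaxExemplars:     16,               // keep at most 16 exemplars
        NativeHistogramExemplarTTL:      10 * time.Minute, // then evict stale ones first
    })
    h.(prometheus.ExemplarObserver).ObserveWithExemplar(
        0.42, prometheus.Labels{"trace_id": "abc123"},
    )
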
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
@@ -488,11 +527,11 @@ func NewHistogram(opts HistogramOpts) Histogram {
}
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
- if len(desc.variableLabels) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+ if len(desc.variableLabels.names) != len(labelValues) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
}
- for _, n := range desc.variableLabels {
+ for _, n := range desc.variableLabels.names {
if n == bucketLabel {
panic(errBucketLabelNotAllowed)
}
@@ -503,6 +542,13 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
}
+ if opts.now == nil {
+ opts.now = time.Now
+ }
+ if opts.afterFunc == nil {
+ opts.afterFunc = time.AfterFunc
+ }
+
h := &histogram{
desc: desc,
upperBounds: opts.Buckets,
@@ -510,8 +556,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber,
nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
- lastResetTime: time.Now(),
- now: time.Now,
+ lastResetTime: opts.now(),
+ now: opts.now,
+ afterFunc: opts.afterFunc,
}
if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
h.upperBounds = DefBuckets
@@ -526,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
+ h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@@ -544,16 +592,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
// Finally we know the final length of h.upperBounds and can make buckets
// for both counts as well as exemplars:
- h.counts[0] = &histogramCounts{
- buckets: make([]uint64, len(h.upperBounds)),
- nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
- nativeHistogramSchema: h.nativeHistogramSchema,
- }
- h.counts[1] = &histogramCounts{
- buckets: make([]uint64, len(h.upperBounds)),
- nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
- nativeHistogramSchema: h.nativeHistogramSchema,
- }
+ h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
+ h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
h.init(h) // Init self-collection.
@@ -632,8 +676,8 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
if frac == 0.5 {
key--
}
- div := 1 << -schema
- key = (key + div - 1) / div
+ offset := (1 << -schema) - 1
+ key = (key + offset) >> -schema
}
if isInf {
key++
@@ -694,9 +738,19 @@ type histogram struct {
nativeHistogramMaxZeroThreshold float64
nativeHistogramMaxBuckets uint32
nativeHistogramMinResetDuration time.Duration
- lastResetTime time.Time // Protected by mtx.
-
- now func() time.Time // To mock out time.Now() for testing.
+ // lastResetTime is protected by mtx. It is also used as created timestamp.
+ lastResetTime time.Time
+ // resetScheduled is protected by mtx. It is true if a reset is
+ // scheduled for a later time (when nativeHistogramMinResetDuration has
+ // passed).
+ resetScheduled bool
+ nativeExemplars nativeExemplars
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
+
+ // afterFunc is for testing purposes, by default it's time.AfterFunc.
+ afterFunc func(time.Duration, func()) *time.Timer
}
func (h *histogram) Desc() *Desc {
@@ -707,6 +761,9 @@ func (h *histogram) Observe(v float64) {
h.observe(v, h.findBucket(v))
}
+// ObserveWithExemplar should not be called in a high-frequency setting
+// for a native histogram with configured exemplars. For this case,
+// the implementation isn't lock-free and might suffer from lock contention.
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
i := h.findBucket(v)
h.observe(v, i)
@@ -735,9 +792,10 @@ func (h *histogram) Write(out *dto.Metric) error {
waitForCooldown(count, coldCounts)
his := &dto.Histogram{
- Bucket: make([]*dto.Bucket, len(h.upperBounds)),
- SampleCount: proto.Uint64(count),
- SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ Bucket: make([]*dto.Bucket, len(h.upperBounds)),
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ CreatedTimestamp: timestamppb.New(h.lastResetTime),
}
out.Histogram = his
out.Label = h.labelPairs
@@ -775,6 +833,25 @@ func (h *histogram) Write(out *dto.Metric) error {
his.ZeroCount = proto.Uint64(zeroBucket)
his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
+
+ // Add a no-op span to a histogram without observations and with
+ // a zero threshold of zero. Otherwise, a native histogram would
+ // look like a classic histogram to scrapers.
+ if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
+ his.PositiveSpan = []*dto.BucketSpan{{
+ Offset: proto.Int32(0),
+ Length: proto.Uint32(0),
+ }}
+ }
+
+ // If exemplars are not configured, the cap will be 0, so no append is
+ // needed in this case.
+ if cap(h.nativeExemplars.exemplars) > 0 {
+ h.nativeExemplars.Lock()
+ his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
+ h.nativeExemplars.Unlock()
+ }
+
}
addAndResetCounts(hotCounts, coldCounts)
return nil
@@ -810,7 +887,7 @@ func (h *histogram) observe(v float64, bucket int) {
}
}
-// limitSparsebuckets applies a strategy to limit the number of populated sparse
+// limitBuckets applies a strategy to limit the number of populated sparse
// buckets. It's generally best effort, and there are situations where the
// number can go higher (if even the lowest resolution isn't enough to reduce
// the number sufficiently, or if the provided counts aren't fully updated yet
@@ -841,26 +918,39 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket
if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
return
}
+ // One of the other strategies will happen. To undo what they will do as
+ // soon as enough time has passed to satisfy
+ // h.nativeHistogramMinResetDuration, schedule a reset at the right time
+ // if we haven't done so already.
+ if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
+ h.resetScheduled = true
+ h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
+ }
+
if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
return
}
h.doubleBucketWidth(hotCounts, coldCounts)
}
-// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration
-// has been passed. It returns true if the histogram has been reset. The caller
-// must have locked h.mtx.
-func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool {
+// maybeReset resets the whole histogram if at least
+// h.nativeHistogramMinResetDuration has been passed. It returns true if the
+// histogram has been reset. The caller must have locked h.mtx.
+func (h *histogram) maybeReset(
+ hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
+) bool {
// We are using the possibly mocked h.now() rather than
// time.Since(h.lastResetTime) to enable testing.
- if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
+ if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
+ h.resetScheduled || // Do not interfere if a reset is already scheduled.
+ h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
return false
}
// Completely reset coldCounts.
h.resetCounts(cold)
// Repeat the latest observation to not lose it completely.
cold.observe(value, bucket, true)
- // Make coldCounts the new hot counts while ressetting countAndHotIdx.
+ // Make coldCounts the new hot counts while resetting countAndHotIdx.
n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
count := n & ((1 << 63) - 1)
waitForCooldown(count, hot)
@@ -870,6 +960,29 @@ func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value
return true
}
+// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
+// called without having locked h.mtx.
+func (h *histogram) reset() {
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
+
+ n := atomic.LoadUint64(&h.countAndHotIdx)
+ hotIdx := n >> 63
+ coldIdx := (^n) >> 63
+ hot := h.counts[hotIdx]
+ cold := h.counts[coldIdx]
+ // Completely reset coldCounts.
+ h.resetCounts(cold)
+ // Make coldCounts the new hot counts while resetting countAndHotIdx.
+ n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
+ count := n & ((1 << 63) - 1)
+ waitForCooldown(count, hot)
+ // Finally, reset the formerly hot counts, too.
+ h.resetCounts(hot)
+ h.lastResetTime = h.now()
+ h.resetScheduled = false
+}
+
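
For reviewers, a standalone sketch of the countAndHotIdx encoding that the reset above relies on: judging from the code in this hunk, the top bit selects the hot counts slot and the low 63 bits carry the observation count, so one atomic swap both flips the slot and zeroes the count.

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var countAndHotIdx uint64

	// Each observation increments the low 63 bits; the top bit names the
	// hot slot.
	n := atomic.AddUint64(&countAndHotIdx, 1)
	fmt.Println("hot idx:", n>>63, "count:", n&((1<<63)-1))

	// Swapping hot and cold flips the top bit and zeroes the count in a
	// single atomic operation, exactly as reset() does above.
	coldIdx := (^n) >> 63
	old := atomic.SwapUint64(&countAndHotIdx, coldIdx<<63)
	fmt.Println("observations while the old slot was hot:", old&((1<<63)-1))
}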
// maybeWidenZeroBucket widens the zero bucket until it includes the existing
// buckets closest to the zero bucket (which could be two, if an equidistant
// negative and a positive bucket exists, but usually it's only one bucket to be
@@ -1009,8 +1122,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) {
deleteSyncMap(&counts.nativeHistogramBucketsPositive)
}
-// updateExemplar replaces the exemplar for the provided bucket. With empty
-// labels, it's a no-op. It panics if any of the labels is invalid.
+// updateExemplar replaces the exemplar for the provided classic bucket.
+// With empty labels, it's a no-op. It panics if any of the labels is invalid.
+// If the histogram is native, the exemplar will also be cached in nativeExemplars,
+// which has a limit; when the limit is reached, one existing exemplar is removed.
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
if l == nil {
return
@@ -1020,6 +1135,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
panic(err)
}
h.exemplars[bucket].Store(e)
+ doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+ if doSparse {
+ h.nativeExemplars.addExemplar(e)
+ }
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
@@ -1034,15 +1153,23 @@ type HistogramVec struct {
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
- desc := NewDesc(
+ return V2.NewHistogramVec(HistogramVecOpts{
+ HistogramOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
+func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &HistogramVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newHistogram(desc, opts, lvs...)
+ return newHistogram(desc, opts.HistogramOpts, lvs...)
}),
}
}
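
A usage sketch of the new V2 constructor path added above; the metric name, buckets, and the strings.ToLower constraint are illustrative only.

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	hv := prometheus.V2.NewHistogramVec(prometheus.HistogramVecOpts{
		HistogramOpts: prometheus.HistogramOpts{
			Name:    "request_duration_seconds",
			Help:    "Request latency.",
			Buckets: prometheus.DefBuckets,
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "code"},
			{Name: "method", Constraint: strings.ToLower},
		},
	})
	hv.WithLabelValues("200", "GET").Observe(0.042) // Stored under method="get".
}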
@@ -1161,6 +1288,7 @@ type constHistogram struct {
sum float64
buckets map[float64]uint64
labelPairs []*dto.LabelPair
+ createdTs *timestamppb.Timestamp
}
func (h *constHistogram) Desc() *Desc {
@@ -1168,7 +1296,9 @@ func (h *constHistogram) Desc() *Desc {
}
func (h *constHistogram) Write(out *dto.Metric) error {
- his := &dto.Histogram{}
+ his := &dto.Histogram{
+ CreatedTimestamp: h.createdTs,
+ }
buckets := make([]*dto.Bucket, 0, len(h.buckets))
@@ -1215,7 +1345,7 @@ func NewConstHistogram(
if desc.err != nil {
return nil, desc.err
}
- if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constHistogram{
@@ -1243,6 +1373,48 @@ func MustNewConstHistogram(
return m
}
+// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
+func NewConstHistogramWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ ct time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ createdTs: timestamppb.New(ct),
+ }, nil
+}
+
+// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
+// NewConstHistogramWithCreatedTimestamp would have returned an error.
+func MustNewConstHistogramWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ ct time.Time,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
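
A minimal usage sketch for the new constructor; the metric name, counts, and created timestamp are illustrative.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("http_request_duration_seconds", "Request latency.", nil, nil)
	_ = prometheus.MustNewConstHistogramWithCreatedTimestamp(
		desc, 4711, 403.34,
		map[float64]uint64{25: 121, 50: 2403, 100: 3221},
		time.Now().Add(-time.Hour), // Illustrative created timestamp.
	)
}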
type buckSort []*dto.Bucket
func (s buckSort) Len() int {
@@ -1309,7 +1481,7 @@ func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
// Multiple spans with only small gaps in between are probably
// encoded more efficiently as one larger span with a few empty
// buckets. Needs some research to find the sweet spot. For now,
- // we assume that gaps of one ore two buckets should not create
+ // we assume that gaps of one or two buckets should not create
// a new span.
iDelta := int32(i - nextI)
if n == 0 || iDelta > 2 {
@@ -1482,3 +1654,142 @@ func addAndResetCounts(hot, cold *histogramCounts) {
atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
}
+
+type nativeExemplars struct {
+ sync.Mutex
+
+ ttl time.Duration
+ exemplars []*dto.Exemplar
+}
+
+func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
+ if ttl == 0 {
+ ttl = 5 * time.Minute
+ }
+
+ if maxCount == 0 {
+ maxCount = 10
+ }
+
+ if maxCount < 0 {
+ maxCount = 0
+ }
+
+ return nativeExemplars{
+ ttl: ttl,
+ exemplars: make([]*dto.Exemplar, 0, maxCount),
+ }
+}
+
+func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
+ if cap(n.exemplars) == 0 {
+ return
+ }
+
+ n.Lock()
+ defer n.Unlock()
+
+ // The index where to insert the new exemplar.
+ var nIdx int = -1
+
+ // While the number of exemplars is still below cap(n.exemplars),
+ // insert the new exemplar directly, keeping the slice sorted by
+ // value.
+ if len(n.exemplars) < cap(n.exemplars) {
+ for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
+ if *e.Value < *n.exemplars[nIdx].Value {
+ break
+ }
+ }
+ n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
+ return
+ }
+
+ // When the number of exemplars exceeds the limit, remove one exemplar.
+ var (
+ rIdx int // The index where to remove the old exemplar.
+
+ ot = time.Now() // Oldest timestamp seen.
+ otIdx = -1 // Index of the exemplar with the oldest timestamp.
+
+ md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
+ mdIdx = -1 // Index of the older exemplar within the closest pair.
+ cLog float64 // Logarithm of the current exemplar.
+ pLog float64 // Logarithm of the previous exemplar.
+ )
+
+ for i, exemplar := range n.exemplars {
+ // Find the exemplar with the oldest timestamp.
+ if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
+ ot = exemplar.Timestamp.AsTime()
+ otIdx = i
+ }
+
+ // Find the index at which to insert the new exemplar.
+ if *e.Value <= *exemplar.Value && nIdx == -1 {
+ nIdx = i
+ }
+
+ // Find the two closest exemplars and pick the one with the older timestamp.
+ pLog = cLog
+ cLog = math.Log(exemplar.GetValue())
+ if i == 0 {
+ continue
+ }
+ diff := math.Abs(cLog - pLog)
+ if md == -1 || diff < md {
+ md = diff
+ if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
+ mdIdx = i
+ } else {
+ mdIdx = i - 1
+ }
+ }
+
+ }
+
+ // If all existing exemplars are smaller than the new exemplar,
+ // insert it at the end.
+ if nIdx == -1 {
+ nIdx = len(n.exemplars)
+ }
+
+ if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+ rIdx = otIdx
+ } else {
+ // The loop above did not consider the new exemplar when finding the
+ // closest pair, so compare the new exemplar against its prospective
+ // neighbors as well.
+ elog := math.Log(e.GetValue())
+ if nIdx > 0 {
+ diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
+ if diff < md {
+ md = diff
+ mdIdx = nIdx
+ if n.exemplars[nIdx-1].Timestamp.AsTime().Before(e.Timestamp.AsTime()) {
+ mdIdx = nIdx - 1
+ }
+ }
+ }
+ if nIdx < len(n.exemplars) {
+ diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
+ if diff < md {
+ mdIdx = nIdx
+ if n.exemplars[nIdx].Timestamp.AsTime().Before(e.Timestamp.AsTime()) {
+ mdIdx = nIdx
+ }
+ }
+ }
+ rIdx = mdIdx
+ }
+
+ // Adjust the slice according to rIdx and nIdx.
+ switch {
+ case rIdx == nIdx:
+ n.exemplars[nIdx] = e
+ case rIdx < nIdx:
+ n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
+ case rIdx > nIdx:
+ n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+ }
+}
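
A simplified standalone model of the eviction policy implemented above: keep a value-sorted window of at most N exemplars; when full, evict the oldest one if it has outlived the TTL, otherwise evict the older member of the closest pair in log-space, preserving the spread of observed values. Names and the helper are hypothetical; the library version additionally weighs the incoming exemplar against its neighbors.

package main

import (
	"fmt"
	"math"
	"time"
)

type exemplar struct {
	value float64
	ts    time.Time
}

func evictionIndex(window []exemplar, ttl time.Duration, now time.Time) int {
	oldest, oldestIdx := now, 0
	for i, e := range window {
		if e.ts.Before(oldest) {
			oldest, oldestIdx = e.ts, i
		}
	}
	if now.Sub(oldest) > ttl {
		return oldestIdx // Stale exemplar: age wins over spacing.
	}
	// Otherwise drop the older exemplar of the closest pair in log-space.
	md, mdIdx := math.Inf(1), 0
	for i := 1; i < len(window); i++ {
		d := math.Abs(math.Log(window[i].value) - math.Log(window[i-1].value))
		if d < md {
			md = d
			mdIdx = i - 1
			if window[i].ts.Before(window[i-1].ts) {
				mdIdx = i
			}
		}
	}
	return mdIdx
}

func main() {
	now := time.Now()
	w := []exemplar{
		{1, now.Add(-time.Minute)},
		{1.1, now.Add(-2 * time.Minute)}, // Closest to its neighbor and older.
		{8, now},
	}
	fmt.Println("evict index:", evictionIndex(w, 5*time.Minute, now)) // 1
}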
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
index fd0750f2cf50..a595a2036254 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -14,7 +14,7 @@
// It provides tools to compare sequences of strings and generate textual diffs.
//
// Maintaining `GetUnifiedDiffString` here because original repository
-// (/~https://github.com/pmezard/go-difflib) is no loger maintained.
+// (/~https://github.com/pmezard/go-difflib) is no longer maintained.
package internal
import (
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
index 723b45d64444..a4fa6eabd788 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
@@ -30,3 +30,5 @@ type GoCollectorOptions struct {
RuntimeMetricSumForHist map[string]string
RuntimeMetricRules []GoCollectorRule
}
+
+var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index c1b8fad36aeb..c21911f292d4 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -32,6 +32,104 @@ import (
// create a Desc.
type Labels map[string]string
+// LabelConstraint normalizes label values.
+type LabelConstraint func(string) string
+
+// ConstrainedLabel represents a label name and its constraint function
+// to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabel struct {
+ Name string
+ Constraint LabelConstraint
+}
+
+// ConstrainableLabels is an interface that allows creating labels that can
+// be optionally constrained.
+//
+// prometheus.V2.NewCounterVec(CounterVecOpts{
+// CounterOpts: {...}, // Usual CounterOpts fields
+// VariableLabels: ConstrainedLabels{
+// {Name: "A"},
+// {Name: "B", Constraint: func(v string) string { ... }},
+// },
+// })
+type ConstrainableLabels interface {
+ compile() *compiledLabels
+ labelNames() []string
+}
+
+// ConstrainedLabels represents a collection of label name -> constraint function
+// to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabels []ConstrainedLabel
+
+func (cls ConstrainedLabels) compile() *compiledLabels {
+ compiled := &compiledLabels{
+ names: make([]string, len(cls)),
+ labelConstraints: map[string]LabelConstraint{},
+ }
+
+ for i, label := range cls {
+ compiled.names[i] = label.Name
+ if label.Constraint != nil {
+ compiled.labelConstraints[label.Name] = label.Constraint
+ }
+ }
+
+ return compiled
+}
+
+func (cls ConstrainedLabels) labelNames() []string {
+ names := make([]string, len(cls))
+ for i, label := range cls {
+ names[i] = label.Name
+ }
+ return names
+}
+
+// UnconstrainedLabels represents a collection of labels without any constraint on
+// their value. Thus, it is simply a collection of label names.
+//
+// UnconstrainedLabels([]string{ "A", "B" })
+//
+// is equivalent to
+//
+// ConstrainedLabels {
+// { Name: "A" },
+// { Name: "B" },
+// }
+type UnconstrainedLabels []string
+
+func (uls UnconstrainedLabels) compile() *compiledLabels {
+ return &compiledLabels{
+ names: uls,
+ }
+}
+
+func (uls UnconstrainedLabels) labelNames() []string {
+ return uls
+}
+
+type compiledLabels struct {
+ names []string
+ labelConstraints map[string]LabelConstraint
+}
+
+func (cls *compiledLabels) compile() *compiledLabels {
+ return cls
+}
+
+func (cls *compiledLabels) labelNames() []string {
+ return cls.names
+}
+
+func (cls *compiledLabels) constrain(labelName, value string) string {
+ if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
+ return fn(value)
+ }
+ return value
+}
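
A compilable version of the doc-comment example above, assuming prometheus.V2.NewCounterVec and CounterVecOpts exist in this version (mirroring the V2 histogram and summary constructors elsewhere in this diff); names are illustrative.

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	cv := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{Name: "events_total", Help: "Events by source."},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "A"},
			{Name: "B", Constraint: strings.ToLower},
		},
	})
	// The constraint runs before hashing, so both calls increment the same child.
	cv.WithLabelValues("a1", "FOO").Inc()
	cv.WithLabelValues("a1", "foo").Inc()
}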
+
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
@@ -67,6 +165,8 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
if len(vals) != expectedNumberOfValues {
+ // The call below makes vals escape, copy them to avoid that.
+ vals := append([]string(nil), vals...)
return fmt.Errorf(
"%w: expected %d label values but got %d in %#v",
errInconsistentCardinality, expectedNumberOfValues,
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index b5119c50410e..9d9b81ab4481 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -20,11 +20,9 @@ import (
"strings"
"time"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/model"
-
dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+ "google.golang.org/protobuf/proto"
)
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
@@ -94,6 +92,9 @@ type Opts struct {
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
}
// BuildFQName joins the given three name components by "_". Empty name
@@ -233,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
)
for i, e := range exemplars {
ts := e.Timestamp
- if ts == (time.Time{}) {
+ if ts.IsZero() {
ts = now
}
exs[i], err = newExemplar(e.Value, ts, e.Labels)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index 8548dd18ed5e..62a4e7ad9a06 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -22,14 +22,15 @@ import (
)
type processCollector struct {
- collectFn func(chan<- Metric)
- pidFn func() (int, error)
- reportErrors bool
- cpuTotal *Desc
- openFDs, maxFDs *Desc
- vsize, maxVsize *Desc
- rss *Desc
- startTime *Desc
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ reportErrors bool
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, maxVsize *Desc
+ rss *Desc
+ startTime *Desc
+ inBytes, outBytes *Desc
}
// ProcessCollectorOpts defines the behavior of a process metrics collector
@@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
"Start time of the process since unix epoch in seconds.",
nil, nil,
),
+ inBytes: NewDesc(
+ ns+"process_network_receive_bytes_total",
+ "Number of bytes received by the process over the network.",
+ nil, nil,
+ ),
+ outBytes: NewDesc(
+ ns+"process_network_transmit_bytes_total",
+ "Number of bytes sent by the process over the network.",
+ nil, nil,
+ ),
}
if opts.PidFn == nil {
@@ -129,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
ch <- c.maxVsize
ch <- c.rss
ch <- c.startTime
+ ch <- c.inBytes
+ ch <- c.outBytes
}
// Collect returns the current state of all metrics of the collector.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
index c0152cdb613a..14d56d2d0680 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build !windows && !js
-// +build !windows,!js
+//go:build !windows && !js && !wasip1
+// +build !windows,!js,!wasip1
package prometheus
@@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
} else {
c.reportError(ch, nil, err)
}
+
+ if netstat, err := p.Netstat(); err == nil {
+ var inOctets, outOctets float64
+ if netstat.IpExt.InOctets != nil {
+ inOctets = *netstat.IpExt.InOctets
+ }
+ if netstat.IpExt.OutOctets != nil {
+ outOctets = *netstat.IpExt.OutOctets
+ }
+ ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
+ ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
+ } else {
+ c.reportError(ch, nil, err)
+ }
}
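
The new network counters are exposed automatically wherever procfs Netstat data is available; a minimal registration sketch with an illustrative listen address.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// process_network_receive_bytes_total and process_network_transmit_bytes_total
	// are collected alongside the existing process metrics.
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":8080", nil)
}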
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
new file mode 100644
index 000000000000..d8d9a6d7a2fa
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build wasip1
+// +build wasip1
+
+package prometheus
+
+func canCollectProcess() bool {
+ return false
+}
+
+func (*processCollector) processCollect(chan<- Metric) {
+ // noop on this platform
+ return
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
index 9819917b83b3..315eab5f179b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) {
return n, err
}
+// Unwrap lets http.ResponseController get the underlying http.ResponseWriter,
+// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface.
+func (r *responseWriterDelegator) Unwrap() http.ResponseWriter {
+ return r.ResponseWriter
+}
+
type (
closeNotifierDelegator struct{ *responseWriterDelegator }
flusherDelegator struct{ *responseWriterDelegator }
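
Because the delegator now implements Unwrap, handlers wrapped by promhttp middleware can keep using http.ResponseController (Go 1.20+). A hedged sketch; metric and route names are illustrative.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	hist := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "handler_seconds", Help: "Latency.",
	}, []string{"code", "method"})

	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("chunk"))
		// Flush reaches the underlying ResponseWriter through Unwrap.
		http.NewResponseController(w).Flush()
	})
	http.Handle("/", promhttp.InstrumentHandlerDuration(hist, inner))
	_ = http.ListenAndServe(":8080", nil)
}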
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index a4cc9810b072..2e0b9a864863 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -37,21 +37,36 @@ import (
"fmt"
"io"
"net/http"
- "strings"
+ "strconv"
"sync"
"time"
+ "github.com/klauspost/compress/zstd"
"github.com/prometheus/common/expfmt"
+ "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
"github.com/prometheus/client_golang/prometheus"
)
const (
- contentTypeHeader = "Content-Type"
- contentEncodingHeader = "Content-Encoding"
- acceptEncodingHeader = "Accept-Encoding"
+ contentTypeHeader = "Content-Type"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+ processStartTimeHeader = "Process-Start-Time-Unix"
)
+// Compression represents the content encodings that handlers support for
+// HTTP responses.
+type Compression string
+
+const (
+ Identity Compression = "identity"
+ Gzip Compression = "gzip"
+ Zstd Compression = "zstd"
+)
+
+var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd}
+
var gzipPool = sync.Pool{
New: func() interface{} {
return gzip.NewWriter(nil)
@@ -120,7 +135,22 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
}
}
+ // Select compression formats to offer based on default or user choice.
+ var compressions []string
+ if !opts.DisableCompression {
+ offers := defaultCompressionFormats
+ if len(opts.OfferedCompressions) > 0 {
+ offers = opts.OfferedCompressions
+ }
+ for _, comp := range offers {
+ compressions = append(compressions, string(comp))
+ }
+ }
+
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+ if !opts.ProcessStartTime.IsZero() {
+ rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
+ }
if inFlightSem != nil {
select {
case inFlightSem <- struct{}{}: // All good, carry on.
@@ -160,20 +190,20 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
} else {
contentType = expfmt.Negotiate(req.Header)
}
- header := rsp.Header()
- header.Set(contentTypeHeader, string(contentType))
+ rsp.Header().Set(contentTypeHeader, string(contentType))
- w := io.Writer(rsp)
- if !opts.DisableCompression && gzipAccepted(req.Header) {
- header.Set(contentEncodingHeader, "gzip")
- gz := gzipPool.Get().(*gzip.Writer)
- defer gzipPool.Put(gz)
+ w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions)
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error getting writer", err)
+ }
+ w = io.Writer(rsp)
+ encodingHeader = string(Identity)
+ }
- gz.Reset(w)
- defer gz.Close()
+ defer closeWriter()
- w = gz
- }
+ rsp.Header().Set(contentEncodingHeader, encodingHeader)
enc := expfmt.NewEncoder(w, contentType)
@@ -338,9 +368,19 @@ type HandlerOpts struct {
// no effect on the HTTP status code because ErrorHandling is set to
// ContinueOnError.
Registry prometheus.Registerer
- // If DisableCompression is true, the handler will never compress the
- // response, even if requested by the client.
+ // DisableCompression disables the response encoding (compression) and
+ // encoding negotiation. If true, the handler will
+ // never compress the response, even if requested
+ // by the client and the OfferedCompressions field is set.
DisableCompression bool
+ // OfferedCompressions is a set of encodings (compressions) that the
+ // handler will try to offer when negotiating with the client. This
+ // defaults to identity, gzip and zstd.
+ // NOTE: If the handler can't agree with the client on an encoding, or if
+ // unsupported or empty encodings are set in OfferedCompressions, the
+ // handler always falls back to no compression (identity), for
+ // compatibility reasons. In such cases ErrorLog will be used if set.
+ OfferedCompressions []Compression
// The number of concurrent HTTP requests is limited to
// MaxRequestsInFlight. Additional requests are responded to with 503
// Service Unavailable and a suitable message in the body. If
@@ -366,19 +406,14 @@ type HandlerOpts struct {
// (which changes the identity of the resulting series on the Prometheus
// server).
EnableOpenMetrics bool
-}
-
-// gzipAccepted returns whether the client will accept gzip-encoded content.
-func gzipAccepted(header http.Header) bool {
- a := header.Get(acceptEncodingHeader)
- parts := strings.Split(a, ",")
- for _, part := range parts {
- part = strings.TrimSpace(part)
- if part == "gzip" || strings.HasPrefix(part, "gzip;") {
- return true
- }
- }
- return false
+ // ProcessStartTime allows setting a process start time value that will be
+ // exposed with the "Process-Start-Time-Unix" response header along with the
+ // metrics payload. This allows callers to perform efficient transformations
+ // to cumulative counters (e.g. OpenTelemetry) or generally _created
+ // timestamp estimation per scrape target.
+ // NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
+ // exposition format.
+ ProcessStartTime time.Time
}
// httpError removes any content-encoding header and then calls http.Error with
@@ -393,3 +428,38 @@ func httpError(rsp http.ResponseWriter, err error) {
http.StatusInternalServerError,
)
}
+
+// negotiateEncodingWriter reads the Accept-Encoding header from a request and
+// selects the right compression based on an allow-list of supported
+// compressions. It returns a writer implementing the compression and the
+// header value that the caller should set on the response.
+func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) {
+ if len(compressions) == 0 {
+ return rw, string(Identity), func() {}, nil
+ }
+
+ // TODO(mrueg): Replace internal/github.com/gddo once /~https://github.com/golang/go/issues/19307 is implemented.
+ selected := httputil.NegotiateContentEncoding(r, compressions)
+
+ switch selected {
+ case "zstd":
+ // TODO(mrueg): Replace klauspost/compress with stdlib implementation once /~https://github.com/golang/go/issues/62513 is implemented.
+ z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest))
+ if err != nil {
+ return nil, "", func() {}, err
+ }
+
+ z.Reset(rw)
+ return z, selected, func() { _ = z.Close() }, nil
+ case "gzip":
+ gz := gzipPool.Get().(*gzip.Writer)
+ gz.Reset(rw)
+ return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil
+ case "identity":
+ // This means the content is not compressed.
+ return rw, selected, func() {}, nil
+ default:
+ // The content encoding was not implemented yet.
+ return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats)
+ }
+}
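
A handler-side usage sketch of the new negotiation, restricting the offered encodings; registry and endpoint names are illustrative.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		// Offer only gzip and identity; a client asking for zstd will fall
		// back to the best remaining match.
		OfferedCompressions: []promhttp.Compression{promhttp.Identity, promhttp.Gzip},
	})
	http.Handle("/metrics", h)
	_ = http.ListenAndServe(":8080", nil)
}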
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
index 21086781621f..d3482c40ca76 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -68,16 +68,17 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
o.apply(rtOpts)
}
- code, method := checkLabels(counter)
+ // Curry the counter with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
return func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
- addWithExemplar(
- counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
- 1,
- rtOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+ for label, resolve := range rtOpts.extraLabelsFromCtx {
+ l[label] = resolve(resp.Request.Context())
+ }
+ addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
}
return resp, err
}
@@ -110,17 +111,18 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
o.apply(rtOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
return func(r *http.Request) (*http.Response, error) {
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
- time.Since(start).Seconds(),
- rtOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+ for label, resolve := range rtOpts.extraLabelsFromCtx {
+ l[label] = resolve(resp.Request.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
}
return resp, err
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
index cca67a78a90d..356edb7868cc 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -87,7 +87,8 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
return func(w http.ResponseWriter, r *http.Request) {
@@ -95,23 +96,22 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
- time.Since(now).Seconds(),
- hOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
}
}
return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
-
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
- time.Since(now).Seconds(),
- hOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
}
}
@@ -138,28 +138,30 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
o.apply(hOpts)
}
- code, method := checkLabels(counter)
+ // Curry the counter with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- addWithExemplar(
- counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
- 1,
- hOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
}
}
return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
- addWithExemplar(
- counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
- 1,
- hOpts.getExemplarFn(r.Context()),
- )
+
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
}
}
@@ -191,16 +193,17 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
- time.Since(now).Seconds(),
- hOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, status, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
})
next.ServeHTTP(d, r)
}
@@ -231,28 +234,32 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
+
if code {
return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
- float64(size),
- hOpts.getExemplarFn(r.Context()),
- )
+
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
}
}
return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
- float64(size),
- hOpts.getExemplarFn(r.Context()),
- )
+
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
}
}
@@ -281,16 +288,18 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
- float64(d.Written()),
- hOpts.getExemplarFn(r.Context()),
- )
+
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
})
}
@@ -380,15 +389,12 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
return true
}
-// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
-// unnecessary allocations on each request.
-var emptyLabels = prometheus.Labels{}
-
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
+ labels := prometheus.Labels{}
+
if !(code || method) {
- return emptyLabels
+ return labels
}
- labels := prometheus.Labels{}
if code {
labels["code"] = sanitizeCode(status)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
index c590d912c947..5d4383aa14a3 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
@@ -24,14 +24,32 @@ type Option interface {
apply(*options)
}
+// LabelValueFromCtx is used to compute a label value from the request context.
+// The context can be filled with values from the request through middleware.
+type LabelValueFromCtx func(ctx context.Context) string
+
// options store options for both a handler or round tripper.
type options struct {
- extraMethods []string
- getExemplarFn func(requestCtx context.Context) prometheus.Labels
+ extraMethods []string
+ getExemplarFn func(requestCtx context.Context) prometheus.Labels
+ extraLabelsFromCtx map[string]LabelValueFromCtx
}
func defaultOptions() *options {
- return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }}
+ return &options{
+ getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil },
+ extraLabelsFromCtx: map[string]LabelValueFromCtx{},
+ }
+}
+
+func (o *options) emptyDynamicLabels() prometheus.Labels {
+ labels := prometheus.Labels{}
+
+ for label := range o.extraLabelsFromCtx {
+ labels[label] = ""
+ }
+
+ return labels
}
type optionApplyFunc func(*options)
@@ -48,11 +66,19 @@ func WithExtraMethods(methods ...string) Option {
})
}
-// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics.
-// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric
-// will get instrumented without exemplar.
+// WithExemplarFromContext allows injecting a function that extracts an exemplar
+// from the request context; the exemplar is attached to counter and histogram metrics.
+// If the function returns nil labels or the metric does not support exemplars,
+// no exemplar will be added (noop), but the metric will continue to observe/increment.
func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
return optionApplyFunc(func(o *options) {
o.getExemplarFn = getExemplarFn
})
}
+
+// WithLabelFromCtx registers a label for dynamic resolution with access to context.
+// See ExampleInstrumentHandlerWithLabelResolver for example usage.
+func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
+ return optionApplyFunc(func(o *options) {
+ o.extraLabelsFromCtx[name] = valueFn
+ })
+}
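
A usage sketch for the new option: a hypothetical middleware stores a tenant ID in the request context and the instrumentation resolves it per request. The key type and label name are illustrative.

package main

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type ctxKey struct{}

func main() {
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "api_requests_total", Help: "Requests by tenant.",
	}, []string{"code", "method", "tenant"})

	handler := promhttp.InstrumentHandlerCounter(counter,
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),
		promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
			if t, ok := ctx.Value(ctxKey{}).(string); ok {
				return t
			}
			return "unknown"
		}),
	)

	http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Middleware would normally set this value.
		handler(w, r.WithContext(context.WithValue(r.Context(), ctxKey{}, "acme")))
	}))
	_ = http.ListenAndServe(":8080", nil)
}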
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 09e34d307c97..c6fd2f58b742 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -21,18 +21,17 @@ import (
"path/filepath"
"runtime"
"sort"
+ "strconv"
"strings"
"sync"
"unicode/utf8"
- "github.com/cespare/xxhash/v2"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/expfmt"
+ "github.com/prometheus/client_golang/prometheus/internal"
+ "github.com/cespare/xxhash/v2"
dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/client_golang/prometheus/internal"
+ "github.com/prometheus/common/expfmt"
+ "google.golang.org/protobuf/proto"
)
const (
@@ -315,16 +314,17 @@ func (r *Registry) Register(c Collector) error {
if dimHash != desc.dimHash {
return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
}
- } else {
- // ...then check the new descriptors already seen.
- if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
- if dimHash != desc.dimHash {
- return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
- }
- } else {
- newDimHashesByName[desc.fqName] = desc.dimHash
+ continue
+ }
+
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
}
+ continue
}
+ newDimHashesByName[desc.fqName] = desc.dimHash
}
// A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 {
@@ -549,7 +549,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
goroutineBudget--
runtime.Gosched()
}
- // Once both checkedMetricChan and uncheckdMetricChan are closed
+ // Once both checkedMetricChan and uncheckedMetricChan are closed
// and drained, the contraption above will nil out cmc and umc,
// and then we can leave the collect loop here.
if cmc == nil && umc == nil {
@@ -933,6 +933,10 @@ func checkMetricConsistency(
h.WriteString(lp.GetValue())
h.Write(separatorByteSlice)
}
+ if dtoMetric.TimestampMs != nil {
+ h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
+ h.Write(separatorByteSlice)
+ }
hSum := h.Sum64()
if _, exists := metricHashes[hSum]; exists {
return fmt.Errorf(
@@ -960,7 +964,7 @@ func checkDescConsistency(
// Is the desc consistent with the content of the metric?
lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
copy(lpsFromDesc, desc.constLabelPairs)
- for _, l := range desc.variableLabels {
+ for _, l := range desc.variableLabels.names {
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
Name: proto.String(l),
})
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index 7bc448a89394..1ab0e4796550 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -22,11 +22,11 @@ import (
"sync/atomic"
"time"
- "github.com/beorn7/perks/quantile"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
-
dto "github.com/prometheus/client_model/go"
+
+ "github.com/beorn7/perks/quantile"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
// quantileLabel is used for the label that defines the quantile in a
@@ -146,6 +146,21 @@ type SummaryOpts struct {
// is the internal buffer size of the underlying package
// "github.com/bmizerany/perks/quantile").
BufCap uint32
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
+}
+
+// SummaryVecOpts bundles the options to create a SummaryVec metric.
+// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type SummaryVecOpts struct {
+ SummaryOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
}
// Problem with the sliding-window decay algorithm... The Merge method of
@@ -177,11 +192,11 @@ func NewSummary(opts SummaryOpts) Summary {
}
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
- if len(desc.variableLabels) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+ if len(desc.variableLabels.names) != len(labelValues) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
}
- for _, n := range desc.variableLabels {
+ for _, n := range desc.variableLabels.names {
if n == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
@@ -211,6 +226,9 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
opts.BufCap = DefBufCap
}
+ if opts.now == nil {
+ opts.now = time.Now
+ }
if len(opts.Objectives) == 0 {
// Use the lock-free implementation of a Summary without objectives.
s := &noObjectivesSummary{
@@ -219,6 +237,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
counts: [2]*summaryCounts{{}, {}},
}
s.init(s) // Init self-collection.
+ s.createdTs = timestamppb.New(opts.now())
return s
}
@@ -234,7 +253,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
coldBuf: make([]float64, 0, opts.BufCap),
streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
}
- s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.headStreamExpTime = opts.now().Add(s.streamDuration)
s.hotBufExpTime = s.headStreamExpTime
for i := uint32(0); i < opts.AgeBuckets; i++ {
@@ -248,6 +267,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
sort.Float64s(s.sortedObjectives)
s.init(s) // Init self-collection.
+ s.createdTs = timestamppb.New(opts.now())
return s
}
@@ -275,6 +295,8 @@ type summary struct {
headStream *quantile.Stream
headStreamIdx int
headStreamExpTime, hotBufExpTime time.Time
+
+ createdTs *timestamppb.Timestamp
}
func (s *summary) Desc() *Desc {
@@ -296,7 +318,9 @@ func (s *summary) Observe(v float64) {
}
func (s *summary) Write(out *dto.Metric) error {
- sum := &dto.Summary{}
+ sum := &dto.Summary{
+ CreatedTimestamp: s.createdTs,
+ }
qs := make([]*dto.Quantile, 0, len(s.objectives))
s.bufMtx.Lock()
@@ -429,6 +453,8 @@ type noObjectivesSummary struct {
counts [2]*summaryCounts
labelPairs []*dto.LabelPair
+
+ createdTs *timestamppb.Timestamp
}
func (s *noObjectivesSummary) Desc() *Desc {
@@ -479,8 +505,9 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error {
}
sum := &dto.Summary{
- SampleCount: proto.Uint64(count),
- SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ CreatedTimestamp: s.createdTs,
}
out.Summary = sum
@@ -530,20 +557,28 @@ type SummaryVec struct {
// it is handled by the Prometheus server internally, “quantile” is an illegal
// label name. NewSummaryVec will panic if this label name is used.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
- for _, ln := range labelNames {
+ return V2.NewSummaryVec(SummaryVecOpts{
+ SummaryOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
+func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
+ for _, ln := range opts.VariableLabels.labelNames() {
if ln == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
- desc := NewDesc(
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &SummaryVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newSummary(desc, opts, lvs...)
+ return newSummary(desc, opts.SummaryOpts, lvs...)
}),
}
}
@@ -662,6 +697,7 @@ type constSummary struct {
sum float64
quantiles map[float64]float64
labelPairs []*dto.LabelPair
+ createdTs *timestamppb.Timestamp
}
func (s *constSummary) Desc() *Desc {
@@ -669,7 +705,9 @@ func (s *constSummary) Desc() *Desc {
}
func (s *constSummary) Write(out *dto.Metric) error {
- sum := &dto.Summary{}
+ sum := &dto.Summary{
+ CreatedTimestamp: s.createdTs,
+ }
qs := make([]*dto.Quantile, 0, len(s.quantiles))
sum.SampleCount = proto.Uint64(s.count)
@@ -718,7 +756,7 @@ func NewConstSummary(
if desc.err != nil {
return nil, desc.err
}
- if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constSummary{
@@ -745,3 +783,45 @@ func MustNewConstSummary(
}
return m
}
+
+// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp.
+func NewConstSummaryWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ ct time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ createdTs: timestamppb.New(ct),
+ }, nil
+}
+
+// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where
+// NewConstSummaryWithCreatedTimestamp would have returned an error.
+func MustNewConstSummaryWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ ct time.Time,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
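
A minimal usage sketch, with illustrative metric names and a hypothetical process start time.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	start := time.Now().Add(-10 * time.Minute) // Illustrative process start.
	desc := prometheus.NewDesc("rpc_duration_seconds", "RPC latency.", nil, nil)
	_ = prometheus.MustNewConstSummaryWithCreatedTimestamp(
		desc, 4711, 403.34,
		map[float64]float64{0.5: 42.3, 0.9: 323.3},
		start,
	)
}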
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
index f28a76f3a62a..52344fef53f5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -23,7 +23,9 @@ type Timer struct {
}
// NewTimer creates a new Timer. The provided Observer is used to observe a
-// duration in seconds. Timer is usually used to time a function call in the
+// duration in seconds. If the Observer implements ExemplarObserver, passing an
+// exemplar later on is also supported.
+// Timer is usually used to time a function call in the
// following way:
//
// func TimeMe() {
@@ -31,6 +33,14 @@ type Timer struct {
// defer timer.ObserveDuration()
// // Do actual work.
// }
+//
+// or
+//
+// func TimeMeWithExemplar() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDurationWithExemplar(exemplar)
+// // Do actual work.
+// }
func NewTimer(o Observer) *Timer {
return &Timer{
begin: time.Now(),
@@ -53,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration {
}
return d
}
+
+// ObserveDurationWithExemplar is like ObserveDuration, but it will also
+// observe the exemplar with the duration, unless the exemplar is nil or the
+// provided Observer cannot be cast to ExemplarObserver.
+func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
+ d := time.Since(t.begin)
+ eo, ok := t.observer.(ExemplarObserver)
+ if ok && exemplar != nil {
+ eo.ObserveWithExemplar(d.Seconds(), exemplar)
+ return d
+ }
+ if t.observer != nil {
+ t.observer.Observe(d.Seconds())
+ }
+ return d
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index 2d3abc1cbd68..cc23011fad23 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -14,18 +14,17 @@
package prometheus
import (
+ "errors"
"fmt"
"sort"
"time"
"unicode/utf8"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "google.golang.org/protobuf/types/known/timestamppb"
-
"github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
// ValueType is an enumeration of metric types that represent a simple value.
@@ -93,7 +92,7 @@ func (v *valueFunc) Desc() *Desc {
}
func (v *valueFunc) Write(out *dto.Metric) error {
- return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
+ return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
}
// NewConstMetric returns a metric with one fixed value that cannot be
@@ -107,12 +106,12 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
if desc.err != nil {
return nil, desc.err
}
- if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
metric := &dto.Metric{}
- if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil {
+ if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
return nil, err
}
@@ -132,6 +131,43 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal
return m
}
+// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
+// with created timestamp set and returns an error for other metric types.
+func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ switch valueType {
+ case CounterValue:
+ break
+ default:
+ return nil, errors.New("created timestamps are only supported for counters")
+ }
+
+ metric := &dto.Metric{}
+ if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
+ return nil, err
+ }
+
+ return &constMetric{
+ desc: desc,
+ metric: metric,
+ }, nil
+}
+
+// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
+// NewConstMetricWithCreatedTimestamp would have returned an error.
+func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
+ m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
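
A minimal usage sketch; note that only CounterValue is accepted (names illustrative).

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("events_total", "Total events.", nil, nil)
	// Only CounterValue is accepted; GaugeValue would return an error.
	_ = prometheus.MustNewConstMetricWithCreatedTimestamp(
		desc, prometheus.CounterValue, 42, time.Now().Add(-time.Hour),
	)
}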
type constMetric struct {
desc *Desc
metric *dto.Metric
@@ -155,11 +191,12 @@ func populateMetric(
labelPairs []*dto.LabelPair,
e *dto.Exemplar,
m *dto.Metric,
+ ct *timestamppb.Timestamp,
) error {
m.Label = labelPairs
switch t {
case CounterValue:
- m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
+ m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
case GaugeValue:
m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
case UntypedValue:
@@ -178,19 +215,19 @@ func populateMetric(
// This function is only needed for custom Metric implementations. See MetricVec
// example.
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
- totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
if totalLen == 0 {
// Super fast path.
return nil
}
- if len(desc.variableLabels) == 0 {
+ if len(desc.variableLabels.names) == 0 {
// Moderately fast path.
return desc.constLabelPairs
}
labelPairs := make([]*dto.LabelPair, 0, totalLen)
- for i, n := range desc.variableLabels {
+ for i, l := range desc.variableLabels.names {
labelPairs = append(labelPairs, &dto.LabelPair{
- Name: proto.String(n),
+ Name: proto.String(l),
Value: proto.String(labelValues[i]),
})
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 7ae322590c86..2c808eece0a0 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -72,6 +72,8 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ lvs = constrainLabelValues(m.desc, lvs, m.curry)
+
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
@@ -91,6 +93,9 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool {
+ labels, closer := constrainLabels(m.desc, labels)
+ defer closer()
+
h, err := m.hashLabels(labels)
if err != nil {
return false
@@ -106,6 +111,9 @@ func (m *MetricVec) Delete(labels Labels) bool {
// Note that curried labels will never be matched if deleting from the curried vector.
// To match curried labels with DeletePartialMatch, it must be called on the base vector.
func (m *MetricVec) DeletePartialMatch(labels Labels) int {
+ labels, closer := constrainLabels(m.desc, labels)
+ defer closer()
+
return m.metricMap.deleteByLabels(labels, m.curry)
}
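
For orientation, a small sketch of how these deletion methods behave on a concrete vector; the names and values are illustrative, and the constraining added above happens transparently before hashing:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "demo_requests_total", Help: "Requests."},
		[]string{"method", "code"},
	)
	requests.WithLabelValues("GET", "200").Inc()
	requests.WithLabelValues("GET", "500").Inc()
	requests.WithLabelValues("POST", "200").Inc()

	// Exact match: removes one child, reports whether it existed.
	fmt.Println(requests.DeleteLabelValues("POST", "200")) // true

	// Partial match: removes every child whose labels include method="GET".
	fmt.Println(requests.DeletePartialMatch(prometheus.Labels{"method": "GET"})) // 2
}
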
@@ -144,11 +152,11 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
oldCurry = m.curry
iCurry int
)
- for i, label := range m.desc.variableLabels {
- val, ok := labels[label]
+ for i, labelName := range m.desc.variableLabels.names {
+ val, ok := labels[labelName]
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
if ok {
- return nil, fmt.Errorf("label name %q is already curried", label)
+ return nil, fmt.Errorf("label name %q is already curried", labelName)
}
newCurry = append(newCurry, oldCurry[iCurry])
iCurry++
@@ -156,7 +164,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
if !ok {
continue // Label stays uncurried.
}
- newCurry = append(newCurry, curriedLabelValue{i, val})
+ newCurry = append(newCurry, curriedLabelValue{
+ i,
+ m.desc.variableLabels.constrain(labelName, val),
+ })
}
}
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
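
Currying is easiest to see from a typed vector: MustCurryWith pre-binds some labels (now constrained at curry time, per the change above) so call sites supply only the rest. A hedged sketch with illustrative names:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "demo_http_requests_total", Help: "Requests."},
		[]string{"handler", "method"},
	)

	// Fix handler="/api"; the curried vector only needs "method".
	apiReqs := httpReqs.MustCurryWith(prometheus.Labels{"handler": "/api"})
	apiReqs.WithLabelValues("GET").Inc()

	// Currying the same label again fails on the curried vector.
	if _, err := apiReqs.CurryWith(prometheus.Labels{"handler": "/other"}); err != nil {
		fmt.Println(err) // label name "handler" is already curried
	}
	fmt.Println(testutil.ToFloat64(httpReqs.WithLabelValues("/api", "GET"))) // 1
}
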
@@ -199,6 +210,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
// a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
@@ -224,6 +236,9 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ labels, closer := constrainLabels(m.desc, labels)
+ defer closer()
+
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
@@ -233,7 +248,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
}
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
- if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err
}
@@ -242,7 +257,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
curry = m.curry
iVals, iCurry int
)
- for i := 0; i < len(m.desc.variableLabels); i++ {
+ for i := 0; i < len(m.desc.variableLabels.names); i++ {
if iCurry < len(curry) && curry[iCurry].index == i {
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
@@ -256,7 +271,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
}
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
- if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err
}
@@ -265,17 +280,17 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
curry = m.curry
iCurry int
)
- for i, label := range m.desc.variableLabels {
- val, ok := labels[label]
+ for i, labelName := range m.desc.variableLabels.names {
+ val, ok := labels[labelName]
if iCurry < len(curry) && curry[iCurry].index == i {
if ok {
- return 0, fmt.Errorf("label name %q is already curried", label)
+ return 0, fmt.Errorf("label name %q is already curried", labelName)
}
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
} else {
if !ok {
- return 0, fmt.Errorf("label name %q missing in label map", label)
+ return 0, fmt.Errorf("label name %q missing in label map", labelName)
}
h = m.hashAdd(h, val)
}
@@ -453,7 +468,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []
func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
for l, v := range labels {
// Check if the target label exists in our metrics and get the index.
- varLabelIndex, validLabel := indexOf(l, desc.variableLabels)
+ varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
if validLabel {
// Check the value of that label against the target value.
// We don't consider curried values in partial matches.
@@ -492,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues(
return metric
}
-// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// getOrCreateMetricWithLabels retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
@@ -597,7 +612,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
return false
}
iCurry := 0
- for i, k := range desc.variableLabels {
+ for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i {
if values[i] != curry[iCurry].value {
return false
@@ -615,7 +630,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
labelValues := make([]string, len(labels)+len(curry))
iCurry := 0
- for i, k := range desc.variableLabels {
+ for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i {
labelValues[i] = curry[iCurry].value
iCurry++
@@ -640,3 +655,55 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
}
return labelValues
}
+
+var labelsPool = &sync.Pool{
+ New: func() interface{} {
+ return make(Labels)
+ },
+}
+
+func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
+ if len(desc.variableLabels.labelConstraints) == 0 {
+ // Fast path when there are no constraints.
+ return labels, func() {}
+ }
+
+ constrainedLabels := labelsPool.Get().(Labels)
+ for l, v := range labels {
+ constrainedLabels[l] = desc.variableLabels.constrain(l, v)
+ }
+
+ return constrainedLabels, func() {
+ for k := range constrainedLabels {
+ delete(constrainedLabels, k)
+ }
+ labelsPool.Put(constrainedLabels)
+ }
+}
+
+func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
+ if len(desc.variableLabels.labelConstraints) == 0 {
+ // Fast path when there are no constraints.
+ return lvs
+ }
+
+ constrainedValues := make([]string, len(lvs))
+ var iCurry, iLVs int
+ for i := 0; i < len(lvs)+len(curry); i++ {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ iCurry++
+ continue
+ }
+
+ if i < len(desc.variableLabels.names) {
+ constrainedValues[iLVs] = desc.variableLabels.constrain(
+ desc.variableLabels.names[i],
+ lvs[iLVs],
+ )
+ } else {
+ constrainedValues[iLVs] = lvs[iLVs]
+ }
+ iLVs++
+ }
+ return constrainedValues
+}
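
The pooling dance in constrainLabels, borrowing a map, filling it, and handing back a closer that clears and returns it, is a general allocation-avoidance pattern. A standalone sketch of the same idea, independent of the unexported Prometheus internals:

package main

import (
	"fmt"
	"strings"
	"sync"
)

var mapPool = sync.Pool{
	New: func() interface{} { return make(map[string]string) },
}

// normalized borrows a map from the pool, fills it with lower-cased
// values, and returns it along with a closer that must be deferred:
// the closer empties the map and puts it back for reuse.
func normalized(in map[string]string) (map[string]string, func()) {
	out := mapPool.Get().(map[string]string)
	for k, v := range in {
		out[k] = strings.ToLower(v)
	}
	return out, func() {
		for k := range out {
			delete(out, k) // clear before pooling so stale keys never leak
		}
		mapPool.Put(out)
	}
}

func main() {
	m, closer := normalized(map[string]string{"method": "GET"})
	defer closer()
	fmt.Println(m["method"]) // get
}
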
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
new file mode 100644
index 000000000000..42bc3a8f0661
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+type v2 struct{}
+
+// V2 is a struct that can be referenced to access the experimental API that
+// might be present in v2 of client_golang someday. It offers extended
+// functionality over v1 with a slightly changed API. It is acceptable to use
+// some pieces from v1, e.g. `prometheus.NewGauge`, and some from v2, e.g.
+// `prometheus.V2.NewDesc`, in the same codebase.
+var V2 = v2{}
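
As of this vendored revision, the main thing reachable through V2 is NewDesc, which wrap.go below switches to. The ConstrainedLabels and Constraint names in this sketch match client_golang v1.15+, but they are assumptions as far as this diff shows:

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// v1 and V2 APIs coexist: a plain v1 gauge...
	_ = prometheus.NewGauge(prometheus.GaugeOpts{Name: "demo_up", Help: "Up."})

	// ...next to a V2 Desc whose "method" label is normalized on every use.
	desc := prometheus.V2.NewDesc(
		"demo_requests_total", "Requests.",
		prometheus.ConstrainedLabels{{
			Name:       "method",
			Constraint: strings.ToUpper, // "get" and "GET" collapse to one series
		}},
		nil,
	)
	_ = desc
}
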
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index 1498ee144cb0..25da157f152c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -17,12 +17,10 @@ import (
"fmt"
"sort"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
+ "github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/client_golang/prometheus/internal"
+ "google.golang.org/protobuf/proto"
)
// WrapRegistererWith returns a Registerer wrapping the provided
@@ -206,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
constLabels[ln] = lv
}
// NewDesc will do remaining validations.
- newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+ newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
// Propagate errors if there were any. This will override any error
// created by NewDesc above, i.e. earlier errors get precedence.
if desc.err != nil {
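
wrapDesc is what makes WrapRegistererWith and WrapRegistererWithPrefix work; routing it through V2.NewDesc lets label constraints on the wrapped Desc survive the rewrap. Typical usage, for reference (names are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	// Everything registered through wrapped gets the prefix and the extra
	// const label; wrapDesc rebuilds each Desc accordingly.
	wrapped := prometheus.WrapRegistererWith(
		prometheus.Labels{"subsystem": "grpc"},
		prometheus.WrapRegistererWithPrefix("containerd_", reg),
	)
	wrapped.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "requests_total", Help: "Requests.",
	})) // exposed as containerd_requests_total{subsystem="grpc"}
}
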
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index 35904ea19861..2f15490758ab 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -1,25 +1,38 @@
+// Copyright 2013 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.20.3
// source: io/prometheus/client/metrics.proto
package io_prometheus_client
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type MetricType int32
@@ -38,23 +51,25 @@ const (
MetricType_GAUGE_HISTOGRAM MetricType = 5
)
-var MetricType_name = map[int32]string{
- 0: "COUNTER",
- 1: "GAUGE",
- 2: "SUMMARY",
- 3: "UNTYPED",
- 4: "HISTOGRAM",
- 5: "GAUGE_HISTOGRAM",
-}
-
-var MetricType_value = map[string]int32{
- "COUNTER": 0,
- "GAUGE": 1,
- "SUMMARY": 2,
- "UNTYPED": 3,
- "HISTOGRAM": 4,
- "GAUGE_HISTOGRAM": 5,
-}
+// Enum value maps for MetricType.
+var (
+ MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+ 5: "GAUGE_HISTOGRAM",
+ }
+ MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+ "GAUGE_HISTOGRAM": 5,
+ }
+)
func (x MetricType) Enum() *MetricType {
p := new(MetricType)
@@ -63,449 +78,555 @@ func (x MetricType) Enum() *MetricType {
}
func (x MetricType) String() string {
- return proto.EnumName(MetricType_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MetricType) Descriptor() protoreflect.EnumDescriptor {
+ return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (MetricType) Type() protoreflect.EnumType {
+ return &file_io_prometheus_client_metrics_proto_enumTypes[0]
}
-func (x *MetricType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+func (x MetricType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *MetricType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
if err != nil {
return err
}
- *x = MetricType(value)
+ *x = MetricType(num)
return nil
}
+// Deprecated: Use MetricType.Descriptor instead.
func (MetricType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{0}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
}
type LabelPair struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *LabelPair) Reset() { *m = LabelPair{} }
-func (m *LabelPair) String() string { return proto.CompactTextString(m) }
-func (*LabelPair) ProtoMessage() {}
-func (*LabelPair) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{0}
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
}
-func (m *LabelPair) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LabelPair.Unmarshal(m, b)
-}
-func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
-}
-func (m *LabelPair) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelPair.Merge(m, src)
+func (x *LabelPair) Reset() {
+ *x = LabelPair{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *LabelPair) XXX_Size() int {
- return xxx_messageInfo_LabelPair.Size(m)
+
+func (x *LabelPair) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *LabelPair) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelPair.DiscardUnknown(m)
+
+func (*LabelPair) ProtoMessage() {}
+
+func (x *LabelPair) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_LabelPair proto.InternalMessageInfo
+// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead.
+func (*LabelPair) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
+}
-func (m *LabelPair) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
+func (x *LabelPair) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
}
return ""
}
-func (m *LabelPair) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *LabelPair) GetValue() string {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return ""
}
type Gauge struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Gauge) Reset() { *m = Gauge{} }
-func (m *Gauge) String() string { return proto.CompactTextString(m) }
-func (*Gauge) ProtoMessage() {}
-func (*Gauge) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{1}
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
}
-func (m *Gauge) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Gauge.Unmarshal(m, b)
-}
-func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
-}
-func (m *Gauge) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Gauge.Merge(m, src)
+func (x *Gauge) Reset() {
+ *x = Gauge{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Gauge) XXX_Size() int {
- return xxx_messageInfo_Gauge.Size(m)
+
+func (x *Gauge) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Gauge) XXX_DiscardUnknown() {
- xxx_messageInfo_Gauge.DiscardUnknown(m)
+
+func (*Gauge) ProtoMessage() {}
+
+func (x *Gauge) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Gauge proto.InternalMessageInfo
+// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
+func (*Gauge) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1}
+}
-func (m *Gauge) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Gauge) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Counter struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Counter) Reset() { *m = Counter{} }
-func (m *Counter) String() string { return proto.CompactTextString(m) }
-func (*Counter) ProtoMessage() {}
-func (*Counter) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{2}
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
+ CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
}
-func (m *Counter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Counter.Unmarshal(m, b)
-}
-func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
-}
-func (m *Counter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Counter.Merge(m, src)
+func (x *Counter) Reset() {
+ *x = Counter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Counter) XXX_Size() int {
- return xxx_messageInfo_Counter.Size(m)
+
+func (x *Counter) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Counter) XXX_DiscardUnknown() {
- xxx_messageInfo_Counter.DiscardUnknown(m)
+
+func (*Counter) ProtoMessage() {}
+
+func (x *Counter) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Counter proto.InternalMessageInfo
+// Deprecated: Use Counter.ProtoReflect.Descriptor instead.
+func (*Counter) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2}
+}
-func (m *Counter) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Counter) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
-func (m *Counter) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
+func (x *Counter) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
}
return nil
}
-type Quantile struct {
- Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedTimestamp
+ }
+ return nil
}
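
On the consuming side, the new field is reachable through the generated getters, which tolerate nil receivers all the way down. A sketch of reading it back from a decoded family (the family is assumed to come from a scrape or an expfmt decoder):

package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
)

func printCreated(mf *dto.MetricFamily) {
	for _, m := range mf.GetMetric() {
		// GetCounter and GetCreatedTimestamp both handle nil receivers,
		// so no explicit nil checks are needed along the chain.
		if ct := m.GetCounter().GetCreatedTimestamp(); ct != nil {
			fmt.Printf("%s created at %s\n", mf.GetName(), ct.AsTime())
		}
	}
}

func main() { printCreated(&dto.MetricFamily{}) }
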
-func (m *Quantile) Reset() { *m = Quantile{} }
-func (m *Quantile) String() string { return proto.CompactTextString(m) }
-func (*Quantile) ProtoMessage() {}
-func (*Quantile) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{3}
-}
+type Quantile struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Quantile) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Quantile.Unmarshal(m, b)
-}
-func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
}
-func (m *Quantile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Quantile.Merge(m, src)
+
+func (x *Quantile) Reset() {
+ *x = Quantile{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Quantile) XXX_Size() int {
- return xxx_messageInfo_Quantile.Size(m)
+
+func (x *Quantile) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Quantile) XXX_DiscardUnknown() {
- xxx_messageInfo_Quantile.DiscardUnknown(m)
+
+func (*Quantile) ProtoMessage() {}
+
+func (x *Quantile) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Quantile proto.InternalMessageInfo
+// Deprecated: Use Quantile.ProtoReflect.Descriptor instead.
+func (*Quantile) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3}
+}
-func (m *Quantile) GetQuantile() float64 {
- if m != nil && m.Quantile != nil {
- return *m.Quantile
+func (x *Quantile) GetQuantile() float64 {
+ if x != nil && x.Quantile != nil {
+ return *x.Quantile
}
return 0
}
-func (m *Quantile) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Quantile) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Summary struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
+}
+
+func (x *Summary) Reset() {
+ *x = Summary{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Summary) Reset() { *m = Summary{} }
-func (m *Summary) String() string { return proto.CompactTextString(m) }
-func (*Summary) ProtoMessage() {}
-func (*Summary) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{4}
+func (x *Summary) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Summary) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Summary.Unmarshal(m, b)
-}
-func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
-}
-func (m *Summary) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Summary.Merge(m, src)
-}
-func (m *Summary) XXX_Size() int {
- return xxx_messageInfo_Summary.Size(m)
-}
-func (m *Summary) XXX_DiscardUnknown() {
- xxx_messageInfo_Summary.DiscardUnknown(m)
+func (*Summary) ProtoMessage() {}
+
+func (x *Summary) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Summary proto.InternalMessageInfo
+// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
+func (*Summary) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4}
+}
-func (m *Summary) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
+func (x *Summary) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
}
return 0
}
-func (m *Summary) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
+func (x *Summary) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
}
return 0
}
-func (m *Summary) GetQuantile() []*Quantile {
- if m != nil {
- return m.Quantile
+func (x *Summary) GetQuantile() []*Quantile {
+ if x != nil {
+ return x.Quantile
}
return nil
}
-type Untyped struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedTimestamp
+ }
+ return nil
}
-func (m *Untyped) Reset() { *m = Untyped{} }
-func (m *Untyped) String() string { return proto.CompactTextString(m) }
-func (*Untyped) ProtoMessage() {}
-func (*Untyped) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{5}
-}
+type Untyped struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Untyped) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Untyped.Unmarshal(m, b)
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
}
-func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
-}
-func (m *Untyped) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Untyped.Merge(m, src)
+
+func (x *Untyped) Reset() {
+ *x = Untyped{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Untyped) XXX_Size() int {
- return xxx_messageInfo_Untyped.Size(m)
+
+func (x *Untyped) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Untyped) XXX_DiscardUnknown() {
- xxx_messageInfo_Untyped.DiscardUnknown(m)
+
+func (*Untyped) ProtoMessage() {}
+
+func (x *Untyped) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Untyped proto.InternalMessageInfo
+// Deprecated: Use Untyped.ProtoReflect.Descriptor instead.
+func (*Untyped) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5}
+}
-func (m *Untyped) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Untyped) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Histogram struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"`
+ SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
// Buckets for the conventional histogram.
- Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets.
// Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
// In the future, more bucket schemas may be added using numbers < -4 or > 8.
Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
- ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"`
- ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"`
- ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"`
+ ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket.
+ ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket.
+ ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0.
// Negative buckets for the native histogram.
NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
// Use either "negative_delta" or "negative_count", the former for
// regular histograms with integer counts, the latter for float
// histograms.
- NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"`
- NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"`
+ NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket.
// Positive buckets for the native histogram.
+ // Use a no-op span (offset 0, length 0) for a native histogram without any
+ // observations yet and with a zero_threshold of 0. Otherwise, it would be
+ // indistinguishable from a classic histogram.
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
// Use either "positive_delta" or "positive_count", the former for
// regular histograms with integer counts, the latter for float
// histograms.
- PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"`
- PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
+ // Only used for native histograms. These exemplars MUST have a timestamp.
+ Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
+}
+
+func (x *Histogram) Reset() {
+ *x = Histogram{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Histogram) Reset() { *m = Histogram{} }
-func (m *Histogram) String() string { return proto.CompactTextString(m) }
-func (*Histogram) ProtoMessage() {}
-func (*Histogram) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{6}
+func (x *Histogram) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Histogram) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Histogram.Unmarshal(m, b)
-}
-func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
-}
-func (m *Histogram) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Histogram.Merge(m, src)
-}
-func (m *Histogram) XXX_Size() int {
- return xxx_messageInfo_Histogram.Size(m)
-}
-func (m *Histogram) XXX_DiscardUnknown() {
- xxx_messageInfo_Histogram.DiscardUnknown(m)
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Histogram proto.InternalMessageInfo
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
+func (*Histogram) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6}
+}
-func (m *Histogram) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
+func (x *Histogram) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
}
return 0
}
-func (m *Histogram) GetSampleCountFloat() float64 {
- if m != nil && m.SampleCountFloat != nil {
- return *m.SampleCountFloat
+func (x *Histogram) GetSampleCountFloat() float64 {
+ if x != nil && x.SampleCountFloat != nil {
+ return *x.SampleCountFloat
}
return 0
}
-func (m *Histogram) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
+func (x *Histogram) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
}
return 0
}
-func (m *Histogram) GetBucket() []*Bucket {
- if m != nil {
- return m.Bucket
+func (x *Histogram) GetBucket() []*Bucket {
+ if x != nil {
+ return x.Bucket
+ }
+ return nil
+}
+
+func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedTimestamp
}
return nil
}
-func (m *Histogram) GetSchema() int32 {
- if m != nil && m.Schema != nil {
- return *m.Schema
+func (x *Histogram) GetSchema() int32 {
+ if x != nil && x.Schema != nil {
+ return *x.Schema
}
return 0
}
-func (m *Histogram) GetZeroThreshold() float64 {
- if m != nil && m.ZeroThreshold != nil {
- return *m.ZeroThreshold
+func (x *Histogram) GetZeroThreshold() float64 {
+ if x != nil && x.ZeroThreshold != nil {
+ return *x.ZeroThreshold
}
return 0
}
-func (m *Histogram) GetZeroCount() uint64 {
- if m != nil && m.ZeroCount != nil {
- return *m.ZeroCount
+func (x *Histogram) GetZeroCount() uint64 {
+ if x != nil && x.ZeroCount != nil {
+ return *x.ZeroCount
}
return 0
}
-func (m *Histogram) GetZeroCountFloat() float64 {
- if m != nil && m.ZeroCountFloat != nil {
- return *m.ZeroCountFloat
+func (x *Histogram) GetZeroCountFloat() float64 {
+ if x != nil && x.ZeroCountFloat != nil {
+ return *x.ZeroCountFloat
}
return 0
}
-func (m *Histogram) GetNegativeSpan() []*BucketSpan {
- if m != nil {
- return m.NegativeSpan
+func (x *Histogram) GetNegativeSpan() []*BucketSpan {
+ if x != nil {
+ return x.NegativeSpan
+ }
+ return nil
+}
+
+func (x *Histogram) GetNegativeDelta() []int64 {
+ if x != nil {
+ return x.NegativeDelta
}
return nil
}
-func (m *Histogram) GetNegativeDelta() []int64 {
- if m != nil {
- return m.NegativeDelta
+func (x *Histogram) GetNegativeCount() []float64 {
+ if x != nil {
+ return x.NegativeCount
}
return nil
}
-func (m *Histogram) GetNegativeCount() []float64 {
- if m != nil {
- return m.NegativeCount
+func (x *Histogram) GetPositiveSpan() []*BucketSpan {
+ if x != nil {
+ return x.PositiveSpan
}
return nil
}
-func (m *Histogram) GetPositiveSpan() []*BucketSpan {
- if m != nil {
- return m.PositiveSpan
+func (x *Histogram) GetPositiveDelta() []int64 {
+ if x != nil {
+ return x.PositiveDelta
}
return nil
}
-func (m *Histogram) GetPositiveDelta() []int64 {
- if m != nil {
- return m.PositiveDelta
+func (x *Histogram) GetPositiveCount() []float64 {
+ if x != nil {
+ return x.PositiveCount
}
return nil
}
-func (m *Histogram) GetPositiveCount() []float64 {
- if m != nil {
- return m.PositiveCount
+func (x *Histogram) GetExemplars() []*Exemplar {
+ if x != nil {
+ return x.Exemplars
}
return nil
}
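
The comment on the new Exemplars field is load-bearing: for native histograms, the exemplars hang off the Histogram itself rather than off individual Buckets, and each one must carry a timestamp. A hedged validation sketch for consumers:

package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
)

// nativeExemplarsOK reports whether every exemplar attached to a native
// histogram carries the timestamp the format requires.
func nativeExemplarsOK(h *dto.Histogram) bool {
	for _, e := range h.GetExemplars() {
		if e.GetTimestamp() == nil {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(nativeExemplarsOK(&dto.Histogram{})) // true: nothing to violate
}
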
@@ -513,64 +634,72 @@ func (m *Histogram) GetPositiveCount() []float64 {
// A Bucket of a conventional histogram, each of which is treated as
// an individual counter-like time series by Prometheus.
type Bucket struct {
- CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
- CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"`
- UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order.
+ CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0.
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive.
Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *Bucket) Reset() { *m = Bucket{} }
-func (m *Bucket) String() string { return proto.CompactTextString(m) }
-func (*Bucket) ProtoMessage() {}
-func (*Bucket) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{7}
+func (x *Bucket) Reset() {
+ *x = Bucket{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Bucket) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Bucket.Unmarshal(m, b)
-}
-func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
+func (x *Bucket) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Bucket) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Bucket.Merge(m, src)
-}
-func (m *Bucket) XXX_Size() int {
- return xxx_messageInfo_Bucket.Size(m)
-}
-func (m *Bucket) XXX_DiscardUnknown() {
- xxx_messageInfo_Bucket.DiscardUnknown(m)
+
+func (*Bucket) ProtoMessage() {}
+
+func (x *Bucket) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Bucket proto.InternalMessageInfo
+// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
+func (*Bucket) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7}
+}
-func (m *Bucket) GetCumulativeCount() uint64 {
- if m != nil && m.CumulativeCount != nil {
- return *m.CumulativeCount
+func (x *Bucket) GetCumulativeCount() uint64 {
+ if x != nil && x.CumulativeCount != nil {
+ return *x.CumulativeCount
}
return 0
}
-func (m *Bucket) GetCumulativeCountFloat() float64 {
- if m != nil && m.CumulativeCountFloat != nil {
- return *m.CumulativeCountFloat
+func (x *Bucket) GetCumulativeCountFloat() float64 {
+ if x != nil && x.CumulativeCountFloat != nil {
+ return *x.CumulativeCountFloat
}
return 0
}
-func (m *Bucket) GetUpperBound() float64 {
- if m != nil && m.UpperBound != nil {
- return *m.UpperBound
+func (x *Bucket) GetUpperBound() float64 {
+ if x != nil && x.UpperBound != nil {
+ return *x.UpperBound
}
return 0
}
-func (m *Bucket) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
+func (x *Bucket) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
}
return nil
}
@@ -582,333 +711,689 @@ func (m *Bucket) GetExemplar() *Exemplar {
// structured here (with all the buckets in a single array separate
// from the Spans).
type BucketSpan struct {
- Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"`
- Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *BucketSpan) Reset() { *m = BucketSpan{} }
-func (m *BucketSpan) String() string { return proto.CompactTextString(m) }
-func (*BucketSpan) ProtoMessage() {}
-func (*BucketSpan) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{8}
+ Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative).
+ Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets.
}
-func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BucketSpan.Unmarshal(m, b)
-}
-func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
-}
-func (m *BucketSpan) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BucketSpan.Merge(m, src)
+func (x *BucketSpan) Reset() {
+ *x = BucketSpan{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *BucketSpan) XXX_Size() int {
- return xxx_messageInfo_BucketSpan.Size(m)
+
+func (x *BucketSpan) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *BucketSpan) XXX_DiscardUnknown() {
- xxx_messageInfo_BucketSpan.DiscardUnknown(m)
+
+func (*BucketSpan) ProtoMessage() {}
+
+func (x *BucketSpan) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
+// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead.
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8}
+}
-func (m *BucketSpan) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
+func (x *BucketSpan) GetOffset() int32 {
+ if x != nil && x.Offset != nil {
+ return *x.Offset
}
return 0
}
-func (m *BucketSpan) GetLength() uint32 {
- if m != nil && m.Length != nil {
- return *m.Length
+func (x *BucketSpan) GetLength() uint32 {
+ if x != nil && x.Length != nil {
+ return *x.Length
}
return 0
}
type Exemplar struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Exemplar) Reset() { *m = Exemplar{} }
-func (m *Exemplar) String() string { return proto.CompactTextString(m) }
-func (*Exemplar) ProtoMessage() {}
-func (*Exemplar) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{9}
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style.
}
-func (m *Exemplar) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Exemplar.Unmarshal(m, b)
-}
-func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
-}
-func (m *Exemplar) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Exemplar.Merge(m, src)
+func (x *Exemplar) Reset() {
+ *x = Exemplar{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Exemplar) XXX_Size() int {
- return xxx_messageInfo_Exemplar.Size(m)
+
+func (x *Exemplar) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Exemplar) XXX_DiscardUnknown() {
- xxx_messageInfo_Exemplar.DiscardUnknown(m)
+
+func (*Exemplar) ProtoMessage() {}
+
+func (x *Exemplar) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Exemplar proto.InternalMessageInfo
+// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
+func (*Exemplar) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9}
+}
-func (m *Exemplar) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
+func (x *Exemplar) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
}
return nil
}
-func (m *Exemplar) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Exemplar) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
-func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
+func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
}
return nil
}
type Metric struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
- Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
- Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
- Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
- Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
- TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Metric) Reset() { *m = Metric{} }
-func (m *Metric) String() string { return proto.CompactTextString(m) }
-func (*Metric) ProtoMessage() {}
-func (*Metric) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{10}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+}
+
+func (x *Metric) Reset() {
+ *x = Metric{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Metric) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Metric.Unmarshal(m, b)
+func (x *Metric) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
-}
-func (m *Metric) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metric.Merge(m, src)
-}
-func (m *Metric) XXX_Size() int {
- return xxx_messageInfo_Metric.Size(m)
-}
-func (m *Metric) XXX_DiscardUnknown() {
- xxx_messageInfo_Metric.DiscardUnknown(m)
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Metric proto.InternalMessageInfo
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
+func (*Metric) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10}
+}
-func (m *Metric) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
+func (x *Metric) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
}
return nil
}
-func (m *Metric) GetGauge() *Gauge {
- if m != nil {
- return m.Gauge
+func (x *Metric) GetGauge() *Gauge {
+ if x != nil {
+ return x.Gauge
}
return nil
}
-func (m *Metric) GetCounter() *Counter {
- if m != nil {
- return m.Counter
+func (x *Metric) GetCounter() *Counter {
+ if x != nil {
+ return x.Counter
}
return nil
}
-func (m *Metric) GetSummary() *Summary {
- if m != nil {
- return m.Summary
+func (x *Metric) GetSummary() *Summary {
+ if x != nil {
+ return x.Summary
}
return nil
}
-func (m *Metric) GetUntyped() *Untyped {
- if m != nil {
- return m.Untyped
+func (x *Metric) GetUntyped() *Untyped {
+ if x != nil {
+ return x.Untyped
}
return nil
}
-func (m *Metric) GetHistogram() *Histogram {
- if m != nil {
- return m.Histogram
+func (x *Metric) GetHistogram() *Histogram {
+ if x != nil {
+ return x.Histogram
}
return nil
}
-func (m *Metric) GetTimestampMs() int64 {
- if m != nil && m.TimestampMs != nil {
- return *m.TimestampMs
+func (x *Metric) GetTimestampMs() int64 {
+ if x != nil && x.TimestampMs != nil {
+ return *x.TimestampMs
}
return 0
}
type MetricFamily struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
- Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
- Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MetricFamily) Reset() { *m = MetricFamily{} }
-func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
-func (*MetricFamily) ProtoMessage() {}
-func (*MetricFamily) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{11}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
+}
+
+func (x *MetricFamily) Reset() {
+ *x = MetricFamily{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
-}
-func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
+func (x *MetricFamily) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *MetricFamily) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricFamily.Merge(m, src)
-}
-func (m *MetricFamily) XXX_Size() int {
- return xxx_messageInfo_MetricFamily.Size(m)
-}
-func (m *MetricFamily) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricFamily.DiscardUnknown(m)
+
+func (*MetricFamily) ProtoMessage() {}
+
+func (x *MetricFamily) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
+// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead.
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11}
+}
-func (m *MetricFamily) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
+func (x *MetricFamily) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
}
return ""
}
-func (m *MetricFamily) GetHelp() string {
- if m != nil && m.Help != nil {
- return *m.Help
+func (x *MetricFamily) GetHelp() string {
+ if x != nil && x.Help != nil {
+ return *x.Help
}
return ""
}
-func (m *MetricFamily) GetType() MetricType {
- if m != nil && m.Type != nil {
- return *m.Type
+func (x *MetricFamily) GetType() MetricType {
+ if x != nil && x.Type != nil {
+ return *x.Type
}
return MetricType_COUNTER
}
-func (m *MetricFamily) GetMetric() []*Metric {
- if m != nil {
- return m.Metric
+func (x *MetricFamily) GetMetric() []*Metric {
+ if x != nil {
+ return x.Metric
}
return nil
}
-func init() {
- proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
- proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
- proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
- proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
- proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
- proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
- proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
- proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
- proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
- proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan")
- proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
- proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
- proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
-}
-
-func init() {
- proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258)
-}
-
-var fileDescriptor_d1e5ddb18987a258 = []byte{
- // 896 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44,
- 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48,
- 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92,
- 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0,
- 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b,
- 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3,
- 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90,
- 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25,
- 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb,
- 0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19,
- 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba,
- 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b,
- 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c,
- 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0,
- 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5,
- 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd,
- 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d,
- 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b,
- 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b,
- 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f,
- 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b,
- 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8,
- 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e,
- 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79,
- 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29,
- 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48,
- 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca,
- 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9,
- 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5,
- 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe,
- 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55,
- 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e,
- 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83,
- 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65,
- 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a,
- 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8,
- 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf,
- 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1,
- 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97,
- 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc,
- 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7,
- 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b,
- 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58,
- 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b,
- 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8,
- 0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f,
- 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67,
- 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28,
- 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27,
- 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41,
- 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f,
- 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f,
- 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac,
- 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a,
- 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab,
- 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00,
+func (x *MetricFamily) GetUnit() string {
+ if x != nil && x.Unit != nil {
+ return *x.Unit
+ }
+ return ""
+}
+
+var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
+
+var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65,
+ 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12,
+ 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e,
+ 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61,
+ 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
+ 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c,
+ 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48,
+ 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
+ 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61,
+ 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d,
+ 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+ 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+ 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47,
+ 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
+ 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
+ 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72,
+ 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52,
+ 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12,
+ 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e,
+ 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+ 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+ 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d,
+ 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a,
+ 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+ 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70,
+ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70,
+ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20,
+ 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
+ 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65,
+ 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75,
+ 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a,
+ 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63,
+ 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
+ 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75,
+ 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42,
+ 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+ 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16,
+ 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06,
+ 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91,
+ 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62,
+ 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a,
+ 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65,
+ 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+ 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72,
+ 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+ 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79,
+ 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70,
+ 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73,
+ 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+ 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d,
+ 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46,
+ 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c,
+ 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
+ 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a,
+ 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43,
+ 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47,
+ 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02,
+ 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a,
+ 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f,
+ 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10,
+ 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f,
+ 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74,
+}
+
+var (
+ file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once
+ file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc
+)
+
+func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte {
+ file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() {
+ file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData)
+ })
+ return file_io_prometheus_client_metrics_proto_rawDescData
+}
+
+var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
+ (MetricType)(0), // 0: io.prometheus.client.MetricType
+ (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair
+ (*Gauge)(nil), // 2: io.prometheus.client.Gauge
+ (*Counter)(nil), // 3: io.prometheus.client.Counter
+ (*Quantile)(nil), // 4: io.prometheus.client.Quantile
+ (*Summary)(nil), // 5: io.prometheus.client.Summary
+ (*Untyped)(nil), // 6: io.prometheus.client.Untyped
+ (*Histogram)(nil), // 7: io.prometheus.client.Histogram
+ (*Bucket)(nil), // 8: io.prometheus.client.Bucket
+ (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan
+ (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar
+ (*Metric)(nil), // 11: io.prometheus.client.Metric
+ (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily
+ (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
+}
+var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
+ 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
+ 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp
+ 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
+ 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp
+ 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
+ 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
+ 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
+ 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
+ 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar
+ 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
+ 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
+ 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
+ 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
+ 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
+ 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
+ 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
+ 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
+ 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
+ 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
+ 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
+ 20, // [20:20] is the sub-list for method output_type
+ 20, // [20:20] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
+}
+
+func init() { file_io_prometheus_client_metrics_proto_init() }
+func file_io_prometheus_client_metrics_proto_init() {
+ if File_io_prometheus_client_metrics_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LabelPair); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Gauge); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Counter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Quantile); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Summary); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Untyped); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Histogram); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bucket); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BucketSpan); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Exemplar); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metric); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricFamily); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 12,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_io_prometheus_client_metrics_proto_goTypes,
+ DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs,
+ EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes,
+ MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes,
+ }.Build()
+ File_io_prometheus_client_metrics_proto = out.File
+ file_io_prometheus_client_metrics_proto_rawDesc = nil
+ file_io_prometheus_client_metrics_proto_goTypes = nil
+ file_io_prometheus_client_metrics_proto_depIdxs = nil
}
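// Illustrative sketch (not part of the vendored patch): the regenerated
// client_model above adds an optional Unit field (field 5) to MetricFamily,
// read through the usual nil-safe getter. Import paths are the vendored ones.
package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("response_bytes_total"),
		Unit: proto.String("bytes"), // new in this version of client_model
	}
	// GetUnit returns "" when the field is unset, so callers can adopt the
	// new field without nil checks.
	fmt.Println(mf.GetUnit())
}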
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 7657f841d632..25cfaa21643e 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -14,6 +14,7 @@
package expfmt
import (
+ "bufio"
"fmt"
"io"
"math"
@@ -21,8 +22,8 @@ import (
"net/http"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/encoding/protodelim"
- "github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/common/model"
)
@@ -44,7 +45,7 @@ func ResponseFormat(h http.Header) Format {
mediatype, params, err := mime.ParseMediaType(ct)
if err != nil {
- return FmtUnknown
+ return fmtUnknown
}
const textType = "text/plain"
@@ -52,42 +53,44 @@ func ResponseFormat(h http.Header) Format {
switch mediatype {
case ProtoType:
if p, ok := params["proto"]; ok && p != ProtoProtocol {
- return FmtUnknown
+ return fmtUnknown
}
if e, ok := params["encoding"]; ok && e != "delimited" {
- return FmtUnknown
+ return fmtUnknown
}
- return FmtProtoDelim
+ return fmtProtoDelim
case textType:
if v, ok := params["version"]; ok && v != TextVersion {
- return FmtUnknown
+ return fmtUnknown
}
- return FmtText
+ return fmtText
}
- return FmtUnknown
+ return fmtUnknown
}
// NewDecoder returns a new decoder based on the given input format.
// If the input format does not imply otherwise, a text format decoder is returned.
func NewDecoder(r io.Reader, format Format) Decoder {
- switch format {
- case FmtProtoDelim:
- return &protoDecoder{r: r}
+ switch format.FormatType() {
+ case TypeProtoDelim:
+ return &protoDecoder{r: bufio.NewReader(r)}
}
return &textDecoder{r: r}
}
// protoDecoder implements the Decoder interface for protocol buffers.
type protoDecoder struct {
- r io.Reader
+ r protodelim.Reader
}
// Decode implements the Decoder interface.
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
- _, err := pbutil.ReadDelimited(d.r, v)
- if err != nil {
+ opts := protodelim.UnmarshalOptions{
+ MaxSize: -1,
+ }
+ if err := opts.UnmarshalFrom(d.r, v); err != nil {
return err
}
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
@@ -115,32 +118,31 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
// textDecoder implements the Decoder interface for the text protocol.
type textDecoder struct {
r io.Reader
- p TextParser
- fams []*dto.MetricFamily
+ fams map[string]*dto.MetricFamily
+ err error
}
// Decode implements the Decoder interface.
func (d *textDecoder) Decode(v *dto.MetricFamily) error {
- // TODO(fabxc): Wrap this as a line reader to make streaming safer.
- if len(d.fams) == 0 {
- // No cached metric families, read everything and parse metrics.
- fams, err := d.p.TextToMetricFamilies(d.r)
- if err != nil {
- return err
- }
- if len(fams) == 0 {
- return io.EOF
- }
- d.fams = make([]*dto.MetricFamily, 0, len(fams))
- for _, f := range fams {
- d.fams = append(d.fams, f)
+ if d.err == nil {
+ // Read all metrics in one shot.
+ var p TextParser
+ d.fams, d.err = p.TextToMetricFamilies(d.r)
+ // If we don't get an error, store io.EOF for the end.
+ if d.err == nil {
+ d.err = io.EOF
}
}
-
- *v = *d.fams[0]
- d.fams = d.fams[1:]
-
- return nil
+ // Pick off one MetricFamily per Decode until there's nothing left.
+ for key, fam := range d.fams {
+ v.Name = fam.Name
+ v.Help = fam.Help
+ v.Type = fam.Type
+ v.Metric = fam.Metric
+ delete(d.fams, key)
+ return nil
+ }
+ return d.err
}
// SampleDecoder wraps a Decoder to extract samples from the metric families
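// Usage sketch (illustrative, not part of the vendored patch): after this
// change, NewDecoder dispatches on format.FormatType(), the proto decoder
// reads delimited messages via protodelim, and the text decoder hands back
// one MetricFamily per Decode call, finishing with io.EOF.
package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	in := strings.NewReader("# TYPE requests_total counter\nrequests_total 42\n")
	dec := expfmt.NewDecoder(in, expfmt.NewFormat(expfmt.TypeTextPlain))
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err == io.EOF {
			break // all families consumed
		} else if err != nil {
			panic(err)
		}
		fmt.Println(mf.GetName(), mf.GetType())
	}
}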
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index 64dc0eb40c28..ff5ef7a9d920 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -18,9 +18,12 @@ import (
"io"
"net/http"
- "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/matttproud/golang_protobuf_extensions/pbutil"
- "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+ "google.golang.org/protobuf/encoding/protodelim"
+ "google.golang.org/protobuf/encoding/prototext"
+
+ "github.com/prometheus/common/model"
+
+ "github.com/munnerz/goautoneg"
dto "github.com/prometheus/client_model/go"
)
@@ -60,23 +63,32 @@ func (ec encoderCloser) Close() error {
// as the support is still experimental. To include the option to negotiate
// FmtOpenMetrics, use NegotiateOpenMetrics.
func Negotiate(h http.Header) Format {
+ escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+ switch Format(escapeParam) {
+ case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+ escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+ default:
+ // If the escaping parameter is unknown, ignore it.
+ }
+ }
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
- return FmtProtoDelim
+ return fmtProtoDelim + escapingScheme
case "text":
- return FmtProtoText
+ return fmtProtoText + escapingScheme
case "compact-text":
- return FmtProtoCompact
+ return fmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return FmtText
+ return fmtText + escapingScheme
}
}
- return FmtText
+ return fmtText + escapingScheme
}
// NegotiateIncludingOpenMetrics works like Negotiate but includes
@@ -84,26 +96,40 @@ func Negotiate(h http.Header) Format {
// temporary and will disappear once FmtOpenMetrics is fully supported and as
// such may be negotiated by the normal Negotiate function.
func NegotiateIncludingOpenMetrics(h http.Header) Format {
+ escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+ switch Format(escapeParam) {
+ case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+ escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+ default:
+ // If the escaping parameter is unknown, ignore it.
+ }
+ }
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
- return FmtProtoDelim
+ return fmtProtoDelim + escapingScheme
case "text":
- return FmtProtoText
+ return fmtProtoText + escapingScheme
case "compact-text":
- return FmtProtoCompact
+ return fmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return FmtText
+ return fmtText + escapingScheme
}
- if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
- return FmtOpenMetrics
+ if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
+ switch ver {
+ case OpenMetricsVersion_1_0_0:
+ return fmtOpenMetrics_1_0_0 + escapingScheme
+ default:
+ return fmtOpenMetrics_0_0_1 + escapingScheme
+ }
}
}
- return FmtText
+ return fmtText + escapingScheme
}
// NewEncoder returns a new encoder based on content type negotiation. All
@@ -112,44 +138,54 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
// for FmtOpenMetrics, but a future (breaking) release will add the Close method
// to the Encoder interface directly. The current version of the Encoder
// interface is kept for backwards compatibility.
-func NewEncoder(w io.Writer, format Format) Encoder {
- switch format {
- case FmtProtoDelim:
+// In cases where the Format does not allow for UTF-8 names, the global
+// NameEscapingScheme will be applied.
+//
+// NewEncoder can be called with additional options to customize the OpenMetrics text output.
+// For example:
+// NewEncoder(w, NewFormat(TypeOpenMetrics), WithCreatedLines())
+//
+// Extra options are ignored for all other formats.
+func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
+ escapingScheme := format.ToEscapingScheme()
+
+ switch format.FormatType() {
+ case TypeProtoDelim:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := pbutil.WriteDelimited(w, v)
+ _, err := protodelim.MarshalTo(w, v)
return err
},
close: func() error { return nil },
}
- case FmtProtoCompact:
+ case TypeProtoCompact:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, v.String())
+ _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String())
return err
},
close: func() error { return nil },
}
- case FmtProtoText:
+ case TypeProtoText:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme)))
return err
},
close: func() error { return nil },
}
- case FmtText:
+ case TypeTextPlain:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := MetricFamilyToText(w, v)
+ _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme))
return err
},
close: func() error { return nil },
}
- case FmtOpenMetrics:
+ case TypeOpenMetrics:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := MetricFamilyToOpenMetrics(w, v)
+ _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
return err
},
close: func() error {
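// Usage sketch (illustrative, not part of the vendored patch): Negotiate now
// honors an "escaping" Accept parameter and appends the negotiated scheme to
// the returned Format; NewEncoder then applies that scheme when writing.
package main

import (
	"net/http"
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	h := http.Header{}
	h.Set("Accept", "text/plain; version=0.0.4; escaping=underscores")
	format := expfmt.Negotiate(h) // text format plus "; escaping=underscores"

	enc := expfmt.NewEncoder(os.Stdout, format)
	mf := &dto.MetricFamily{
		Name:   proto.String("requests_total"),
		Type:   dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{Counter: &dto.Counter{Value: proto.Float64(42)}}},
	}
	if err := enc.Encode(mf); err != nil {
		panic(err)
	}
}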
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index 0f176fa64f25..051b38cd1781 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -14,28 +14,164 @@
// Package expfmt contains tools for reading and writing Prometheus metrics.
package expfmt
+import (
+ "fmt"
+ "strings"
+
+ "github.com/prometheus/common/model"
+)
+
// Format specifies the HTTP content type of the different wire protocols.
type Format string
-// Constants to assemble the Content-Type values for the different wire protocols.
+// Constants to assemble the Content-Type values for the different wire
+// protocols. The Content-Type strings here are all for the legacy exposition
+// formats, where valid characters for metric names and label names are limited.
+// Support for arbitrary UTF-8 characters in those names is already partially
+// implemented in this module (see model.ValidationScheme), but to actually use
+// it on the wire, new content-type strings will have to be agreed upon and
+// added here.
const (
- TextVersion = "0.0.4"
- ProtoType = `application/vnd.google.protobuf`
- ProtoProtocol = `io.prometheus.client.MetricFamily`
- ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
- OpenMetricsType = `application/openmetrics-text`
- OpenMetricsVersion = "0.0.1"
-
- // The Content-Type values for the different wire protocols.
- FmtUnknown Format = ``
- FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
- FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
- FmtProtoText Format = ProtoFmt + ` encoding=text`
- FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
- FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
+ TextVersion = "0.0.4"
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+ OpenMetricsType = `application/openmetrics-text`
+ OpenMetricsVersion_0_0_1 = "0.0.1"
+ OpenMetricsVersion_1_0_0 = "1.0.0"
+
+ // The Content-Type values for the different wire protocols. Note that these
+ // values are now unexported. If code was relying on comparisons to these
+ // constants, instead use FormatType().
+ fmtUnknown Format = ``
+ fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+ fmtProtoDelim Format = protoFmt + ` encoding=delimited`
+ fmtProtoText Format = protoFmt + ` encoding=text`
+ fmtProtoCompact Format = protoFmt + ` encoding=compact-text`
+ fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+ fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
)
const (
hdrContentType = "Content-Type"
hdrAccept = "Accept"
)
+
+// FormatType is a Go enum representing the overall category for the given
+// Format. As the number of Format permutations increases, basic string
+// comparisons are no longer feasible, so this enum captures the most useful
+// high-level attribute of the Format string.
+type FormatType int
+
+const (
+ TypeUnknown FormatType = iota
+ TypeProtoCompact
+ TypeProtoDelim
+ TypeProtoText
+ TypeTextPlain
+ TypeOpenMetrics
+)
+
+// NewFormat generates a new Format from the type provided. It is mostly used
+// for tests; most Formats should be generated as part of content negotiation in
+// encode.go. If a type has more than one version, the latest version will be
+// returned.
+func NewFormat(t FormatType) Format {
+ switch t {
+ case TypeProtoCompact:
+ return fmtProtoCompact
+ case TypeProtoDelim:
+ return fmtProtoDelim
+ case TypeProtoText:
+ return fmtProtoText
+ case TypeTextPlain:
+ return fmtText
+ case TypeOpenMetrics:
+ return fmtOpenMetrics_1_0_0
+ default:
+ return fmtUnknown
+ }
+}
+
+// NewOpenMetricsFormat generates a new OpenMetrics format matching the
+// specified version number.
+func NewOpenMetricsFormat(version string) (Format, error) {
+ if version == OpenMetricsVersion_0_0_1 {
+ return fmtOpenMetrics_0_0_1, nil
+ }
+ if version == OpenMetricsVersion_1_0_0 {
+ return fmtOpenMetrics_1_0_0, nil
+ }
+ return fmtUnknown, fmt.Errorf("unknown open metrics version string")
+}
+
+// FormatType deduces an overall FormatType for the given format.
+func (f Format) FormatType() FormatType {
+ toks := strings.Split(string(f), ";")
+ params := make(map[string]string)
+ for i, t := range toks {
+ if i == 0 {
+ continue
+ }
+ args := strings.Split(t, "=")
+ if len(args) != 2 {
+ continue
+ }
+ params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1])
+ }
+
+ switch strings.TrimSpace(toks[0]) {
+ case ProtoType:
+ if params["proto"] != ProtoProtocol {
+ return TypeUnknown
+ }
+ switch params["encoding"] {
+ case "delimited":
+ return TypeProtoDelim
+ case "text":
+ return TypeProtoText
+ case "compact-text":
+ return TypeProtoCompact
+ default:
+ return TypeUnknown
+ }
+ case OpenMetricsType:
+ if params["charset"] != "utf-8" {
+ return TypeUnknown
+ }
+ return TypeOpenMetrics
+ case "text/plain":
+ v, ok := params["version"]
+ if !ok {
+ return TypeTextPlain
+ }
+ if v == TextVersion {
+ return TypeTextPlain
+ }
+ return TypeUnknown
+ default:
+ return TypeUnknown
+ }
+}
+
+// ToEscapingScheme returns an EscapingScheme depending on the Format. If the
+// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a
+// valid "escaping" term exists, that will be used. Otherwise, the global
+// default will be returned.
+func (format Format) ToEscapingScheme() model.EscapingScheme {
+ for _, p := range strings.Split(string(format), ";") {
+ toks := strings.Split(p, "=")
+ if len(toks) != 2 {
+ continue
+ }
+ key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
+ if key == model.EscapingKey {
+ scheme, err := model.ToEscapingScheme(value)
+ if err != nil {
+ return model.NameEscapingScheme
+ }
+ return scheme
+ }
+ }
+ return model.NameEscapingScheme
+}
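// Quick sketch (illustrative, not part of the vendored patch) of the new
// Format helpers: FormatType buckets a content-type string into a coarse
// category, and ToEscapingScheme pulls out the escaping term, falling back to
// the global model.NameEscapingScheme when none is present.
package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
)

func main() {
	f := expfmt.Format("application/openmetrics-text; version=1.0.0; charset=utf-8; escaping=dots")
	fmt.Println(f.FormatType() == expfmt.TypeOpenMetrics) // true
	fmt.Println(f.ToEscapingScheme())                     // the "dots" escaping scheme

	// Unrecognized media types yield TypeUnknown rather than an error.
	fmt.Println(expfmt.Format("application/json").FormatType() == expfmt.TypeUnknown) // true
}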
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
index f819e4f8b549..dfac962a4e7e 100644
--- a/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -21,8 +21,8 @@ import "bytes"
// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
//
-// go-fuzz-build github.com/prometheus/common/expfmt
-// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
//
// Further input samples should go in the folder fuzz/corpus.
func Fuzz(in []byte) int {
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 9d94ae9effe3..353c5e93f92d 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -22,11 +22,47 @@ import (
"strconv"
"strings"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
+type encoderOption struct {
+ withCreatedLines bool
+ withUnit bool
+}
+
+type EncoderOption func(*encoderOption)
+
+// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
+// to include _created lines (See
+// /~https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1).
+// Created timestamps can improve the accuracy of series reset detection, but
+// come with a bandwidth cost.
+//
+// At the time of writing, created timestamp ingestion is still experimental in
+// Prometheus and needs to be enabled with the feature flag
+// `--enable-feature=created-timestamp-zero-ingestion`, and breaking changes are
+// still possible. Therefore, it is recommended to use this feature with caution.
+func WithCreatedLines() EncoderOption {
+ return func(t *encoderOption) {
+ t.withCreatedLines = true
+ }
+}
+
+// WithUnit is an EncoderOption that enables a set unit to be written to the
+// output and to be added to the metric name, if it is not there already, as a
+// suffix. Without opting in this way, the unit will neither be added to the
+// metric name nor passed on to the output, even if it is declared in the
+// *dto.MetricFamily struct, i.e. even if in.Unit != nil.
+func WithUnit() EncoderOption {
+ return func(t *encoderOption) {
+ t.withUnit = true
+ }
+}
+
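// Usage sketch (illustrative, not part of the vendored patch): the two new
// options opt in to "# UNIT" output plus unit-suffix handling, and to
// "_created" lines, when rendering OpenMetrics text.
package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("response_bytes_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Unit: proto.String("bytes"),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{
				Value:            proto.Float64(42),
				CreatedTimestamp: timestamppb.Now(),
			},
		}},
	}
	// Without WithUnit/WithCreatedLines, neither the "# UNIT" line nor the
	// "_created" sample would be emitted.
	if _, err := expfmt.MetricFamilyToOpenMetrics(
		os.Stdout, mf, expfmt.WithCreatedLines(), expfmt.WithUnit(),
	); err != nil {
		panic(err)
	}
}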
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
// the number of bytes written and any error encountered. The output will have
@@ -35,6 +71,18 @@ import (
// sanity checks. If the input contains duplicate metrics or invalid metric or
// label names, the conversion will result in invalid text format output.
//
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and
+// no error will be thrown in this case.
+//
// This function fulfills the type 'expfmt.encoder'.
//
// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
@@ -46,21 +94,35 @@ import (
// missing features and peculiarities to avoid complications when switching from
// Prometheus to OpenMetrics or vice versa:
//
-// - Counters are expected to have the `_total` suffix in their metric name. In
-// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
-// line. A counter with a missing `_total` suffix is not an error. However,
-// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
-// output.
+// - Counters are expected to have the `_total` suffix in their metric name. In
+// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
+// lines. A counter with a missing `_total` suffix is not an error. However,
+// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
+// output.
//
-// - No support for the following (optional) features: `# UNIT` line, `_created`
-// line, info type, stateset type, gaugehistogram type.
+// - According to the OM specs, the `# UNIT` line is optional, but if populated,
+// the unit has to be present in the metric name as its suffix
+// (see /~https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit).
+// However, to accommodate scenarios where such a change to the metric name is
+// not desirable, users have to explicitly opt in (see the description of the
+// WithUnit function above) for the unit to be included in the output AND in
+// the metric name as a suffix; without opting in, neither happens.
//
-// - The size of exemplar labels is not checked (i.e. it's possible to create
-// exemplars that are larger than allowed by the OpenMetrics specification).
+// - No support for the following (optional) features: info type,
+// stateset type, gaugehistogram type.
//
-// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
-// with a `NaN` value.)
-func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
+// - The size of exemplar labels is not checked (i.e. it's possible to create
+// exemplars that are larger than allowed by the OpenMetrics specification).
+//
+// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
+// with a `NaN` value.)
+func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
+ toOM := encoderOption{}
+ for _, option := range options {
+ option(&toOM)
+ }
+
name := in.GetName()
if name == "" {
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
@@ -83,12 +145,15 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
var (
- n int
- metricType = in.GetType()
- shortName = name
+ n int
+ metricType = in.GetType()
+ compliantName = name
)
- if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
- shortName = name[:len(name)-6]
+ if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
+ compliantName = name[:len(name)-6]
+ }
+ if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
+ compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
}
// Comments, first HELP, then TYPE.
@@ -98,7 +163,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
- n, err = w.WriteString(shortName)
+ n, err = writeName(w, compliantName)
written += n
if err != nil {
return
@@ -124,7 +189,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
- n, err = w.WriteString(shortName)
+ n, err = writeName(w, compliantName)
written += n
if err != nil {
return
@@ -151,55 +216,89 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
+ if toOM.withUnit && in.Unit != nil {
+ n, err = w.WriteString("# UNIT ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeName(w, compliantName)
+ written += n
+ if err != nil {
+ return
+ }
+
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return
+ }
+ n, err = writeEscapedString(w, *in.Unit, true)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return
+ }
+ }
+
+ var createdTsBytesWritten int
// Finally the samples, one line for each.
+ if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
+ compliantName = compliantName + "_total"
+ }
for _, metric := range in.Metric {
switch metricType {
case dto.MetricType_COUNTER:
if metric.Counter == nil {
return written, fmt.Errorf(
- "expected counter in metric %s %s", name, metric,
+ "expected counter in metric %s %s", compliantName, metric,
)
}
- // Note that we have ensured above that either the name
- // ends on `_total` or that the rendered type is
- // `unknown`. Therefore, no `_total` must be added here.
n, err = writeOpenMetricsSample(
- w, name, "", metric, "", 0,
+ w, compliantName, "", metric, "", 0,
metric.Counter.GetValue(), 0, false,
metric.Counter.Exemplar,
)
+ if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
+ createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
+ n += createdTsBytesWritten
+ }
case dto.MetricType_GAUGE:
if metric.Gauge == nil {
return written, fmt.Errorf(
- "expected gauge in metric %s %s", name, metric,
+ "expected gauge in metric %s %s", compliantName, metric,
)
}
n, err = writeOpenMetricsSample(
- w, name, "", metric, "", 0,
+ w, compliantName, "", metric, "", 0,
metric.Gauge.GetValue(), 0, false,
nil,
)
case dto.MetricType_UNTYPED:
if metric.Untyped == nil {
return written, fmt.Errorf(
- "expected untyped in metric %s %s", name, metric,
+ "expected untyped in metric %s %s", compliantName, metric,
)
}
n, err = writeOpenMetricsSample(
- w, name, "", metric, "", 0,
+ w, compliantName, "", metric, "", 0,
metric.Untyped.GetValue(), 0, false,
nil,
)
case dto.MetricType_SUMMARY:
if metric.Summary == nil {
return written, fmt.Errorf(
- "expected summary in metric %s %s", name, metric,
+ "expected summary in metric %s %s", compliantName, metric,
)
}
for _, q := range metric.Summary.Quantile {
n, err = writeOpenMetricsSample(
- w, name, "", metric,
+ w, compliantName, "", metric,
model.QuantileLabel, q.GetQuantile(),
q.GetValue(), 0, false,
nil,
@@ -210,7 +309,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
}
n, err = writeOpenMetricsSample(
- w, name, "_sum", metric, "", 0,
+ w, compliantName, "_sum", metric, "", 0,
metric.Summary.GetSampleSum(), 0, false,
nil,
)
@@ -219,20 +318,24 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
return
}
n, err = writeOpenMetricsSample(
- w, name, "_count", metric, "", 0,
+ w, compliantName, "_count", metric, "", 0,
0, metric.Summary.GetSampleCount(), true,
nil,
)
+ if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
+ createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
+ n += createdTsBytesWritten
+ }
case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil {
return written, fmt.Errorf(
- "expected histogram in metric %s %s", name, metric,
+ "expected histogram in metric %s %s", compliantName, metric,
)
}
infSeen := false
for _, b := range metric.Histogram.Bucket {
n, err = writeOpenMetricsSample(
- w, name, "_bucket", metric,
+ w, compliantName, "_bucket", metric,
model.BucketLabel, b.GetUpperBound(),
0, b.GetCumulativeCount(), true,
b.Exemplar,
@@ -247,7 +350,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
if !infSeen {
n, err = writeOpenMetricsSample(
- w, name, "_bucket", metric,
+ w, compliantName, "_bucket", metric,
model.BucketLabel, math.Inf(+1),
0, metric.Histogram.GetSampleCount(), true,
nil,
@@ -258,7 +361,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
}
n, err = writeOpenMetricsSample(
- w, name, "_sum", metric, "", 0,
+ w, compliantName, "_sum", metric, "", 0,
metric.Histogram.GetSampleSum(), 0, false,
nil,
)
@@ -267,13 +370,17 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
return
}
n, err = writeOpenMetricsSample(
- w, name, "_count", metric, "", 0,
+ w, compliantName, "_count", metric, "", 0,
0, metric.Histogram.GetSampleCount(), true,
nil,
)
+ if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
+ createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
+ n += createdTsBytesWritten
+ }
default:
return written, fmt.Errorf(
- "unexpected type in metric %s %s", name, metric,
+ "unexpected type in metric %s %s", compliantName, metric,
)
}
written += n
@@ -303,21 +410,9 @@ func writeOpenMetricsSample(
floatValue float64, intValue uint64, useIntValue bool,
exemplar *dto.Exemplar,
) (int, error) {
- var written int
- n, err := w.WriteString(name)
- written += n
- if err != nil {
- return written, err
- }
- if suffix != "" {
- n, err = w.WriteString(suffix)
- written += n
- if err != nil {
- return written, err
- }
- }
- n, err = writeOpenMetricsLabelPairs(
- w, metric.Label, additionalLabelName, additionalLabelValue,
+ written := 0
+ n, err := writeOpenMetricsNameAndLabelPairs(
+ w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
@@ -350,7 +445,7 @@ func writeOpenMetricsSample(
return written, err
}
}
- if exemplar != nil {
+ if exemplar != nil && len(exemplar.Label) > 0 {
n, err = writeExemplar(w, exemplar)
written += n
if err != nil {
@@ -365,27 +460,58 @@ func writeOpenMetricsSample(
return written, nil
}
-// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
-// in OpenMetrics style.
-func writeOpenMetricsLabelPairs(
+// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but
+// formats the float in OpenMetrics style.
+func writeOpenMetricsNameAndLabelPairs(
w enhancedWriter,
+ name string,
in []*dto.LabelPair,
additionalLabelName string, additionalLabelValue float64,
) (int, error) {
- if len(in) == 0 && additionalLabelName == "" {
- return 0, nil
- }
var (
- written int
- separator byte = '{'
+ written int
+ separator byte = '{'
+ metricInsideBraces = false
)
+
+ if name != "" {
+ // If the name does not pass the legacy validity check, we must put the
+ // metric name inside the braces, quoted.
+ if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ metricInsideBraces = true
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+
+ n, err := writeName(w, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+
+ if len(in) == 0 && additionalLabelName == "" {
+ if metricInsideBraces {
+ err := w.WriteByte('}')
+ written++
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+ }
+
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
- n, err := w.WriteString(lp.GetName())
+ n, err := writeName(w, lp.GetName())
written += n
if err != nil {
return written, err
@@ -442,6 +568,49 @@ func writeOpenMetricsLabelPairs(
return written, nil
}
+// writeOpenMetricsCreated writes the created timestamp for a single time series
+// following the OpenMetrics text format to w. It is given the metric name, the
+// metric proto message itself, optionally a suffix to be trimmed (e.g. '_total'
+// for counters), an additional label name with a float64 value (use an empty
+// string as the label name if not required), and the created timestamp itself.
+// The function returns the number of bytes written and any error encountered.
+func writeOpenMetricsCreated(w enhancedWriter,
+ name, suffixToTrim string, metric *dto.Metric,
+ additionalLabelName string, additionalLabelValue float64,
+ createdTimestamp *timestamppb.Timestamp,
+) (int, error) {
+ written := 0
+ n, err := writeOpenMetricsNameAndLabelPairs(
+ w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+
+ // TODO(beorn7): Format this directly from components of ts to
+ // avoid overflow/underflow and precision issues of the float
+ // conversion.
+ n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
// function returns the number of bytes written and any error encountered.
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
@@ -451,7 +620,7 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
if err != nil {
return written, err
}
- n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
+ n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0)
written += n
if err != nil {
return written, err
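
A minimal usage sketch for the new variadic signature. The option constructors expfmt.WithUnit and expfmt.WithCreatedLines are assumed from the doc comments and the withUnit/withCreatedLines fields above; only the MetricFamilyToOpenMetrics signature itself is confirmed by this diff.

package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Unit: proto.String("requests"),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{
				Value:            proto.Float64(42),
				CreatedTimestamp: timestamppb.Now(),
			},
		}},
	}
	// Expected output, roughly:
	//   # TYPE http_requests counter
	//   # UNIT http_requests requests
	//   http_requests_total 42.0
	//   http_requests_created <unix timestamp>
	// The unit suffix is not appended twice here because the stripped name
	// "http_requests" already ends in "_requests".
	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf,
		expfmt.WithUnit(), expfmt.WithCreatedLines()); err != nil {
		panic(err)
	}
}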
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index 5ba503b06547..f9b8265a9ec2 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -17,7 +17,6 @@ import (
"bufio"
"fmt"
"io"
- "io/ioutil"
"math"
"strconv"
"strings"
@@ -44,7 +43,7 @@ const (
var (
bufPool = sync.Pool{
New: func() interface{} {
- return bufio.NewWriter(ioutil.Discard)
+ return bufio.NewWriter(io.Discard)
},
}
numBufPool = sync.Pool{
@@ -63,6 +62,18 @@ var (
// contains duplicate metrics or invalid metric or label names, the conversion
// will result in invalid text format output.
//
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
+// no error will be returned in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
+// no error will be returned in this case.
+//
// This method fulfills the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
// Fail-fast checks.
@@ -99,7 +110,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
if err != nil {
return
}
- n, err = w.WriteString(name)
+ n, err = writeName(w, name)
written += n
if err != nil {
return
@@ -125,7 +136,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
if err != nil {
return
}
- n, err = w.WriteString(name)
+ n, err = writeName(w, name)
written += n
if err != nil {
return
@@ -281,21 +292,9 @@ func writeSample(
additionalLabelName string, additionalLabelValue float64,
value float64,
) (int, error) {
- var written int
- n, err := w.WriteString(name)
- written += n
- if err != nil {
- return written, err
- }
- if suffix != "" {
- n, err = w.WriteString(suffix)
- written += n
- if err != nil {
- return written, err
- }
- }
- n, err = writeLabelPairs(
- w, metric.Label, additionalLabelName, additionalLabelValue,
+ written := 0
+ n, err := writeNameAndLabelPairs(
+ w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
@@ -331,32 +330,64 @@ func writeSample(
return written, nil
}
-// writeLabelPairs converts a slice of LabelPair proto messages plus the
-// explicitly given additional label pair into text formatted as required by the
-// text format and writes it to 'w'. An empty slice in combination with an empty
-// string 'additionalLabelName' results in nothing being written. Otherwise, the
-// label pairs are written, escaped as required by the text format, and enclosed
-// in '{...}'. The function returns the number of bytes written and any error
-// encountered.
-func writeLabelPairs(
+// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the
+// explicitly given metric name and additional label pair into text formatted as
+// required by the text format and writes it to 'w'. An empty slice in
+// combination with an empty string 'additionalLabelName' results in nothing
+// being written. Otherwise, the label pairs are written, escaped as required by
+// the text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered. If the metric name is not
+// legacy-valid, it will be put inside the brackets as well. Legacy-invalid
+// label names will also be quoted.
+func writeNameAndLabelPairs(
w enhancedWriter,
+ name string,
in []*dto.LabelPair,
additionalLabelName string, additionalLabelValue float64,
) (int, error) {
- if len(in) == 0 && additionalLabelName == "" {
- return 0, nil
- }
var (
- written int
- separator byte = '{'
+ written int
+ separator byte = '{'
+ metricInsideBraces = false
)
+
+ if name != "" {
+ // If the name does not pass the legacy validity check, we must put the
+ // metric name inside the braces.
+ if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ metricInsideBraces = true
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ n, err := writeName(w, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+
+ if len(in) == 0 && additionalLabelName == "" {
+ if metricInsideBraces {
+ err := w.WriteByte('}')
+ written++
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+ }
+
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
- n, err := w.WriteString(lp.GetName())
+ n, err := writeName(w, lp.GetName())
written += n
if err != nil {
return written, err
@@ -463,3 +494,27 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
numBufPool.Put(bp)
return written, err
}
+
+// writeName writes a string as-is if it complies with the legacy naming
+// scheme, or escapes it in double quotes if not.
+func writeName(w enhancedWriter, name string) (int, error) {
+ if model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ return w.WriteString(name)
+ }
+ var written int
+ var err error
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
+ var n int
+ n, err = writeEscapedString(w, name, true)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ return written, err
+}
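
To make the quoting rule above concrete, a small sketch (the %q escaping is only an approximation of writeEscapedString, which escapes backslashes, quotes, and line feeds):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	for _, name := range []string{"http_requests_total", "http.requests"} {
		if model.IsValidLegacyMetricName(model.LabelValue(name)) {
			// Legacy-valid names render outside the braces, as before:
			// http_requests_total{code="200"} 1
			fmt.Printf("%s{code=%q} 1\n", name, "200")
		} else {
			// Legacy-invalid names move inside the braces, quoted:
			// {"http.requests",code="200"} 1
			fmt.Printf("{%q,code=%q} 1\n", name, "200")
		}
	}
}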
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index 84be0643ec67..26490211af2a 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -16,6 +16,7 @@ package expfmt
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io"
"math"
@@ -24,7 +25,8 @@ import (
dto "github.com/prometheus/client_model/go"
- "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
+ "google.golang.org/protobuf/proto"
+
"github.com/prometheus/common/model"
)
@@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
// stream. Turn this error into something nicer and more
// meaningful. (io.EOF is often used as a signal for the legitimate end
// of an input stream.)
- if p.err == io.EOF {
+ if p.err != nil && errors.Is(p.err, io.EOF) {
p.parseError("unexpected end of input stream")
}
return p.metricFamiliesByName, p.err
@@ -142,9 +144,13 @@ func (p *TextParser) reset(in io.Reader) {
func (p *TextParser) startOfLine() stateFn {
p.lineCount++
if p.skipBlankTab(); p.err != nil {
- // End of input reached. This is the only case where
- // that is not an error but a signal that we are done.
- p.err = nil
+ // This is the only place that we expect to see io.EOF,
+ // which is not an error but the signal that we are done.
+ // Any other error that happens to align with the start of
+ // a line is still an error.
+ if errors.Is(p.err, io.EOF) {
+ p.err = nil
+ }
return nil
}
switch p.currentByte {
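
A short sketch of the EOF nuance above: a clean EOF at the start of a line ends parsing successfully (wrapped EOFs now count too, thanks to errors.Is), while an EOF mid-line is still reported.

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	var p expfmt.TextParser

	// Input ends cleanly at the start of a new line: no error.
	fams, err := p.TextToMetricFamilies(strings.NewReader("a_total 1\n"))
	fmt.Println(len(fams), err) // 1 <nil>

	// Input ends mid-line: reported as "unexpected end of input stream".
	_, err = p.TextToMetricFamilies(strings.NewReader("a_total"))
	fmt.Println(err)
}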
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
deleted file mode 100644
index 26e92288c7c0..000000000000
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
-Copyright (c) 2011, Open Knowledge Foundation Ltd.
-All rights reserved.
-
-HTTP Content-Type Autonegotiation.
-
-The functions in this package implement the behaviour specified in
-http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
- Neither the name of the Open Knowledge Foundation Ltd. nor the
- names of its contributors may be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-*/
-package goautoneg
-
-import (
- "sort"
- "strconv"
- "strings"
-)
-
-// Structure to represent a clause in an HTTP Accept Header
-type Accept struct {
- Type, SubType string
- Q float64
- Params map[string]string
-}
-
-// For internal use, so that we can use the sort interface
-type accept_slice []Accept
-
-func (accept accept_slice) Len() int {
- slice := []Accept(accept)
- return len(slice)
-}
-
-func (accept accept_slice) Less(i, j int) bool {
- slice := []Accept(accept)
- ai, aj := slice[i], slice[j]
- if ai.Q > aj.Q {
- return true
- }
- if ai.Type != "*" && aj.Type == "*" {
- return true
- }
- if ai.SubType != "*" && aj.SubType == "*" {
- return true
- }
- return false
-}
-
-func (accept accept_slice) Swap(i, j int) {
- slice := []Accept(accept)
- slice[i], slice[j] = slice[j], slice[i]
-}
-
-// Parse an Accept Header string returning a sorted list
-// of clauses
-func ParseAccept(header string) (accept []Accept) {
- parts := strings.Split(header, ",")
- accept = make([]Accept, 0, len(parts))
- for _, part := range parts {
- part := strings.Trim(part, " ")
-
- a := Accept{}
- a.Params = make(map[string]string)
- a.Q = 1.0
-
- mrp := strings.Split(part, ";")
-
- media_range := mrp[0]
- sp := strings.Split(media_range, "/")
- a.Type = strings.Trim(sp[0], " ")
-
- switch {
- case len(sp) == 1 && a.Type == "*":
- a.SubType = "*"
- case len(sp) == 2:
- a.SubType = strings.Trim(sp[1], " ")
- default:
- continue
- }
-
- if len(mrp) == 1 {
- accept = append(accept, a)
- continue
- }
-
- for _, param := range mrp[1:] {
- sp := strings.SplitN(param, "=", 2)
- if len(sp) != 2 {
- continue
- }
- token := strings.Trim(sp[0], " ")
- if token == "q" {
- a.Q, _ = strconv.ParseFloat(sp[1], 32)
- } else {
- a.Params[token] = strings.Trim(sp[1], " ")
- }
- }
-
- accept = append(accept, a)
- }
-
- slice := accept_slice(accept)
- sort.Sort(slice)
-
- return
-}
-
-// Negotiate the most appropriate content_type given the accept header
-// and a list of alternatives.
-func Negotiate(header string, alternatives []string) (content_type string) {
- asp := make([][]string, 0, len(alternatives))
- for _, ctype := range alternatives {
- asp = append(asp, strings.SplitN(ctype, "/", 2))
- }
- for _, clause := range ParseAccept(header) {
- for i, ctsp := range asp {
- if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
- content_type = alternatives[i]
- return
- }
- if clause.Type == ctsp[0] && clause.SubType == "*" {
- content_type = alternatives[i]
- return
- }
- if clause.Type == "*" && clause.SubType == "*" {
- content_type = alternatives[i]
- return
- }
- }
- }
- return
-}
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index 35e739c7ad21..80d1fe944ea1 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -75,7 +75,12 @@ func (a *Alert) ResolvedAt(ts time.Time) bool {
// Status returns the status of the alert.
func (a *Alert) Status() AlertStatus {
- if a.Resolved() {
+ return a.StatusAt(time.Now())
+}
+
+// StatusAt returns the status of the alert at the given timestamp.
+func (a *Alert) StatusAt(ts time.Time) AlertStatus {
+ if a.ResolvedAt(ts) {
return AlertResolved
}
return AlertFiring
@@ -90,13 +95,13 @@ func (a *Alert) Validate() error {
return fmt.Errorf("start time must be before end time")
}
if err := a.Labels.Validate(); err != nil {
- return fmt.Errorf("invalid label set: %s", err)
+ return fmt.Errorf("invalid label set: %w", err)
}
if len(a.Labels) == 0 {
return fmt.Errorf("at least one label pair required")
}
if err := a.Annotations.Validate(); err != nil {
- return fmt.Errorf("invalid annotations: %s", err)
+ return fmt.Errorf("invalid annotations: %w", err)
}
return nil
}
@@ -127,6 +132,17 @@ func (as Alerts) HasFiring() bool {
return false
}
+// HasFiringAt returns true iff one of the alerts is not resolved
+// at the time ts.
+func (as Alerts) HasFiringAt(ts time.Time) bool {
+ for _, a := range as {
+ if !a.ResolvedAt(ts) {
+ return true
+ }
+ }
+ return false
+}
+
// Status returns StatusFiring iff at least one of the alerts is firing.
func (as Alerts) Status() AlertStatus {
if as.HasFiring() {
@@ -134,3 +150,12 @@ func (as Alerts) Status() AlertStatus {
}
return AlertResolved
}
+
+// StatusAt returns StatusFiring iff at least one of the alerts is firing
+// at the time ts.
+func (as Alerts) StatusAt(ts time.Time) AlertStatus {
+ if as.HasFiringAt(ts) {
+ return AlertFiring
+ }
+ return AlertResolved
+}
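
The time-parameterized variants make status checks reproducible in tests and batch evaluation; a minimal sketch:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	now := time.Now()
	a := &model.Alert{
		Labels:   model.LabelSet{"alertname": "DiskFull"},
		StartsAt: now.Add(-time.Hour),
		EndsAt:   now.Add(-time.Minute), // resolved one minute ago
	}
	fmt.Println(a.Status())                             // resolved (evaluated at time.Now())
	fmt.Println(a.StatusAt(now.Add(-30 * time.Minute))) // firing (was still active then)
}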
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index ef8956335468..3317ce22ff73 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -97,17 +97,25 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
// therewith.
type LabelName string
-// IsValid is true iff the label name matches the pattern of LabelNameRE. This
-// method, however, does not use LabelNameRE for the check but a much faster
-// hardcoded implementation.
+// IsValid returns true iff name matches the pattern of LabelNameRE for legacy
+// names, and iff it's valid UTF-8 if NameValidationScheme is set to
+// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the
+// check but a much faster hardcoded implementation.
func (ln LabelName) IsValid() bool {
if len(ln) == 0 {
return false
}
- for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
- return false
+ switch NameValidationScheme {
+ case LegacyValidation:
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
}
+ case UTF8Validation:
+ return utf8.ValidString(string(ln))
+ default:
+ panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
}
return true
}
@@ -164,7 +172,7 @@ func (l LabelNames) String() string {
// A LabelValue is an associated value for a LabelName.
type LabelValue string
-// IsValid returns true iff the string is a valid UTF8.
+// IsValid returns true iff the string is a valid UTF-8.
func (lv LabelValue) IsValid() bool {
return utf8.ValidString(string(lv))
}
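
A sketch of the scheme-dependent behavior (NameValidationScheme and its values are introduced in metric.go further down in this patch):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	n := model.LabelName("label.with.dots")

	model.NameValidationScheme = model.LegacyValidation
	fmt.Println(n.IsValid()) // false: dots violate the legacy pattern

	model.NameValidationScheme = model.UTF8Validation
	fmt.Println(n.IsValid()) // true: any non-empty, valid UTF-8 string passes
}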
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
index 6eda08a7395a..d0ad88da3346 100644
--- a/vendor/github.com/prometheus/common/model/labelset.go
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -17,7 +17,6 @@ import (
"encoding/json"
"fmt"
"sort"
- "strings"
)
// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
@@ -129,16 +128,6 @@ func (l LabelSet) Merge(other LabelSet) LabelSet {
return result
}
-func (l LabelSet) String() string {
- lstrs := make([]string, 0, len(l))
- for l, v := range l {
- lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
- }
-
- sort.Strings(lstrs)
- return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
-}
-
// Fingerprint returns the LabelSet's fingerprint.
func (ls LabelSet) Fingerprint() Fingerprint {
return labelSetToFingerprint(ls)
diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go
new file mode 100644
index 000000000000..481c47b46e5d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset_string.go
@@ -0,0 +1,45 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.21
+
+package model
+
+import (
+ "bytes"
+ "slices"
+ "strconv"
+)
+
+// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
+func (l LabelSet) String() string {
+ var lna [32]string // On stack to avoid memory allocation for sorting names.
+ labelNames := lna[:0]
+ for name := range l {
+ labelNames = append(labelNames, string(name))
+ }
+ slices.Sort(labelNames)
+ var bytea [1024]byte // On stack to avoid memory allocation while building the output.
+ b := bytes.NewBuffer(bytea[:0])
+ b.WriteByte('{')
+ for i, name := range labelNames {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(name)
+ b.WriteByte('=')
+ b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)])))
+ }
+ b.WriteByte('}')
+ return b.String()
+}
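
The rewrite avoids the per-pair fmt.Sprintf and intermediate slice allocations of the old implementation; the rendered output is unchanged:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ls := model.LabelSet{"job": "api", "instance": "host:9090"}
	fmt.Println(ls) // {instance="host:9090", job="api"} (names sorted alphabetically)
}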
diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go
new file mode 100644
index 000000000000..c4212685e71f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset_string_go120.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.21
+
+package model
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// String was optimized using functions not available in Go 1.20
+// or lower. We keep the old implementation for compatibility with client_golang.
+// Once client_golang drops support for Go 1.20 (scheduled for August 2024), this
+// file can be removed.
+func (l LabelSet) String() string {
+ labelNames := make([]string, 0, len(l))
+ for name := range l {
+ labelNames = append(labelNames, string(name))
+ }
+ sort.Strings(labelNames)
+ lstrs := make([]string, 0, len(l))
+ for _, name := range labelNames {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
+ }
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go
new file mode 100644
index 000000000000..447ab8ad635b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metadata.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// MetricType represents metric type values.
+type MetricType string
+
+const (
+ MetricTypeCounter = MetricType("counter")
+ MetricTypeGauge = MetricType("gauge")
+ MetricTypeHistogram = MetricType("histogram")
+ MetricTypeGaugeHistogram = MetricType("gaugehistogram")
+ MetricTypeSummary = MetricType("summary")
+ MetricTypeInfo = MetricType("info")
+ MetricTypeStateset = MetricType("stateset")
+ MetricTypeUnknown = MetricType("unknown")
+)
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index 00804b7fedb0..eb865e5a59cf 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -18,15 +18,84 @@ import (
"regexp"
"sort"
"strings"
+ "unicode/utf8"
+
+ dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
)
var (
- // MetricNameRE is a regular expression matching valid metric
- // names. Note that the IsValidMetricName function performs the same
- // check but faster than a match with this regular expression.
- MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+ // NameValidationScheme determines the method of name validation to be used by
+ // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode
+ // in isolation from other components that don't support UTF-8 may result in
+ // bugs or other undefined behavior. This value is intended to be set by
+ // UTF-8-aware binaries as part of their startup. To avoid the need for locking,
+ // this value should be set once, ideally in an init(), before multiple
+ // goroutines are started.
+ NameValidationScheme = LegacyValidation
+
+ // NameEscapingScheme defines the default way that names will be
+ // escaped when presented to systems that do not support UTF-8 names. If the
+ // Content-Type "escaping" term is specified, that will override this value.
+ NameEscapingScheme = ValueEncodingEscaping
+)
+
+// ValidationScheme is a Go enum for determining how metric and label names will
+// be validated by this library.
+type ValidationScheme int
+
+const (
+ // LegacyValidation is a setting that requires that metric and label names
+ // conform to the original Prometheus character requirements described by
+ // MetricNameRE and LabelNameRE.
+ LegacyValidation ValidationScheme = iota
+
+ // UTF8Validation only requires that metric and label names be valid UTF-8
+ // strings.
+ UTF8Validation
+)
+
+type EscapingScheme int
+
+const (
+ // NoEscaping indicates that a name will not be escaped. Unescaped names that
+ // do not conform to the legacy validity check will use a new exposition
+ // format syntax that will be officially standardized in future versions.
+ NoEscaping EscapingScheme = iota
+
+ // UnderscoreEscaping replaces all legacy-invalid characters with underscores.
+ UnderscoreEscaping
+
+ // DotsEscaping is similar to UnderscoreEscaping, except that dots are
+ // converted to `_dot_` and pre-existing underscores are converted to `__`.
+ DotsEscaping
+
+ // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
+ // characters with the unicode value, surrounded by underscores. Single
+ // underscores are replaced with double underscores.
+ ValueEncodingEscaping
+)
+
+const (
+ // EscapingKey is the key in an Accept or Content-Type header that defines how
+ // metric and label names that do not conform to the legacy character
+ // requirements should be escaped when being scraped by a legacy prometheus
+ // system. If a system does not explicitly pass an escaping parameter in the
+ // Accept header, the default NameEscapingScheme will be used.
+ EscapingKey = "escaping"
+
+ // Possible values for Escaping Key:
+ AllowUTF8 = "allow-utf-8" // No escaping required.
+ EscapeUnderscores = "underscores"
+ EscapeDots = "dots"
+ EscapeValues = "values"
)
+// MetricNameRE is a regular expression matching valid metric
+// names. Note that the IsValidMetricName function performs the same
+// check but faster than a match with this regular expression.
+var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
// a singleton and refers to one and only one stream of samples.
type Metric LabelSet
@@ -86,17 +155,303 @@ func (m Metric) FastFingerprint() Fingerprint {
return LabelSet(m).FastFingerprint()
}
-// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
+// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is
+// selected.
+func IsValidMetricName(n LabelValue) bool {
+ switch NameValidationScheme {
+ case LegacyValidation:
+ return IsValidLegacyMetricName(n)
+ case UTF8Validation:
+ if len(n) == 0 {
+ return false
+ }
+ return utf8.ValidString(string(n))
+ default:
+ panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
+ }
+}
+
+// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
+// legacy validation scheme regardless of the value of NameValidationScheme.
// This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation.
-func IsValidMetricName(n LabelValue) bool {
+func IsValidLegacyMetricName(n LabelValue) bool {
if len(n) == 0 {
return false
}
for i, b := range n {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+ if !isValidLegacyRune(b, i) {
return false
}
}
return true
}
+
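A minimal sketch of the split introduced here: IsValidMetricName respects the package-level scheme, while IsValidLegacyMetricName always applies the classic rules.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Set once at startup, before other goroutines run (see the docs above).
	model.NameValidationScheme = model.UTF8Validation

	fmt.Println(model.IsValidMetricName("http.requests.total"))       // true under UTF-8 validation
	fmt.Println(model.IsValidLegacyMetricName("http.requests.total")) // false: '.' is legacy-invalid
}
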
+// EscapeMetricFamily escapes the given metric names and labels with the given
+// escaping scheme. Returns a new object that uses the same pointers to fields
+// when possible and creates new escaped versions so as not to mutate the
+// input.
+func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily {
+ if v == nil {
+ return nil
+ }
+
+ if scheme == NoEscaping {
+ return v
+ }
+
+ out := &dto.MetricFamily{
+ Help: v.Help,
+ Type: v.Type,
+ Unit: v.Unit,
+ }
+
+ // If the name is nil, copy as-is, don't try to escape.
+ if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
+ out.Name = v.Name
+ } else {
+ out.Name = proto.String(EscapeName(v.GetName(), scheme))
+ }
+ for _, m := range v.Metric {
+ if !metricNeedsEscaping(m) {
+ out.Metric = append(out.Metric, m)
+ continue
+ }
+
+ escaped := &dto.Metric{
+ Gauge: m.Gauge,
+ Counter: m.Counter,
+ Summary: m.Summary,
+ Untyped: m.Untyped,
+ Histogram: m.Histogram,
+ TimestampMs: m.TimestampMs,
+ }
+
+ for _, l := range m.Label {
+ if l.GetName() == MetricNameLabel {
+ if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+ escaped.Label = append(escaped.Label, l)
+ continue
+ }
+ escaped.Label = append(escaped.Label, &dto.LabelPair{
+ Name: proto.String(MetricNameLabel),
+ Value: proto.String(EscapeName(l.GetValue(), scheme)),
+ })
+ continue
+ }
+ if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
+ escaped.Label = append(escaped.Label, l)
+ continue
+ }
+ escaped.Label = append(escaped.Label, &dto.LabelPair{
+ Name: proto.String(EscapeName(l.GetName(), scheme)),
+ Value: l.Value,
+ })
+ }
+ out.Metric = append(out.Metric, escaped)
+ }
+ return out
+}
+
+func metricNeedsEscaping(m *dto.Metric) bool {
+ for _, l := range m.Label {
+ if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+ return true
+ }
+ if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
+ return true
+ }
+ }
+ return false
+}
+
+const (
+ lowerhex = "0123456789abcdef"
+)
+
+// EscapeName escapes the incoming name according to the provided escaping
+// scheme. Depending on the rules of escaping, this may cause no change in the
+// string that is returned. (Especially NoEscaping, which by definition is a
+// noop). This function does not do any validation of the name.
+func EscapeName(name string, scheme EscapingScheme) string {
+ if len(name) == 0 {
+ return name
+ }
+ var escaped strings.Builder
+ switch scheme {
+ case NoEscaping:
+ return name
+ case UnderscoreEscaping:
+ if IsValidLegacyMetricName(LabelValue(name)) {
+ return name
+ }
+ for i, b := range name {
+ if isValidLegacyRune(b, i) {
+ escaped.WriteRune(b)
+ } else {
+ escaped.WriteRune('_')
+ }
+ }
+ return escaped.String()
+ case DotsEscaping:
+ // Do not early return for legacy valid names, we still escape underscores.
+ for i, b := range name {
+ if b == '_' {
+ escaped.WriteString("__")
+ } else if b == '.' {
+ escaped.WriteString("_dot_")
+ } else if isValidLegacyRune(b, i) {
+ escaped.WriteRune(b)
+ } else {
+ escaped.WriteRune('_')
+ }
+ }
+ return escaped.String()
+ case ValueEncodingEscaping:
+ if IsValidLegacyMetricName(LabelValue(name)) {
+ return name
+ }
+ escaped.WriteString("U__")
+ for i, b := range name {
+ if b == '_' {
+ // Double single underscores so that UnescapeName (which collapses
+ // "__" back to "_") can round-trip, as the scheme's contract states.
+ escaped.WriteString("__")
+ } else if isValidLegacyRune(b, i) {
+ escaped.WriteRune(b)
+ } else if !utf8.ValidRune(b) {
+ escaped.WriteString("_FFFD_")
+ } else if b < 0x100 {
+ escaped.WriteRune('_')
+ for s := 4; s >= 0; s -= 4 {
+ escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
+ }
+ escaped.WriteRune('_')
+ } else if b < 0x10000 {
+ escaped.WriteRune('_')
+ for s := 12; s >= 0; s -= 4 {
+ escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
+ }
+ escaped.WriteRune('_')
+ }
+ }
+ return escaped.String()
+ default:
+ panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+ }
+}
+
+// lower function taken from strconv.atoi
+func lower(c byte) byte {
+ return c | ('x' - 'X')
+}
+
+// UnescapeName unescapes the incoming name according to the provided escaping
+// scheme if possible. Some schemes are partially or totally non-roundtrippable.
+// If any error is encountered, the original input is returned.
+func UnescapeName(name string, scheme EscapingScheme) string {
+ if len(name) == 0 {
+ return name
+ }
+ switch scheme {
+ case NoEscaping:
+ return name
+ case UnderscoreEscaping:
+ // It is not possible to unescape from underscore replacement.
+ return name
+ case DotsEscaping:
+ name = strings.ReplaceAll(name, "_dot_", ".")
+ name = strings.ReplaceAll(name, "__", "_")
+ return name
+ case ValueEncodingEscaping:
+ escapedName, found := strings.CutPrefix(name, "U__")
+ if !found {
+ return name
+ }
+
+ var unescaped strings.Builder
+ TOP:
+ for i := 0; i < len(escapedName); i++ {
+ // All non-underscores are treated normally.
+ if escapedName[i] != '_' {
+ unescaped.WriteByte(escapedName[i])
+ continue
+ }
+ i++
+ if i >= len(escapedName) {
+ return name
+ }
+ // A double underscore is a single underscore.
+ if escapedName[i] == '_' {
+ unescaped.WriteByte('_')
+ continue
+ }
+ // We think we are in a UTF-8 code, process it.
+ var utf8Val uint
+ for j := 0; i < len(escapedName); j++ {
+ // This is too many characters for a utf8 value.
+ if j > 4 {
+ return name
+ }
+ // Found a closing underscore, convert to a rune, check validity, and append.
+ if escapedName[i] == '_' {
+ utf8Rune := rune(utf8Val)
+ if !utf8.ValidRune(utf8Rune) {
+ return name
+ }
+ unescaped.WriteRune(utf8Rune)
+ continue TOP
+ }
+ r := lower(escapedName[i])
+ utf8Val *= 16
+ if r >= '0' && r <= '9' {
+ utf8Val += uint(r) - '0'
+ } else if r >= 'a' && r <= 'f' {
+ utf8Val += uint(r) - 'a' + 10
+ } else {
+ return name
+ }
+ i++
+ }
+ // Didn't find closing underscore, invalid.
+ return name
+ }
+ return unescaped.String()
+ default:
+ panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+ }
+}
+
+func isValidLegacyRune(b rune, i int) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)
+}
+
+func (e EscapingScheme) String() string {
+ switch e {
+ case NoEscaping:
+ return AllowUTF8
+ case UnderscoreEscaping:
+ return EscapeUnderscores
+ case DotsEscaping:
+ return EscapeDots
+ case ValueEncodingEscaping:
+ return EscapeValues
+ default:
+ panic(fmt.Sprintf("unknown format scheme %d", e))
+ }
+}
+
+func ToEscapingScheme(s string) (EscapingScheme, error) {
+ if s == "" {
+ return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme")
+ }
+ switch s {
+ case AllowUTF8:
+ return NoEscaping, nil
+ case EscapeUnderscores:
+ return UnderscoreEscaping, nil
+ case EscapeDots:
+ return DotsEscaping, nil
+ case EscapeValues:
+ return ValueEncodingEscaping, nil
+ default:
+ return NoEscaping, fmt.Errorf("unknown format scheme %q", s)
+ }
+}
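
A sketch of the escaping API in action; the expected outputs are read off the code above. Note that UnderscoreEscaping is lossy by design, while ValueEncodingEscaping round-trips:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := "my.dotted.name"

	fmt.Println(model.EscapeName(name, model.UnderscoreEscaping)) // my_dotted_name (irreversible)
	fmt.Println(model.EscapeName(name, model.DotsEscaping))       // my_dot_dotted_dot_name

	esc := model.EscapeName(name, model.ValueEncodingEscaping)
	fmt.Println(esc)                                                  // U__my_2e_dotted_2e_name
	fmt.Println(model.UnescapeName(esc, model.ValueEncodingEscaping)) // my.dotted.name

	// Mapping a Content-Type "escaping" term back onto a scheme:
	s, err := model.ToEscapingScheme(model.EscapeUnderscores)
	fmt.Println(s, err) // underscores <nil>
}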
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
index 8762b13c63d1..dc8a0026c484 100644
--- a/vendor/github.com/prometheus/common/model/signature.go
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -22,10 +22,8 @@ import (
// when calculating their combined hash value (aka signature aka fingerprint).
const SeparatorByte byte = 255
-var (
- // cache the signature of an empty label set.
- emptyLabelSignature = hashNew()
-)
+// cache the signature of an empty label set.
+var emptyLabelSignature = hashNew()
// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
// given label set. (Collisions are possible but unlikely if the number of label
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index bb99889d2cc6..910b0b71fcce 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -81,7 +81,7 @@ func (s *Silence) Validate() error {
}
for _, m := range s.Matchers {
if err := m.Validate(); err != nil {
- return fmt.Errorf("invalid matcher: %s", err)
+ return fmt.Errorf("invalid matcher: %w", err)
}
}
if s.StartsAt.IsZero() {
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index c909b8aa8c50..5727452c1ee9 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -18,7 +18,6 @@ import (
"errors"
"fmt"
"math"
- "regexp"
"strconv"
"strings"
"time"
@@ -183,54 +182,78 @@ func (d *Duration) Type() string {
return "duration"
}
-var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
+func isdigit(c byte) bool { return c >= '0' && c <= '9' }
+
+// Units are required to go in order from biggest to smallest.
+// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
+var unitMap = map[string]struct {
+ pos int
+ mult uint64
+}{
+ "ms": {7, uint64(time.Millisecond)},
+ "s": {6, uint64(time.Second)},
+ "m": {5, uint64(time.Minute)},
+ "h": {4, uint64(time.Hour)},
+ "d": {3, uint64(24 * time.Hour)},
+ "w": {2, uint64(7 * 24 * time.Hour)},
+ "y": {1, uint64(365 * 24 * time.Hour)},
+}
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
-func ParseDuration(durationStr string) (Duration, error) {
- switch durationStr {
+func ParseDuration(s string) (Duration, error) {
+ switch s {
case "0":
// Allow 0 without a unit.
return 0, nil
case "":
return 0, errors.New("empty duration string")
}
- matches := durationRE.FindStringSubmatch(durationStr)
- if matches == nil {
- return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
- }
- var dur time.Duration
- // Parse the match at pos `pos` in the regex and use `mult` to turn that
- // into ms, then add that value to the total parsed duration.
- var overflowErr error
- m := func(pos int, mult time.Duration) {
- if matches[pos] == "" {
- return
+ orig := s
+ var dur uint64
+ lastUnitPos := 0
+
+ for s != "" {
+ if !isdigit(s[0]) {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ // Consume [0-9]*
+ i := 0
+ for ; i < len(s) && isdigit(s[i]); i++ {
+ }
+ v, err := strconv.ParseUint(s[:i], 10, 0)
+ if err != nil {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
}
- n, _ := strconv.Atoi(matches[pos])
+ s = s[i:]
+ // Consume unit.
+ for i = 0; i < len(s) && !isdigit(s[i]); i++ {
+ }
+ if i == 0 {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ u := s[:i]
+ s = s[i:]
+ unit, ok := unitMap[u]
+ if !ok {
+ return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
+ }
+ if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ lastUnitPos = unit.pos
// Check if the provided duration overflows time.Duration (> ~ 290years).
- if n > int((1<<63-1)/mult/time.Millisecond) {
- overflowErr = errors.New("duration out of range")
+ if v > 1<<63/unit.mult {
+ return 0, errors.New("duration out of range")
}
- d := time.Duration(n) * time.Millisecond
- dur += d * mult
-
- if dur < 0 {
- overflowErr = errors.New("duration out of range")
+ dur += v * unit.mult
+ if dur > 1<<63-1 {
+ return 0, errors.New("duration out of range")
}
}
-
- m(2, 1000*60*60*24*365) // y
- m(4, 1000*60*60*24*7) // w
- m(6, 1000*60*60*24) // d
- m(8, 1000*60*60) // h
- m(10, 1000*60) // m
- m(12, 1000) // s
- m(14, 1) // ms
-
- return Duration(dur), overflowErr
+ return Duration(dur), nil
}
func (d Duration) String() string {
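
The hand-rolled parser keeps the old semantics (units must descend from largest to smallest, overflow is rejected) without the regexp; a quick sketch:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDuration("1h30m")
	fmt.Println(d, err) // 1h30m <nil>

	_, err = model.ParseDuration("1m1d") // minutes before days: out of order
	fmt.Println(err)                     // not a valid duration string: "1m1d"

	_, err = model.ParseDuration("9999999999999999y")
	fmt.Println(err) // duration out of range
}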
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index c9d8fb1a2831..8050637d8223 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -16,104 +16,26 @@ package model
import (
"encoding/json"
"fmt"
- "math"
"sort"
"strconv"
"strings"
)
-var (
- // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
- // non-existing sample pair. It is a SamplePair with timestamp Earliest and
- // value 0.0. Note that the natural zero value of SamplePair has a timestamp
- // of 0, which is possible to appear in a real SamplePair and thus not
- // suitable to signal a non-existing SamplePair.
- ZeroSamplePair = SamplePair{Timestamp: Earliest}
-
- // ZeroSample is the pseudo zero-value of Sample used to signal a
- // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
- // and metric nil. Note that the natural zero value of Sample has a timestamp
- // of 0, which is possible to appear in a real Sample and thus not suitable
- // to signal a non-existing Sample.
- ZeroSample = Sample{Timestamp: Earliest}
-)
-
-// A SampleValue is a representation of a value for a given sample at a given
-// time.
-type SampleValue float64
-
-// MarshalJSON implements json.Marshaler.
-func (v SampleValue) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (v *SampleValue) UnmarshalJSON(b []byte) error {
- if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("sample value must be a quoted string")
- }
- f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
- if err != nil {
- return err
- }
- *v = SampleValue(f)
- return nil
-}
-
-// Equal returns true if the value of v and o is equal or if both are NaN. Note
-// that v==o is false if both are NaN. If you want the conventional float
-// behavior, use == to compare two SampleValues.
-func (v SampleValue) Equal(o SampleValue) bool {
- if v == o {
- return true
- }
- return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
-}
-
-func (v SampleValue) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-// SamplePair pairs a SampleValue with a Timestamp.
-type SamplePair struct {
- Timestamp Time
- Value SampleValue
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s SamplePair) MarshalJSON() ([]byte, error) {
- t, err := json.Marshal(s.Timestamp)
- if err != nil {
- return nil, err
- }
- v, err := json.Marshal(s.Value)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *SamplePair) UnmarshalJSON(b []byte) error {
- v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
-func (s *SamplePair) Equal(o *SamplePair) bool {
- return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
-}
-
-func (s SamplePair) String() string {
- return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
-}
+// ZeroSample is the pseudo zero-value of Sample used to signal a
+// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+// and metric nil. Note that the natural zero value of Sample has a timestamp
+// of 0, which is possible to appear in a real Sample and thus not suitable
+// to signal a non-existing Sample.
+var ZeroSample = Sample{Timestamp: Earliest}
-// Sample is a sample pair associated with a metric.
+// Sample is a sample pair associated with a metric. A single sample must
+// define either Value or Histogram but not both. If Histogram == nil, the
+// Value field is used; otherwise, Value should be ignored.
type Sample struct {
- Metric Metric `json:"metric"`
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+ Histogram *SampleHistogram `json:"histogram"`
}
// Equal compares first the metrics, then the timestamp, then the value. The
@@ -129,11 +51,19 @@ func (s *Sample) Equal(o *Sample) bool {
if !s.Timestamp.Equal(o.Timestamp) {
return false
}
-
+ if s.Histogram != nil {
+ return s.Histogram.Equal(o.Histogram)
+ }
return s.Value.Equal(o.Value)
}
func (s Sample) String() string {
+ if s.Histogram != nil {
+ return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ })
+ }
return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
@@ -142,6 +72,19 @@ func (s Sample) String() string {
// MarshalJSON implements json.Marshaler.
func (s Sample) MarshalJSON() ([]byte, error) {
+ if s.Histogram != nil {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histogram SampleHistogramPair `json:"histogram"`
+ }{
+ Metric: s.Metric,
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
+ }
+ return json.Marshal(&v)
+ }
v := struct {
Metric Metric `json:"metric"`
Value SamplePair `json:"value"`
@@ -152,21 +95,25 @@ func (s Sample) MarshalJSON() ([]byte, error) {
Value: s.Value,
},
}
-
return json.Marshal(&v)
}
// UnmarshalJSON implements json.Unmarshaler.
func (s *Sample) UnmarshalJSON(b []byte) error {
v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ Histogram SampleHistogramPair `json:"histogram"`
}{
Metric: s.Metric,
Value: SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
},
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
}
if err := json.Unmarshal(b, &v); err != nil {
@@ -174,8 +121,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error {
}
s.Metric = v.Metric
- s.Timestamp = v.Value.Timestamp
- s.Value = v.Value.Value
+ if v.Histogram.Histogram != nil {
+ s.Timestamp = v.Histogram.Timestamp
+ s.Histogram = v.Histogram.Histogram
+ } else {
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+ }
return nil
}
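
A sketch of that either/or contract: with Histogram set, MarshalJSON emits a "histogram" pair instead of "value". SampleHistogram's fields (Count, Sum, Buckets) are taken from the new value_histogram.go added below (truncated here), so treat the exact JSON shape as approximate.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	s := model.Sample{
		Metric:    model.Metric{"__name__": "rpc_latency"},
		Timestamp: 1700000000000, // milliseconds
		Histogram: &model.SampleHistogram{Count: 10, Sum: 3.5},
	}
	b, _ := json.Marshal(s)
	fmt.Println(string(b))
	// Approximately:
	// {"metric":{"__name__":"rpc_latency"},"histogram":[1700000000,{"count":"10","sum":"3.5","buckets":null}]}
}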
@@ -221,80 +173,76 @@ func (s Samples) Equal(o Samples) bool {
// SampleStream is a stream of Values belonging to an attached COWMetric.
type SampleStream struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
}
func (ss SampleStream) String() string {
- vals := make([]string, len(ss.Values))
+ valuesLength := len(ss.Values)
+ vals := make([]string, valuesLength+len(ss.Histograms))
for i, v := range ss.Values {
vals[i] = v.String()
}
+ for i, v := range ss.Histograms {
+ vals[i+valuesLength] = v.String()
+ }
return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
}
-// Value is a generic interface for values resulting from a query evaluation.
-type Value interface {
- Type() ValueType
- String() string
+func (ss SampleStream) MarshalJSON() ([]byte, error) {
+ if len(ss.Histograms) > 0 && len(ss.Values) > 0 {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ } else if len(ss.Histograms) > 0 {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ } else {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ }
+ return json.Marshal(&v)
+ }
}
-func (Matrix) Type() ValueType { return ValMatrix }
-func (Vector) Type() ValueType { return ValVector }
-func (*Scalar) Type() ValueType { return ValScalar }
-func (*String) Type() ValueType { return ValString }
-
-type ValueType int
-
-const (
- ValNone ValueType = iota
- ValScalar
- ValVector
- ValMatrix
- ValString
-)
-
-// MarshalJSON implements json.Marshaler.
-func (et ValueType) MarshalJSON() ([]byte, error) {
- return json.Marshal(et.String())
-}
+func (ss *SampleStream) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
-func (et *ValueType) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- switch s {
- case "":
- *et = ValNone
- case "scalar":
- *et = ValScalar
- case "vector":
- *et = ValVector
- case "matrix":
- *et = ValMatrix
- case "string":
- *et = ValString
- default:
- return fmt.Errorf("unknown value type %q", s)
- }
- return nil
-}
-func (e ValueType) String() string {
- switch e {
- case ValNone:
- return ""
- case ValScalar:
- return "scalar"
- case ValVector:
- return "vector"
- case ValMatrix:
- return "matrix"
- case ValString:
- return "string"
- }
- panic("ValueType.String: unhandled value type")
+ ss.Metric = v.Metric
+ ss.Values = v.Values
+ ss.Histograms = v.Histograms
+
+ return nil
}
// Scalar is a scalar value evaluated at the set timestamp.
@@ -324,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error {
value, err := strconv.ParseFloat(f, 64)
if err != nil {
- return fmt.Errorf("error parsing sample value: %s", err)
+ return fmt.Errorf("error parsing sample value: %w", err)
}
s.Value = SampleValue(value)
return nil
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
new file mode 100644
index 000000000000..ae35cc2ab4b9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -0,0 +1,98 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+)
+
+// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+// of 0, which can legitimately appear in a real SamplePair and is thus not
+// suitable for signaling a non-existing SamplePair.
+var ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the values of v and o are equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
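
`SamplePair` keeps Prometheus's long-standing tuple encoding: a two-element JSON array of the timestamp (in seconds) and the value as a quoted string. A small round-trip sketch, assuming the vendored `model` package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	var p model.SamplePair
	// The timestamp is a JSON number in seconds; the float is quoted so
	// NaN/Inf and full precision survive transport.
	if err := json.Unmarshal([]byte(`[1700000000.000, "42.5"]`), &p); err != nil {
		panic(err)
	}
	fmt.Println(p) // 42.5 @[1700000000]
}
```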
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
new file mode 100644
index 000000000000..54bb038cfff3
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -0,0 +1,178 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type FloatString float64
+
+func (v FloatString) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+func (v FloatString) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+func (v *FloatString) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("float value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = FloatString(f)
+ return nil
+}
+
+type HistogramBucket struct {
+ Boundaries int32
+ Lower FloatString
+ Upper FloatString
+ Count FloatString
+}
+
+func (s HistogramBucket) MarshalJSON() ([]byte, error) {
+ b, err := json.Marshal(s.Boundaries)
+ if err != nil {
+ return nil, err
+ }
+ l, err := json.Marshal(s.Lower)
+ if err != nil {
+ return nil, err
+ }
+ u, err := json.Marshal(s.Upper)
+ if err != nil {
+ return nil, err
+ }
+ c, err := json.Marshal(s.Count)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
+}
+
+func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ return nil
+}
+
+func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
+ return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
+}
+
+func (b HistogramBucket) String() string {
+ var sb strings.Builder
+ lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3
+ upperInclusive := b.Boundaries == 0 || b.Boundaries == 3
+ if lowerInclusive {
+ sb.WriteRune('[')
+ } else {
+ sb.WriteRune('(')
+ }
+ fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
+ if upperInclusive {
+ sb.WriteRune(']')
+ } else {
+ sb.WriteRune(')')
+ }
+ fmt.Fprintf(&sb, ":%v", b.Count)
+ return sb.String()
+}
+
+type HistogramBuckets []*HistogramBucket
+
+func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, bucket := range s {
+ if !bucket.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+type SampleHistogram struct {
+ Count FloatString `json:"count"`
+ Sum FloatString `json:"sum"`
+ Buckets HistogramBuckets `json:"buckets"`
+}
+
+func (s SampleHistogram) String() string {
+ return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
+}
+
+func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
+ return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
+}
+
+type SampleHistogramPair struct {
+ Timestamp Time
+ // Histogram should never be nil; it is stored as a pointer only for efficiency.
+ Histogram *SampleHistogram
+}
+
+func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
+ if s.Histogram == nil {
+ return nil, fmt.Errorf("histogram is nil")
+ }
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Histogram)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Timestamp, &s.Histogram}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ if s.Histogram == nil {
+ return fmt.Errorf("histogram is null")
+ }
+ return nil
+}
+
+func (s SampleHistogramPair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
+}
+
+func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
+ return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
+}
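
Buckets are likewise encoded as tuples, `[boundaries, "lower", "upper", "count"]`, where `boundaries` encodes inclusivity (0: upper inclusive, 1: lower inclusive, 2: neither, 3: both), matching the `String` method above. A sketch with illustrative numbers:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	b := model.HistogramBucket{Boundaries: 0, Lower: 0.5, Upper: 1, Count: 3}
	out, err := json.Marshal(b)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // [0,"0.5","1","3"]
	fmt.Println(b.String())  // (0.5,1]:3
}
```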
diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go
new file mode 100644
index 000000000000..726c50ee638c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_type.go
@@ -0,0 +1,83 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return ""
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
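
The `ValueType` JSON behavior is unchanged by the move into its own file; a quick round-trip sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	var vt model.ValueType
	if err := json.Unmarshal([]byte(`"matrix"`), &vt); err != nil {
		panic(err)
	}
	fmt.Println(vt == model.ValMatrix) // true

	out, err := json.Marshal(vt)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "matrix"
}
```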
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
index a197699a1ee9..126df9e67acf 100644
--- a/vendor/github.com/prometheus/procfs/.golangci.yml
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -1,8 +1,16 @@
---
linters:
enable:
+ - errcheck
- godot
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
- revive
+ - staticcheck
+ - testifylint
+ - unused
linter-settings:
godot:
@@ -10,3 +18,5 @@ linter-settings:
exclude:
# Ignore "See: URL"
- 'See:'
+ misspell:
+ locale: US
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
index 56ba67d3e317..e00f3b365b62 100644
--- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md
+++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -1,2 +1,3 @@
* Johannes 'fish' Ziemke @discordianfish
-* Paul Gier @pgier
+* Paul Gier @pgier
+* Ben Kochie @SuperQ
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 6c8e3e219797..161729235068 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -49,25 +49,28 @@ endif
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell which gotestsum),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif
-PROMU_VERSION ?= 0.13.0
+PROMU_VERSION ?= 0.17.0
PROMU_URL := /~https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.45.2
-# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
+GOLANGCI_LINT_VERSION ?= v1.59.0
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
- ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
+ ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
# If we're in CI and there is an Actions file, that means the linter
# is being run in Actions, so we don't need to run it here.
- ifeq (,$(CIRCLE_JOB))
+ ifneq (,$(SKIP_GOLANGCI_LINT))
+ GOLANGCI_LINT :=
+ else ifeq (,$(CIRCLE_JOB))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
@@ -88,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
+
ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64
@@ -164,16 +169,20 @@ common-vet:
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint"
-# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
-# Otherwise staticcheck might fail randomly for some reason not yet explained.
- $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
endif
+.PHONY: common-lint-fix
+common-lint-fix: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+ @echo ">> running golangci-lint fix"
+ $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell which yamllint))
+ifeq (, $(shell command -v yamllint 2> /dev/null))
@echo "yamllint not installed so skipping"
else
yamllint .
@@ -199,10 +208,14 @@ common-tarball: promu
@echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+ @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
- docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
@@ -211,19 +224,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
- docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+ docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
.PHONY: promu
promu: $(PROMU)
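
`SANITIZED_DOCKER_IMAGE_TAG` exists because Docker tag names may not contain `+`, which appears in semver build metadata (for example `v1.2.3+dirty`). A hypothetical Go equivalent of the `$(subst +,-,...)` rule, for illustration only:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Docker tags allow only [A-Za-z0-9_.-] after the first character, so
	// '+' from semver build metadata must be rewritten before tagging.
	tag := "v1.2.3+dirty"
	fmt.Println(strings.ReplaceAll(tag, "+", "-")) // v1.2.3-dirty
}
```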
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
index 43c37735a70f..1224816c2ade 100644
--- a/vendor/github.com/prometheus/procfs/README.md
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -51,11 +51,11 @@ ensure the `fixtures` directory is up to date by removing the existing directory
extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
```bash
-rm -rf fixtures
+rm -rf testdata/fixtures
make test
```
Next, make the required changes to the extracted files in the `fixtures` directory. When
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
based on the updated `fixtures` directory. And finally, verify the changes using
-`git diff fixtures.ttar`.
+`git diff testdata/fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
index 68f36e888f91..cdcc8a7ccc46 100644
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -55,7 +55,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := os.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
- return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
+ return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
@@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
- return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
+ return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, columns, err)
}
entries = append(entries, entry)
} else {
- return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
+ return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
}
}
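
The recurring theme in the procfs changes below is wrapping failures in the exported sentinels `ErrFileParse` and `ErrFileRead`, so callers can classify errors with `errors.Is` instead of matching message text. A minimal consumer-side sketch:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fs.GatherARPEntries(); err != nil {
		// Go 1.20+ multi-%w wrapping keeps both the sentinel and the cause.
		fmt.Println("parse failure:", errors.Is(err, procfs.ErrFileParse))
		fmt.Println("read failure:", errors.Is(err, procfs.ErrFileRead))
	}
}
```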
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
index f5b7939b266a..83807500908f 100644
--- a/vendor/github.com/prometheus/procfs/buddyinfo.go
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -55,18 +55,18 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
parts := strings.Fields(line)
if len(parts) < 4 {
- return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
+ return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
}
- node := strings.TrimRight(parts[1], ",")
- zone := strings.TrimRight(parts[3], ",")
+ node := strings.TrimSuffix(parts[1], ",")
+ zone := strings.TrimSuffix(parts[3], ",")
arraySize := len(parts[4:])
if bucketCount == -1 {
bucketCount = arraySize
} else {
if bucketCount != arraySize {
- return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
+ return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize)
}
}
@@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
- return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
+ return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
}
}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
index ff6b927da159..f0950bb49534 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -79,7 +79,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@@ -192,9 +192,10 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
- match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
+ match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -258,7 +259,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -283,7 +284,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
if strings.HasPrefix(line, "processor") {
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
if len(match) < 2 {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
cpu := commonCPUInfo
v, err := strconv.ParseUint(match[1], 0, 32)
@@ -343,7 +344,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -380,12 +381,48 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
return cpuinfo, nil
}
+func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+ // find the first "system type" line
+ firstLine := firstNonEmptyLine(scanner)
+ if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+ }
+ field := strings.SplitN(firstLine, ": ", 2)
+ cpuinfo := []CPUInfo{}
+ systemType := field[1]
+ i := 0
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "processor":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ i = int(v)
+ cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+ cpuinfo[i].Processor = uint(v)
+ cpuinfo[i].VendorID = systemType
+ case "CPU Family":
+ cpuinfo[i].CPUFamily = field[1]
+ case "Model Name":
+ cpuinfo[i].ModelName = field[1]
+ }
+ }
+ return cpuinfo, nil
+}
+
func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@@ -430,7 +467,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
new file mode 100644
index 000000000000..d88442f0edfd
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoLoong
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
index ea41bf2ca1e2..a6b2b3127cb1 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
-// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
package procfs
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
index 5048ad1f2147..5f2a37a78b3f 100644
--- a/vendor/github.com/prometheus/procfs/crypto.go
+++ b/vendor/github.com/prometheus/procfs/crypto.go
@@ -55,12 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
- return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
+ return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
+
}
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
+ return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
}
return crypto, nil
@@ -83,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {
kv := strings.Split(text, ":")
if len(kv) != 2 {
- return nil, fmt.Errorf("malformed crypto line: %q", text)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
}
k := strings.TrimSpace(kv[0])
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
index d31a82600f67..f9d961e44179 100644
--- a/vendor/github.com/prometheus/procfs/doc.go
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -16,30 +16,29 @@
//
// Example:
//
-// package main
-//
-// import (
-// "fmt"
-// "log"
-//
-// "github.com/prometheus/procfs"
-// )
-//
-// func main() {
-// p, err := procfs.Self()
-// if err != nil {
-// log.Fatalf("could not get process: %s", err)
-// }
-//
-// stat, err := p.Stat()
-// if err != nil {
-// log.Fatalf("could not get process stat: %s", err)
-// }
-//
-// fmt.Printf("command: %s\n", stat.Comm)
-// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
-// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
-// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
-// }
-//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.Stat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 0102ab0fd856..4980c875bfce 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -20,7 +20,8 @@ import (
// FS represents the pseudo-filesystem sys, which provides an interface to
// kernel data structures.
type FS struct {
- proc fs.FS
+ proc fs.FS
+ isReal bool
}
// DefaultMountPoint is the common mount point of the proc filesystem.
@@ -39,5 +40,11 @@ func NewFS(mountPoint string) (FS, error) {
if err != nil {
return FS{}, err
}
- return FS{fs}, nil
+
+ isReal, err := isRealProc(mountPoint)
+ if err != nil {
+ return FS{}, err
+ }
+
+ return FS{fs, isReal}, nil
}
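
`NewFS` now also stats the mount point and records whether it is a genuine procfs; the `isReal` flag is unexported, so the public constructor signature is unchanged. A usage sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err) // now also fails if the mount point cannot be statted
	}
	load, err := fs.LoadAvg()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("load1=%.2f\n", load.Load1)
}
```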
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
new file mode 100644
index 000000000000..134767d69ac1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !freebsd && !linux
+// +build !freebsd,!linux
+
+package procfs
+
+// isRealProc returns true on platforms that don't have a Type field in
+// their Statfs_t struct and therefore cannot perform the check.
+func isRealProc(mountPoint string) (bool, error) {
+ return true, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
new file mode 100644
index 000000000000..80df79c31930
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build freebsd || linux
+// +build freebsd linux
+
+package procfs
+
+import (
+ "syscall"
+)
+
+// isRealProc determines whether the supplied mountpoint is really a proc filesystem.
+func isRealProc(mountPoint string) (bool, error) {
+ stat := syscall.Statfs_t{}
+ err := syscall.Statfs(mountPoint, &stat)
+ if err != nil {
+ return false, err
+ }
+
+ // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
+ return stat.Type == 0x9fa0, nil
+}
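
The magic-number comparison is the standard way to tell a real procfs from, say, a bind-mounted directory. The same check can be done directly, as in this Linux-only sketch:

```go
//go:build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var stat syscall.Statfs_t
	if err := syscall.Statfs("/proc", &stat); err != nil {
		panic(err)
	}
	// PROC_SUPER_MAGIC from include/uapi/linux/magic.h.
	fmt.Println("real procfs:", stat.Type == 0x9fa0)
}
```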
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
index f8070e6e2bec..cf2e3eaa03c6 100644
--- a/vendor/github.com/prometheus/procfs/fscache.go
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
- return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
+ return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse Fscacheinfo: %w", ErrFileParse, err)
}
return *m, nil
@@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error
if len(fields) < len(setFields) {
- return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
+ return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
}
for i := range setFields {
@@ -263,7 +263,7 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) < 2 {
- return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
+ return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text())
}
switch fields[0] {
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index b030951faf98..14272dc78857 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -64,6 +64,21 @@ func ParsePInt64s(ss []string) ([]*int64, error) {
return us, nil
}
+// ParseHexUint64s parses uint64s from the given hex strings.
+func ParseHexUint64s(ss []string) ([]*uint64, error) {
+ us := make([]*uint64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, &u)
+ }
+
+ return us, nil
+}
+
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
data, err := os.ReadFile(path)
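
`ParseHexUint64s` lives under `internal/` and is not importable outside procfs; the behavior it adds is plain base-16 parsing, sketched here:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, s := range []string{"ff", "1a2b", "0"} {
		u, err := strconv.ParseUint(s, 16, 64)
		if err != nil {
			panic(err)
		}
		fmt.Println(u) // 255, then 6699, then 0
	}
}
```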
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
index 391c07957e90..bc3a20c932d1 100644
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -221,15 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) {
case 46:
ip = net.ParseIP(s[1:40])
if ip == nil {
- return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
+ return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
}
default:
- return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
+ return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
}
portString := s[len(s)-4:]
if len(portString) != 4 {
- return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
+ return nil, 0,
+ fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
}
port, err := strconv.ParseUint(portString, 16, 16)
if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
index 0096cafbdf86..332e76c17f5b 100644
--- a/vendor/github.com/prometheus/procfs/loadavg.go
+++ b/vendor/github.com/prometheus/procfs/loadavg.go
@@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 {
- return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
+ return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes))
}
var err error
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
- return nil, fmt.Errorf("could not parse load %q: %w", load, err)
+ return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
}
}
return &LoadAvg{
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index a95c889cb9e2..67a9d2b44867 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -23,7 +23,7 @@ import (
var (
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
- recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+ recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
@@ -50,6 +50,8 @@ type MDStat struct {
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
+ // Number of blocks on the device that need to be synced.
+ BlocksToBeSynced int64
// progress percentage of current sync
BlocksSyncedPct float64
// estimated finishing time for current sync (in minutes)
@@ -70,7 +72,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
}
mdstat, err := parseMDStat(data)
if err != nil {
- return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
+ return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@@ -90,13 +92,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
deviceFields := strings.Fields(line)
if len(deviceFields) < 3 {
- return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line)
+ return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line)
}
mdName := deviceFields[0] // mdx
state := deviceFields[2] // active or inactive
if len(lines) <= i+3 {
- return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
+ return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName)
}
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
@@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
- return nil, fmt.Errorf("error parsing md device lines: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
}
syncLineIdx := i + 2
@@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
- syncedBlocks := size
+ blocksSynced := size
+ blocksToBeSynced := size
speed := float64(0)
finish := float64(0)
pct := float64(0)
@@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// Handle case when resync=PENDING or resync=DELAYED.
if strings.Contains(lines[syncLineIdx], "PENDING") ||
strings.Contains(lines[syncLineIdx], "DELAYED") {
- syncedBlocks = 0
+ blocksSynced = 0
} else {
- syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
+ blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
- return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
+ return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
}
}
}
@@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
- BlocksSynced: syncedBlocks,
+ BlocksSynced: blocksSynced,
+ BlocksToBeSynced: blocksToBeSynced,
BlocksSyncedPct: pct,
BlocksSyncedFinishTime: finish,
BlocksSyncedSpeed: speed,
@@ -168,13 +172,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
statusFields := strings.Fields(statusLine)
if len(statusFields) < 1 {
- return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
sizeStr := statusFields[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@@ -189,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 5 {
- return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
}
down = int64(strings.Count(matches[4], "_"))
return active, total, down, size, nil
}
-func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
+func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
+ return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
}
- syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+ blocks := strings.Split(matches[1], "/")
+ blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
+ return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
+ }
+
+ blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
+ if err != nil {
+ return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, blocks[1], err)
}
// Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine)
+ return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
}
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil {
- return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
+ return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
}
// Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine)
+ return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
}
finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
- return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
+ return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
}
// Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine)
+ return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
}
speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
- return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
+ return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
}
- return syncedBlocks, pct, finish, speed, nil
+ return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
}
func evalComponentDevices(deviceFields []string) []string {
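
The regex change is the heart of the new `BlocksToBeSynced` field: the capture group now spans the whole `synced/total` pair, which `evalRecoveryLine` splits on `/`. A standalone sketch against a representative (hypothetical) recovery line:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// Mirrors the updated recoveryLineBlocksRE pattern.
var recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)

func main() {
	line := "[=>...] recovery = 12.6% (1017664/7813735424) finish=347.0min speed=53481K/sec"
	m := recoveryLineBlocksRE.FindStringSubmatch(line)
	blocks := strings.Split(m[1], "/")
	synced, _ := strconv.ParseInt(blocks[0], 10, 64)
	total, _ := strconv.ParseInt(blocks[1], 10, 64)
	fmt.Println(synced, total) // 1017664 7813735424
}
```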
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
index f65e174e57b3..4b2c4050a3df 100644
--- a/vendor/github.com/prometheus/procfs/meminfo.go
+++ b/vendor/github.com/prometheus/procfs/meminfo.go
@@ -126,6 +126,7 @@ type Meminfo struct {
VmallocUsed *uint64
// largest contiguous block of vmalloc area which is free
VmallocChunk *uint64
+ Percpu *uint64
HardwareCorrupted *uint64
AnonHugePages *uint64
ShmemHugePages *uint64
@@ -140,6 +141,55 @@ type Meminfo struct {
DirectMap4k *uint64
DirectMap2M *uint64
DirectMap1G *uint64
+
+ // The struct fields below are the byte-normalized counterparts to the
+ // existing struct fields. Values are normalized using the optional
+ // unit field in the meminfo line.
+ MemTotalBytes *uint64
+ MemFreeBytes *uint64
+ MemAvailableBytes *uint64
+ BuffersBytes *uint64
+ CachedBytes *uint64
+ SwapCachedBytes *uint64
+ ActiveBytes *uint64
+ InactiveBytes *uint64
+ ActiveAnonBytes *uint64
+ InactiveAnonBytes *uint64
+ ActiveFileBytes *uint64
+ InactiveFileBytes *uint64
+ UnevictableBytes *uint64
+ MlockedBytes *uint64
+ SwapTotalBytes *uint64
+ SwapFreeBytes *uint64
+ DirtyBytes *uint64
+ WritebackBytes *uint64
+ AnonPagesBytes *uint64
+ MappedBytes *uint64
+ ShmemBytes *uint64
+ SlabBytes *uint64
+ SReclaimableBytes *uint64
+ SUnreclaimBytes *uint64
+ KernelStackBytes *uint64
+ PageTablesBytes *uint64
+ NFSUnstableBytes *uint64
+ BounceBytes *uint64
+ WritebackTmpBytes *uint64
+ CommitLimitBytes *uint64
+ CommittedASBytes *uint64
+ VmallocTotalBytes *uint64
+ VmallocUsedBytes *uint64
+ VmallocChunkBytes *uint64
+ PercpuBytes *uint64
+ HardwareCorruptedBytes *uint64
+ AnonHugePagesBytes *uint64
+ ShmemHugePagesBytes *uint64
+ ShmemPmdMappedBytes *uint64
+ CmaTotalBytes *uint64
+ CmaFreeBytes *uint64
+ HugepagesizeBytes *uint64
+ DirectMap4kBytes *uint64
+ DirectMap2MBytes *uint64
+ DirectMap1GBytes *uint64
}
// Meminfo returns information about current kernel/system memory statistics.
@@ -152,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
- return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err)
+ return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
}
return *m, nil
@@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
var m Meminfo
s := bufio.NewScanner(r)
for s.Scan() {
- // Each line has at least a name and value; we ignore the unit.
fields := strings.Fields(s.Text())
- if len(fields) < 2 {
- return nil, fmt.Errorf("malformed meminfo line: %q", s.Text())
- }
+ var val, valBytes uint64
- v, err := strconv.ParseUint(fields[1], 0, 64)
+ val, err := strconv.ParseUint(fields[1], 0, 64)
if err != nil {
return nil, err
}
+ switch len(fields) {
+ case 2:
+ // No unit present, so use the parsed value as bytes directly.
+ valBytes = val
+ case 3:
+ // Unit present in optional 3rd field, convert it to
+ // bytes. The only unit supported within the Linux
+ // kernel is `kB`.
+ if fields[2] != "kB" {
+ return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
+ }
+
+ valBytes = 1024 * val
+
+ default:
+ return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
+ }
+
switch fields[0] {
case "MemTotal:":
- m.MemTotal = &v
+ m.MemTotal = &val
+ m.MemTotalBytes = &valBytes
case "MemFree:":
- m.MemFree = &v
+ m.MemFree = &val
+ m.MemFreeBytes = &valBytes
case "MemAvailable:":
- m.MemAvailable = &v
+ m.MemAvailable = &val
+ m.MemAvailableBytes = &valBytes
case "Buffers:":
- m.Buffers = &v
+ m.Buffers = &val
+ m.BuffersBytes = &valBytes
case "Cached:":
- m.Cached = &v
+ m.Cached = &val
+ m.CachedBytes = &valBytes
case "SwapCached:":
- m.SwapCached = &v
+ m.SwapCached = &val
+ m.SwapCachedBytes = &valBytes
case "Active:":
- m.Active = &v
+ m.Active = &val
+ m.ActiveBytes = &valBytes
case "Inactive:":
- m.Inactive = &v
+ m.Inactive = &val
+ m.InactiveBytes = &valBytes
case "Active(anon):":
- m.ActiveAnon = &v
+ m.ActiveAnon = &val
+ m.ActiveAnonBytes = &valBytes
case "Inactive(anon):":
- m.InactiveAnon = &v
+ m.InactiveAnon = &val
+ m.InactiveAnonBytes = &valBytes
case "Active(file):":
- m.ActiveFile = &v
+ m.ActiveFile = &val
+ m.ActiveFileBytes = &valBytes
case "Inactive(file):":
- m.InactiveFile = &v
+ m.InactiveFile = &val
+ m.InactiveFileBytes = &valBytes
case "Unevictable:":
- m.Unevictable = &v
+ m.Unevictable = &val
+ m.UnevictableBytes = &valBytes
case "Mlocked:":
- m.Mlocked = &v
+ m.Mlocked = &val
+ m.MlockedBytes = &valBytes
case "SwapTotal:":
- m.SwapTotal = &v
+ m.SwapTotal = &val
+ m.SwapTotalBytes = &valBytes
case "SwapFree:":
- m.SwapFree = &v
+ m.SwapFree = &val
+ m.SwapFreeBytes = &valBytes
case "Dirty:":
- m.Dirty = &v
+ m.Dirty = &val
+ m.DirtyBytes = &valBytes
case "Writeback:":
- m.Writeback = &v
+ m.Writeback = &val
+ m.WritebackBytes = &valBytes
case "AnonPages:":
- m.AnonPages = &v
+ m.AnonPages = &val
+ m.AnonPagesBytes = &valBytes
case "Mapped:":
- m.Mapped = &v
+ m.Mapped = &val
+ m.MappedBytes = &valBytes
case "Shmem:":
- m.Shmem = &v
+ m.Shmem = &val
+ m.ShmemBytes = &valBytes
case "Slab:":
- m.Slab = &v
+ m.Slab = &val
+ m.SlabBytes = &valBytes
case "SReclaimable:":
- m.SReclaimable = &v
+ m.SReclaimable = &val
+ m.SReclaimableBytes = &valBytes
case "SUnreclaim:":
- m.SUnreclaim = &v
+ m.SUnreclaim = &val
+ m.SUnreclaimBytes = &valBytes
case "KernelStack:":
- m.KernelStack = &v
+ m.KernelStack = &val
+ m.KernelStackBytes = &valBytes
case "PageTables:":
- m.PageTables = &v
+ m.PageTables = &val
+ m.PageTablesBytes = &valBytes
case "NFS_Unstable:":
- m.NFSUnstable = &v
+ m.NFSUnstable = &val
+ m.NFSUnstableBytes = &valBytes
case "Bounce:":
- m.Bounce = &v
+ m.Bounce = &val
+ m.BounceBytes = &valBytes
case "WritebackTmp:":
- m.WritebackTmp = &v
+ m.WritebackTmp = &val
+ m.WritebackTmpBytes = &valBytes
case "CommitLimit:":
- m.CommitLimit = &v
+ m.CommitLimit = &val
+ m.CommitLimitBytes = &valBytes
case "Committed_AS:":
- m.CommittedAS = &v
+ m.CommittedAS = &val
+ m.CommittedASBytes = &valBytes
case "VmallocTotal:":
- m.VmallocTotal = &v
+ m.VmallocTotal = &val
+ m.VmallocTotalBytes = &valBytes
case "VmallocUsed:":
- m.VmallocUsed = &v
+ m.VmallocUsed = &val
+ m.VmallocUsedBytes = &valBytes
case "VmallocChunk:":
- m.VmallocChunk = &v
+ m.VmallocChunk = &val
+ m.VmallocChunkBytes = &valBytes
+ case "Percpu:":
+ m.Percpu = &val
+ m.PercpuBytes = &valBytes
case "HardwareCorrupted:":
- m.HardwareCorrupted = &v
+ m.HardwareCorrupted = &val
+ m.HardwareCorruptedBytes = &valBytes
case "AnonHugePages:":
- m.AnonHugePages = &v
+ m.AnonHugePages = &val
+ m.AnonHugePagesBytes = &valBytes
case "ShmemHugePages:":
- m.ShmemHugePages = &v
+ m.ShmemHugePages = &val
+ m.ShmemHugePagesBytes = &valBytes
case "ShmemPmdMapped:":
- m.ShmemPmdMapped = &v
+ m.ShmemPmdMapped = &val
+ m.ShmemPmdMappedBytes = &valBytes
case "CmaTotal:":
- m.CmaTotal = &v
+ m.CmaTotal = &val
+ m.CmaTotalBytes = &valBytes
case "CmaFree:":
- m.CmaFree = &v
+ m.CmaFree = &val
+ m.CmaFreeBytes = &valBytes
case "HugePages_Total:":
- m.HugePagesTotal = &v
+ m.HugePagesTotal = &val
case "HugePages_Free:":
- m.HugePagesFree = &v
+ m.HugePagesFree = &val
case "HugePages_Rsvd:":
- m.HugePagesRsvd = &v
+ m.HugePagesRsvd = &val
case "HugePages_Surp:":
- m.HugePagesSurp = &v
+ m.HugePagesSurp = &val
case "Hugepagesize:":
- m.Hugepagesize = &v
+ m.Hugepagesize = &val
+ m.HugepagesizeBytes = &valBytes
case "DirectMap4k:":
- m.DirectMap4k = &v
+ m.DirectMap4k = &val
+ m.DirectMap4kBytes = &valBytes
case "DirectMap2M:":
- m.DirectMap2M = &v
+ m.DirectMap2M = &val
+ m.DirectMap2MBytes = &valBytes
case "DirectMap1G:":
- m.DirectMap1G = &v
+ m.DirectMap1G = &val
+ m.DirectMap1GBytes = &valBytes
}
}
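
The new `*Bytes` fields are filled by the unit-aware branch above; `kB` is the only unit the kernel emits in /proc/meminfo. A self-contained sketch of the normalization rule (the helper name is illustrative, not part of the package):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseLine returns the raw value and its byte-normalized counterpart.
func parseLine(line string) (val, valBytes uint64, err error) {
	fields := strings.Fields(line)
	if len(fields) < 2 {
		return 0, 0, fmt.Errorf("malformed line %q", line)
	}
	if val, err = strconv.ParseUint(fields[1], 0, 64); err != nil {
		return 0, 0, err
	}
	switch len(fields) {
	case 2: // no unit: already bytes
		valBytes = val
	case 3: // optional unit: only kB is valid
		if fields[2] != "kB" {
			return 0, 0, fmt.Errorf("unsupported unit %q", fields[2])
		}
		valBytes = 1024 * val
	default:
		return 0, 0, fmt.Errorf("malformed line %q", line)
	}
	return val, valBytes, nil
}

func main() {
	v, vb, _ := parseLine("MemTotal:       16042172 kB")
	fmt.Println(v, vb) // 16042172 16427184128
}
```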
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
index 59f4d5055836..a704c5e735f3 100644
--- a/vendor/github.com/prometheus/procfs/mountinfo.go
+++ b/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -78,11 +78,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
mountInfo := strings.Split(mountString, " ")
mountInfoLength := len(mountInfo)
if mountInfoLength < 10 {
- return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
+ return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString)
}
if mountInfo[mountInfoLength-4] != "-" {
- return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4])
+ return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4])
}
mount := &MountInfo{
@@ -98,18 +98,18 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
mount.MountID, err = strconv.Atoi(mountInfo[0])
if err != nil {
- return nil, fmt.Errorf("failed to parse mount ID")
+ return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID)
}
mount.ParentID, err = strconv.Atoi(mountInfo[1])
if err != nil {
- return nil, fmt.Errorf("failed to parse parent ID")
+ return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID)
}
// Has optional fields, which is a space separated list of values.
// Example: shared:2 master:7
if mountInfo[6] != "" {
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
}
}
return mount, nil
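
From the caller's side mountinfo parsing is unchanged; only the error texts moved onto the `ErrFileParse` sentinel. A usage sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	mounts, err := procfs.GetMounts()
	if err != nil {
		log.Fatal(err) // wraps procfs.ErrFileParse on malformed lines
	}
	for _, m := range mounts {
		fmt.Println(m.MountID, m.MountPoint, m.FSType)
	}
}
```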
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index f7a828bb1da7..75a3b6c810ff 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -44,6 +44,14 @@ const (
fieldTransport11TCPLen = 13
fieldTransport11UDPLen = 10
+
+ // kernel version >= 4.14 MaxLen
+ // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
+ fieldTransport11RDMAMaxLen = 28
+
+ // kernel version <= 4.2 MinLen
+ // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
+ fieldTransport11RDMAMinLen = 20
)
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
@@ -80,7 +88,7 @@ type MountStatsNFS struct {
// Statistics broken down by filesystem operation.
Operations []NFSOperationStats
// Statistics about the NFS RPC transport.
- Transport NFSTransportStats
+ Transport []NFSTransportStats
}
// mountStats implements MountStats.
@@ -231,6 +239,33 @@ type NFSTransportStats struct {
// A running counter, incremented on each request as the current size of the
// pending queue.
CumulativePendingQueue uint64
+
+ // Stats below only available with stat version 1.1.
+ // Transport over RDMA
+
+ // accessed when sending a call
+ ReadChunkCount uint64
+ WriteChunkCount uint64
+ ReplyChunkCount uint64
+ TotalRdmaRequest uint64
+
+ // rarely accessed error counters
+ PullupCopyCount uint64
+ HardwayRegisterCount uint64
+ FailedMarshalCount uint64
+ BadReplyCount uint64
+ MrsRecovered uint64
+ MrsOrphaned uint64
+ MrsAllocated uint64
+ EmptySendctxQ uint64
+
+ // accessed when receiving a reply
+ TotalRdmaReply uint64
+ FixupCopyCount uint64
+ ReplyWaitsForSend uint64
+ LocalInvNeeded uint64
+ NomsgCallCount uint64
+ BcallCount uint64
}
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
@@ -264,7 +299,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
if len(ss) > deviceEntryLen {
// Only NFSv3 and v4 are supported for parsing statistics
if m.Type != nfs3Type && m.Type != nfs4Type {
- return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+ return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type)
}
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
@@ -284,10 +319,11 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
}
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
-// device [device] mounted on [mount] with fstype [type]
+//
+// device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen {
- return nil, fmt.Errorf("invalid device entry: %v", ss)
+ return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
}
// Check for specific words appearing at specific indices to ensure
@@ -305,7 +341,7 @@ func parseMount(ss []string) (*Mount, error) {
for _, f := range format {
if ss[f.i] != f.s {
- return nil, fmt.Errorf("invalid device entry: %v", ss)
+ return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
}
}
@@ -342,7 +378,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
switch ss[0] {
case fieldOpts:
if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
}
if stats.Opts == nil {
stats.Opts = map[string]string{}
@@ -357,7 +393,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
}
case fieldAge:
if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
}
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")
@@ -368,7 +404,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Age = d
case fieldBytes:
if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
}
bstats, err := parseNFSBytesStats(ss[1:])
if err != nil {
@@ -378,7 +414,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Bytes = *bstats
case fieldEvents:
if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss)
}
estats, err := parseNFSEventsStats(ss[1:])
if err != nil {
@@ -388,7 +424,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Events = *estats
case fieldTransport:
if len(ss) < 3 {
- return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss)
}
tstats, err := parseNFSTransportStats(ss[1:], statVersion)
@@ -396,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
return nil, err
}
- stats.Transport = *tstats
+ stats.Transport = append(stats.Transport, *tstats)
}
// When encountering "per-operation statistics", we must break this
@@ -427,7 +463,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
// integer fields.
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
if len(ss) != fieldBytesLen {
- return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+ return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss)
}
ns := make([]uint64, 0, fieldBytesLen)
@@ -456,7 +492,7 @@ func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
// integer fields.
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
if len(ss) != fieldEventsLen {
- return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+ return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss)
}
ns := make([]uint64, 0, fieldEventsLen)
@@ -520,7 +556,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
}
if len(ss) < minFields {
- return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+ return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss)
}
// Skip string operation name for integers
@@ -533,7 +569,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
ns = append(ns, n)
}
-
opStats := NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
@@ -571,10 +606,10 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
} else if protocol == "udp" {
expectedLength = fieldTransport10UDPLen
} else {
- return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+ return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
}
if len(ss) != expectedLength {
- return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+ return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss)
}
case statVersion11:
var expectedLength int
@@ -582,14 +617,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
expectedLength = fieldTransport11TCPLen
} else if protocol == "udp" {
expectedLength = fieldTransport11UDPLen
+ } else if protocol == "rdma" {
+ expectedLength = fieldTransport11RDMAMinLen
} else {
- return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+ return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
}
- if len(ss) != expectedLength {
- return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+ if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
+ (protocol == "rdma" && len(ss) < expectedLength) {
+ return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
}
default:
- return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+ return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
}
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
@@ -599,7 +637,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// Note: slice length must be set to length of v1.1 stats to avoid a panic when
// only v1.0 stats are present.
// See: /~https://github.com/prometheus/node_exporter/issues/571.
- ns := make([]uint64, fieldTransport11TCPLen)
+ //
+ // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen
+ ns := make([]uint64, fieldTransport11RDMAMaxLen+3)
for i, s := range ss {
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
@@ -617,9 +657,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// we set them to 0 here.
if protocol == "udp" {
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+ } else if protocol == "tcp" {
+ ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
+ } else if protocol == "rdma" {
+ ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
}
return &NFSTransportStats{
+ // NFS xprt over tcp or udp
Protocol: protocol,
Port: ns[0],
Bind: ns[1],
@@ -631,8 +676,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
BadTransactionIDs: ns[7],
CumulativeActiveRequests: ns[8],
CumulativeBacklog: ns[9],
- MaximumRPCSlotsUsed: ns[10],
- CumulativeSendingQueue: ns[11],
- CumulativePendingQueue: ns[12],
+
+ // NFS xprt over tcp or udp
+ // And statVersion 1.1
+ MaximumRPCSlotsUsed: ns[10],
+ CumulativeSendingQueue: ns[11],
+ CumulativePendingQueue: ns[12],
+
+ // NFS xprt over rdma
+ // And statVersion 1.1
+ ReadChunkCount: ns[13],
+ WriteChunkCount: ns[14],
+ ReplyChunkCount: ns[15],
+ TotalRdmaRequest: ns[16],
+ PullupCopyCount: ns[17],
+ HardwayRegisterCount: ns[18],
+ FailedMarshalCount: ns[19],
+ BadReplyCount: ns[20],
+ MrsRecovered: ns[21],
+ MrsOrphaned: ns[22],
+ MrsAllocated: ns[23],
+ EmptySendctxQ: ns[24],
+ TotalRdmaReply: ns[25],
+ FixupCopyCount: ns[26],
+ ReplyWaitsForSend: ns[27],
+ LocalInvNeeded: ns[28],
+ NomsgCallCount: ns[29],
+ BcallCount: ns[30],
}, nil
}
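
Since a mount can report several xprt lines (for example NFS over RDMA alongside TCP), MountStatsNFS.Transport becomes a slice and consumers now iterate it. A sketch of the updated access pattern, assuming Proc.MountStats and the usual type assertion on the MountStats interface:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	mounts, err := p.MountStats() // parses /proc/self/mountstats
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue // not an NFSv3/v4 mount
		}
		for _, t := range nfs.Transport { // one entry per xprt line
			fmt.Printf("%s (%s): %d RDMA requests\n", m.Mount, t.Protocol, t.TotalRdmaRequest)
		}
	}
}
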
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
index 8300daca0545..316df5fbb74e 100644
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -18,7 +18,6 @@ import (
"bytes"
"fmt"
"io"
- "strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
@@ -28,9 +27,13 @@ import (
// and contains netfilter conntrack statistics at one CPU core.
type ConntrackStatEntry struct {
Entries uint64
+ Searched uint64
Found uint64
+ New uint64
Invalid uint64
Ignore uint64
+ Delete uint64
+ DeleteList uint64
Insert uint64
InsertFailed uint64
Drop uint64
@@ -55,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err)
+ return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
}
return stat, nil
@@ -81,73 +84,35 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
// Parses a ConntrackStatEntry from given array of fields.
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
- if len(fields) != 17 {
- return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
- }
- entry := &ConntrackStatEntry{}
-
- entries, err := parseConntrackStatField(fields[0])
- if err != nil {
- return nil, err
- }
- entry.Entries = entries
-
- found, err := parseConntrackStatField(fields[2])
- if err != nil {
- return nil, err
- }
- entry.Found = found
-
- invalid, err := parseConntrackStatField(fields[4])
- if err != nil {
- return nil, err
- }
- entry.Invalid = invalid
-
- ignore, err := parseConntrackStatField(fields[5])
- if err != nil {
- return nil, err
- }
- entry.Ignore = ignore
-
- insert, err := parseConntrackStatField(fields[8])
+ entries, err := util.ParseHexUint64s(fields)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
}
- entry.Insert = insert
-
- insertFailed, err := parseConntrackStatField(fields[9])
- if err != nil {
- return nil, err
+ numEntries := len(entries)
+ if numEntries < 16 || numEntries > 17 {
+ return nil,
+ fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries)
}
- entry.InsertFailed = insertFailed
- drop, err := parseConntrackStatField(fields[10])
- if err != nil {
- return nil, err
+ stats := &ConntrackStatEntry{
+ Entries: *entries[0],
+ Searched: *entries[1],
+ Found: *entries[2],
+ New: *entries[3],
+ Invalid: *entries[4],
+ Ignore: *entries[5],
+ Delete: *entries[6],
+ DeleteList: *entries[7],
+ Insert: *entries[8],
+ InsertFailed: *entries[9],
+ Drop: *entries[10],
+ EarlyDrop: *entries[11],
}
- entry.Drop = drop
- earlyDrop, err := parseConntrackStatField(fields[11])
- if err != nil {
- return nil, err
+ // Ignore missing search_restart on Linux < 2.6.35.
+ if numEntries == 17 {
+ stats.SearchRestart = *entries[16]
}
- entry.EarlyDrop = earlyDrop
- searchRestart, err := parseConntrackStatField(fields[16])
- if err != nil {
- return nil, err
- }
- entry.SearchRestart = searchRestart
-
- return entry, nil
-}
-
-// Parses a uint64 from given hex in string.
-func parseConntrackStatField(field string) (uint64, error) {
- val, err := strconv.ParseUint(field, 16, 64)
- if err != nil {
- return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
- }
- return val, err
+ return stats, nil
}
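
The rewrite collapses seventeen hand-rolled field parses into one util.ParseHexUint64s call and accepts the 16-field layout of kernels older than 2.6.35, where search_restart is absent. Downstream use is unchanged; a sketch via the exported ConntrackStat accessor:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	entries, err := fs.ConntrackStat() // one entry per CPU core
	if err != nil {
		panic(err)
	}
	for cpu, e := range entries {
		fmt.Printf("cpu%d entries=%d searched=%d drop=%d restarts=%d\n",
			cpu, e.Entries, e.Searched, e.Drop, e.SearchRestart)
	}
}
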
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
index 7fd57d7f463b..b70f1fc7a4ab 100644
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -50,10 +50,13 @@ type (
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
+ // Drops shows the total number of dropped packets of all UDP sockets.
+ Drops *uint64
}
// netIPSocketLine represents the fields parsed from a single line
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+ // Drops is non-nil for udp{,6}, but nil for tcp{,6}.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
Sl uint64
@@ -66,6 +69,7 @@ type (
RxQueue uint64
UID uint64
Inode uint64
+ Drops *uint64
}
)
@@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) {
defer f.Close()
var netIPSocket NetIPSocket
+ isUDP := strings.Contains(file, "udp")
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields)
+ line, err := parseNetIPSocketLine(fields, isUDP)
if err != nil {
return nil, err
}
@@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
defer f.Close()
var netIPSocketSummary NetIPSocketSummary
+ var udpPacketDrops uint64
+ isUDP := strings.Contains(file, "udp")
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields)
+ line, err := parseNetIPSocketLine(fields, isUDP)
if err != nil {
return nil, err
}
netIPSocketSummary.TxQueueLength += line.TxQueue
netIPSocketSummary.RxQueueLength += line.RxQueue
netIPSocketSummary.UsedSockets++
+ if isUDP {
+ udpPacketDrops += *line.Drops
+ netIPSocketSummary.Drops = &udpPacketDrops
+ }
}
if err := s.Err(); err != nil {
return nil, err
@@ -130,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) {
var byteIP []byte
byteIP, err := hex.DecodeString(hexIP)
if err != nil {
- return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP)
+ return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
}
switch len(byteIP) {
case 4:
@@ -144,16 +155,17 @@ func parseIP(hexIP string) (net.IP, error) {
}
return i, nil
default:
- return nil, fmt.Errorf("Unable to parse IP %s", hexIP)
+ return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
}
}
// parseNetIPSocketLine parses a single line, represented by a list of fields.
-func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
+func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
if len(fields) < 10 {
return nil, fmt.Errorf(
- "cannot parse net socket line as it has less then 10 columns %q",
+ "%w: Less than 10 columns found %q",
+ ErrFileParse,
strings.Join(fields, " "),
)
}
@@ -162,64 +174,74 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
// sl
s := strings.Split(fields[0], ":")
if len(s) != 2 {
- return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0])
+ return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0])
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
- return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
}
// local_address
l := strings.Split(fields[1], ":")
if len(l) != 2 {
- return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1])
+ return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1])
}
if line.LocalAddr, err = parseIP(l[0]); err != nil {
return nil, err
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
}
// remote_address
r := strings.Split(fields[2], ":")
if len(r) != 2 {
- return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1])
+ return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1])
}
if line.RemAddr, err = parseIP(r[0]); err != nil {
return nil, err
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse st value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
}
// tx_queue and rx_queue
q := strings.Split(fields[4], ":")
if len(q) != 2 {
return nil, fmt.Errorf(
- "cannot parse tx/rx queues in socket line as it has a missing colon %q",
+ "%w: Missing colon for tx/rx queues in socket line %q",
+ ErrFileParse,
fields[4],
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
- return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
}
// inode
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
- return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
+ }
+
+ // drops
+ if isUDP {
+ drops, err := strconv.ParseUint(fields[12], 0, 64)
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
+ }
+ line.Drops = &drops
}
return line, nil
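
Drops is deliberately a *uint64: /proc/net/udp{,6} carries a drops column at index 12 while the tcp files do not, so a nil pointer distinguishes "not applicable" from a genuine zero. A caller sketch, assuming the NetUDPSummary accessor that feeds newNetIPSocketSummary a udp path:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	sum, err := fs.NetUDPSummary()
	if err != nil {
		panic(err)
	}
	if sum.Drops != nil { // nil for TCP files, set for UDP
		fmt.Println("udp packet drops:", *sum.Drops)
	}
}
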
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
index 374b6f73f821..b6c77b709fae 100644
--- a/vendor/github.com/prometheus/procfs/net_protocols.go
+++ b/vendor/github.com/prometheus/procfs/net_protocols.go
@@ -131,7 +131,7 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro
} else if fields[6] == disabled {
line.Slab = false
} else {
- return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name)
+ return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
}
line.ModuleName = fields[7]
@@ -173,7 +173,7 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro
} else if capabilities[i] == "n" {
*capabilityFields[i] = false
} else {
- return fmt.Errorf("unable to parse capability block for protocol: position %d", i)
+ return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
}
}
return nil
diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go
new file mode 100644
index 000000000000..deb7029fe1ef
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_route.go
@@ -0,0 +1,143 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+const (
+ blackholeRepresentation string = "*"
+ blackholeIfaceName string = "blackhole"
+ routeLineColumns int = 11
+)
+
+// A NetRouteLine represents one line from net/route.
+type NetRouteLine struct {
+ Iface string
+ Destination uint32
+ Gateway uint32
+ Flags uint32
+ RefCnt uint32
+ Use uint32
+ Metric uint32
+ Mask uint32
+ MTU uint32
+ Window uint32
+ IRTT uint32
+}
+
+func (fs FS) NetRoute() ([]NetRouteLine, error) {
+ return readNetRoute(fs.proc.Path("net", "route"))
+}
+
+func readNetRoute(path string) ([]NetRouteLine, error) {
+ b, err := util.ReadFileNoStat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ routelines, err := parseNetRoute(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read net route from %s: %w", path, err)
+ }
+ return routelines, nil
+}
+
+func parseNetRoute(r io.Reader) ([]NetRouteLine, error) {
+ var routelines []NetRouteLine
+
+ scanner := bufio.NewScanner(r)
+ scanner.Scan()
+ for scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ routeline, err := parseNetRouteLine(fields)
+ if err != nil {
+ return nil, err
+ }
+ routelines = append(routelines, *routeline)
+ }
+ return routelines, nil
+}
+
+func parseNetRouteLine(fields []string) (*NetRouteLine, error) {
+ if len(fields) != routeLineColumns {
+ return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields))
+ }
+ iface := fields[0]
+ if iface == blackholeRepresentation {
+ iface = blackholeIfaceName
+ }
+ destination, err := strconv.ParseUint(fields[1], 16, 32)
+ if err != nil {
+ return nil, err
+ }
+ gateway, err := strconv.ParseUint(fields[2], 16, 32)
+ if err != nil {
+ return nil, err
+ }
+ flags, err := strconv.ParseUint(fields[3], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ refcnt, err := strconv.ParseUint(fields[4], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ use, err := strconv.ParseUint(fields[5], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ metric, err := strconv.ParseUint(fields[6], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ mask, err := strconv.ParseUint(fields[7], 16, 32)
+ if err != nil {
+ return nil, err
+ }
+ mtu, err := strconv.ParseUint(fields[8], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ window, err := strconv.ParseUint(fields[9], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ irtt, err := strconv.ParseUint(fields[10], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ routeline := &NetRouteLine{
+ Iface: iface,
+ Destination: uint32(destination),
+ Gateway: uint32(gateway),
+ Flags: uint32(flags),
+ RefCnt: uint32(refcnt),
+ Use: uint32(use),
+ Metric: uint32(metric),
+ Mask: uint32(mask),
+ MTU: uint32(mtu),
+ Window: uint32(window),
+ IRTT: uint32(irtt),
+ }
+ return routeline, nil
+}
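
The hex address fields in /proc/net/route are the kernel's raw 32-bit values, so on a little-endian host the dotted quad reads reversed; decoding them with encoding/binary keeps that assumption explicit. A sketch that prints the default route (little-endian byte order is assumed here):

package main

import (
	"encoding/binary"
	"fmt"
	"net"

	"github.com/prometheus/procfs"
)

// u32ToIP reinterprets the kernel's raw 32-bit address as an IPv4 address.
// Assumes a little-endian host, the common case for /proc/net/route readers.
func u32ToIP(v uint32) net.IP {
	ip := make(net.IP, 4)
	binary.LittleEndian.PutUint32(ip, v)
	return ip
}

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	routes, err := fs.NetRoute()
	if err != nil {
		panic(err)
	}
	for _, r := range routes {
		if r.Destination == 0 { // 0.0.0.0/0: the default route
			fmt.Printf("default via %s dev %s metric %d\n",
				u32ToIP(r.Gateway), r.Iface, r.Metric)
		}
	}
}
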
diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go
index e36f4872dd62..fae62b13d961 100644
--- a/vendor/github.com/prometheus/procfs/net_sockstat.go
+++ b/vendor/github.com/prometheus/procfs/net_sockstat.go
@@ -16,7 +16,6 @@ package procfs
import (
"bufio"
"bytes"
- "errors"
"fmt"
"io"
"strings"
@@ -70,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) {
stat, err := parseSockstat(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err)
+ return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
}
return stat, nil
@@ -84,13 +83,13 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// Expect a minimum of a protocol and one key/value pair.
fields := strings.Split(s.Text(), " ")
if len(fields) < 3 {
- return nil, fmt.Errorf("malformed sockstat line: %q", s.Text())
+ return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text())
}
// The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:])
if err != nil {
- return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err)
+ return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
}
// The first field is the protocol. We must trim its colon suffix.
@@ -119,7 +118,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// parseSockstatKVs parses a string slice into a map of key/value pairs.
func parseSockstatKVs(kvs []string) (map[string]int, error) {
if len(kvs)%2 != 0 {
- return nil, errors.New("odd number of fields in key/value pairs")
+ return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs)
}
// Iterate two values at a time to gather key/value pairs.
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
index a94f86dc4ae6..71c8059f4d77 100644
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -27,8 +27,9 @@ import (
// For the proc file format details,
// See:
// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
-// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
-// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
+// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
type SoftnetStat struct {
@@ -38,6 +39,18 @@ type SoftnetStat struct {
Dropped uint32
// Number of times processing packets ran out of quota.
TimeSqueezed uint32
+ // Number of collisions that occurred while obtaining the device lock during transmission.
+ CPUCollision uint32
+ // Number of times the CPU was woken up via received_rps.
+ ReceivedRps uint32
+ // Number of times the flow limit has been reached.
+ FlowLimitCount uint32
+ // Softnet backlog status.
+ SoftnetBacklogLen uint32
+ // CPU id owning this softnet_data.
+ Index uint32
+ // Width is the number of columns parsed from this softnet_data row.
+ Width int
}
var softNetProcFile = "net/softnet_stat"
@@ -51,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err)
+ return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
}
return entries, nil
@@ -63,25 +76,65 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
s := bufio.NewScanner(r)
var stats []SoftnetStat
+ cpuIndex := 0
for s.Scan() {
columns := strings.Fields(s.Text())
width := len(columns)
+ softnetStat := SoftnetStat{}
if width < minColumns {
- return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
+ return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns)
}
- // We only parse the first three columns at the moment.
- us, err := parseHexUint32s(columns[0:3])
- if err != nil {
- return nil, err
+ // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
+ if width >= minColumns {
+ us, err := parseHexUint32s(columns[0:9])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.Processed = us[0]
+ softnetStat.Dropped = us[1]
+ softnetStat.TimeSqueezed = us[2]
+ softnetStat.CPUCollision = us[8]
+ }
+
+ // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+ if width >= 10 {
+ us, err := parseHexUint32s(columns[9:10])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.ReceivedRps = us[0]
}
- stats = append(stats, SoftnetStat{
- Processed: us[0],
- Dropped: us[1],
- TimeSqueezed: us[2],
- })
+ // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+ if width >= 11 {
+ us, err := parseHexUint32s(columns[10:11])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.FlowLimitCount = us[0]
+ }
+
+ // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+ if width >= 13 {
+ us, err := parseHexUint32s(columns[11:13])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.SoftnetBacklogLen = us[0]
+ softnetStat.Index = us[1]
+ } else {
+ // For older kernels, create the Index based on the scan line number.
+ softnetStat.Index = uint32(cpuIndex)
+ }
+ softnetStat.Width = width
+ stats = append(stats, softnetStat)
+ cpuIndex++
}
return stats, nil
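
Because the column count varies by kernel, the parser records Width on every row; consumers should gate on it before reading the newer fields, and on kernels before 5.14 Index is synthesized from the line number rather than read from the file. A sketch:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	stats, err := fs.NetSoftnetStat()
	if err != nil {
		panic(err)
	}
	for _, s := range stats {
		fmt.Printf("cpu%d processed=%d dropped=%d", s.Index, s.Processed, s.Dropped)
		if s.Width >= 13 { // backlog length only exists on Linux >= 5.14
			fmt.Printf(" backlog=%d", s.SoftnetBacklogLen)
		}
		fmt.Println()
	}
}
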
diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go
new file mode 100644
index 000000000000..13994c1782f4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go
@@ -0,0 +1,119 @@
+// Copyright 2023 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// TLSStat struct represents data in /proc/net/tls_stat.
+// See https://docs.kernel.org/networking/tls.html#statistics
+type TLSStat struct {
+ // number of TX sessions currently installed where host handles cryptography
+ TLSCurrTxSw int
+ // number of RX sessions currently installed where host handles cryptography
+ TLSCurrRxSw int
+ // number of TX sessions currently installed where NIC handles cryptography
+ TLSCurrTxDevice int
+ // number of RX sessions currently installed where NIC handles cryptography
+ TLSCurrRxDevice int
+ // number of TX sessions opened with host cryptography
+ TLSTxSw int
+ // number of RX sessions opened with host cryptography
+ TLSRxSw int
+ // number of TX sessions opened with NIC cryptography
+ TLSTxDevice int
+ // number of RX sessions opened with NIC cryptography
+ TLSRxDevice int
+ // record decryption failed (e.g. due to incorrect authentication tag)
+ TLSDecryptError int
+ // number of RX resyncs sent to NICs handling cryptography
+ TLSRxDeviceResync int
+ // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
+ TLSDecryptRetry int
+ // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
+ TLSRxNoPadViolation int
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func NewTLSStat() (TLSStat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return TLSStat{}, err
+ }
+
+ return fs.NewTLSStat()
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func (fs FS) NewTLSStat() (TLSStat, error) {
+ file, err := os.Open(fs.proc.Path("net/tls_stat"))
+ if err != nil {
+ return TLSStat{}, err
+ }
+ defer file.Close()
+
+ var (
+ tlsstat = TLSStat{}
+ s = bufio.NewScanner(file)
+ )
+
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+
+ if len(fields) != 2 {
+ return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
+ }
+
+ name := fields[0]
+ value, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return TLSStat{}, err
+ }
+
+ switch name {
+ case "TlsCurrTxSw":
+ tlsstat.TLSCurrTxSw = value
+ case "TlsCurrRxSw":
+ tlsstat.TLSCurrRxSw = value
+ case "TlsCurrTxDevice":
+ tlsstat.TLSCurrTxDevice = value
+ case "TlsCurrRxDevice":
+ tlsstat.TLSCurrRxDevice = value
+ case "TlsTxSw":
+ tlsstat.TLSTxSw = value
+ case "TlsRxSw":
+ tlsstat.TLSRxSw = value
+ case "TlsTxDevice":
+ tlsstat.TLSTxDevice = value
+ case "TlsRxDevice":
+ tlsstat.TLSRxDevice = value
+ case "TlsDecryptError":
+ tlsstat.TLSDecryptError = value
+ case "TlsRxDeviceResync":
+ tlsstat.TLSRxDeviceResync = value
+ case "TlsDecryptRetry":
+ tlsstat.TLSDecryptRetry = value
+ case "TlsRxNoPadViolation":
+ tlsstat.TLSRxNoPadViolation = value
+ }
+
+ }
+
+ return tlsstat, s.Err()
+}
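
Usage mirrors the other stat readers: build an FS from the default mount point, or call NewTLSStat on an existing one. A short sketch (the open fails unless the kernel tls module is loaded, so that /proc/net/tls_stat exists):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	stat, err := procfs.NewTLSStat() // reads /proc/net/tls_stat
	if err != nil {
		panic(err)
	}
	fmt.Printf("tx sessions: sw=%d nic=%d, decrypt errors=%d\n",
		stat.TLSTxSw, stat.TLSTxDevice, stat.TLSDecryptError)
}
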
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
index 98aa8e1c31c1..d868cebdaae8 100644
--- a/vendor/github.com/prometheus/procfs/net_unix.go
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
- return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err)
+ return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
}
nu.Rows = append(nu.Rows, item)
}
if err := s.Err(); err != nil {
- return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err)
+ return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
}
return &nu, nil
@@ -126,7 +126,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
l := len(fields)
if l < min {
- return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
+ return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
}
// Field offsets are as follows:
@@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
users, err := u.parseUsers(fields[1])
if err != nil {
- return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err)
+ return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
}
flags, err := u.parseFlags(fields[3])
if err != nil {
- return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err)
+ return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
}
typ, err := u.parseType(fields[4])
if err != nil {
- return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err)
+ return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
}
state, err := u.parseState(fields[5])
if err != nil {
- return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err)
+ return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
}
var inode uint64
if hasInode {
inode, err = u.parseInode(fields[6])
if err != nil {
- return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err)
+ return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
}
}
diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go
new file mode 100644
index 000000000000..7c597bc87089
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_wireless.go
@@ -0,0 +1,182 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Wireless models the content of /proc/net/wireless.
+type Wireless struct {
+ Name string
+
+ // Status is the current 4-digit hex value status of the interface.
+ Status uint64
+
+ // QualityLink is the link quality.
+ QualityLink int
+
+ // QualityLevel is the signal gain (dBm).
+ QualityLevel int
+
+ // QualityNoise is the signal noise baseline (dBm).
+ QualityNoise int
+
+ // DiscardedNwid is the number of discarded packets with wrong nwid/essid.
+ DiscardedNwid int
+
+ // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
+ DiscardedCrypt int
+
+ // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
+ DiscardedFrag int
+
+ // DiscardedRetry is the number of discarded packets that reached max MAC retries.
+ DiscardedRetry int
+
+ // DiscardedMisc is the number of discarded packets for other reasons.
+ DiscardedMisc int
+
+ // MissedBeacon is the number of missed beacons/superframe.
+ MissedBeacon int
+}
+
+// Wireless returns kernel wireless statistics.
+func (fs FS) Wireless() ([]*Wireless, error) {
+ b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
+ if err != nil {
+ return nil, err
+ }
+
+ m, err := parseWireless(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
+ }
+
+ return m, nil
+}
+
+// parseWireless parses the contents of /proc/net/wireless.
+/*
+Inter-| sta-| Quality | Discarded packets | Missed | WE
+face | tus | link level noise | nwid crypt frag retry misc | beacon | 22
+ eth1: 0000 5. -256. -10. 0 1 0 3 0 0
+ eth2: 0000 5. -256. -20. 0 2 0 4 0 0
+*/
+func parseWireless(r io.Reader) ([]*Wireless, error) {
+ var (
+ interfaces []*Wireless
+ scanner = bufio.NewScanner(r)
+ )
+
+ for n := 0; scanner.Scan(); n++ {
+ // Skip the 2 header lines.
+ if n < 2 {
+ continue
+ }
+
+ line := scanner.Text()
+
+ parts := strings.Split(line, ":")
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line)
+ }
+
+ name := strings.TrimSpace(parts[0])
+ stats := strings.Fields(parts[1])
+
+ if len(stats) < 10 {
+ return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line)
+ }
+
+ status, err := strconv.ParseUint(stats[0], 16, 16)
+ if err != nil {
+ return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line)
+ }
+
+ qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
+ if err != nil {
+ return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
+ }
+
+ qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
+ if err != nil {
+ return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
+ }
+
+ qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
+ if err != nil {
+ return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
+ }
+
+ dnwid, err := strconv.Atoi(stats[4])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
+ }
+
+ dcrypt, err := strconv.Atoi(stats[5])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
+ }
+
+ dfrag, err := strconv.Atoi(stats[6])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
+ }
+
+ dretry, err := strconv.Atoi(stats[7])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
+ }
+
+ dmisc, err := strconv.Atoi(stats[8])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
+ }
+
+ mbeacon, err := strconv.Atoi(stats[9])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
+ }
+
+ w := &Wireless{
+ Name: name,
+ Status: status,
+ QualityLink: qlink,
+ QualityLevel: qlevel,
+ QualityNoise: qnoise,
+ DiscardedNwid: dnwid,
+ DiscardedCrypt: dcrypt,
+ DiscardedFrag: dfrag,
+ DiscardedRetry: dretry,
+ DiscardedMisc: dmisc,
+ MissedBeacon: mbeacon,
+ }
+
+ interfaces = append(interfaces, w)
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
+ }
+
+ return interfaces, nil
+}
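
A caller sketch for the new accessor; the quality fields are plain ints because level and noise are dBm readings and can be negative:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	ifaces, err := fs.Wireless() // parses /proc/net/wireless
	if err != nil {
		panic(err)
	}
	for _, w := range ifaces {
		fmt.Printf("%s: link=%d level=%ddBm noise=%ddBm missed beacons=%d\n",
			w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise, w.MissedBeacon)
	}
}
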
diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go
index f9d9d243db38..932ef2046847 100644
--- a/vendor/github.com/prometheus/procfs/net_xfrm.go
+++ b/vendor/github.com/prometheus/procfs/net_xfrm.go
@@ -115,7 +115,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
fields := strings.Fields(s.Text())
if len(fields) != 2 {
- return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text())
+ return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
}
name := fields[0]
diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go
index dcea9c5a671f..742dff453ba8 100644
--- a/vendor/github.com/prometheus/procfs/netstat.go
+++ b/vendor/github.com/prometheus/procfs/netstat.go
@@ -37,32 +37,46 @@ func (fs FS) NetStat() ([]NetStat, error) {
var netStatsTotal []NetStat
for _, filePath := range statFiles {
- file, err := os.Open(filePath)
+ procNetstat, err := parseNetstat(filePath)
if err != nil {
return nil, err
}
+ procNetstat.Filename = filepath.Base(filePath)
- netStatFile := NetStat{
- Filename: filepath.Base(filePath),
- Stats: make(map[string][]uint64),
- }
- scanner := bufio.NewScanner(file)
- scanner.Scan()
- // First string is always a header for stats
- var headers []string
- headers = append(headers, strings.Fields(scanner.Text())...)
+ netStatsTotal = append(netStatsTotal, procNetstat)
+ }
+ return netStatsTotal, nil
+}
+
+// parseNetstat parses the metrics from `/proc/net/stat/` file
+// and returns a NetStat structure.
+func parseNetstat(filePath string) (NetStat, error) {
+ netStat := NetStat{
+ Stats: make(map[string][]uint64),
+ }
+ file, err := os.Open(filePath)
+ if err != nil {
+ return netStat, err
+ }
+ defer file.Close()
+
+ scanner := bufio.NewScanner(file)
+ scanner.Scan()
- // Other strings represent per-CPU counters
- for scanner.Scan() {
- for num, counter := range strings.Fields(scanner.Text()) {
- value, err := strconv.ParseUint(counter, 16, 64)
- if err != nil {
- return nil, err
- }
- netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value)
+ // First string is always a header for stats
+ var headers []string
+ headers = append(headers, strings.Fields(scanner.Text())...)
+
+ // Other strings represent per-CPU counters
+ for scanner.Scan() {
+ for num, counter := range strings.Fields(scanner.Text()) {
+ value, err := strconv.ParseUint(counter, 16, 64)
+ if err != nil {
+ return NetStat{}, err
}
+ netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
}
- netStatsTotal = append(netStatsTotal, netStatFile)
}
- return netStatsTotal, nil
+
+ return netStat, nil
}
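
The refactor only lifts the scan loop into a helper; the result shape is unchanged: one NetStat per file under /proc/net/stat, mapping each header column to its per-CPU counters. A consumer sketch that totals a column across CPUs (the "entries" header is an assumption that holds for arp_cache and similar files):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	stats, err := fs.NetStat() // e.g. arp_cache, ndisc_cache, nf_conntrack
	if err != nil {
		panic(err)
	}
	for _, f := range stats {
		var total uint64
		for _, v := range f.Stats["entries"] { // one value per CPU
			total += v
		}
		fmt.Printf("%s: entries=%d\n", f.Filename, total)
	}
}
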
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index c30223af72ad..142796368fef 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -15,13 +15,13 @@ package procfs
import (
"bytes"
+ "errors"
"fmt"
"io"
"os"
"strconv"
"strings"
- "github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
@@ -30,12 +30,18 @@ type Proc struct {
// The process ID.
PID int
- fs fs.FS
+ fs FS
}
// Procs represents a list of Proc structs.
type Procs []Proc
+var (
+ ErrFileParse = errors.New("Error Parsing File")
+ ErrFileRead = errors.New("Error Reading File")
+ ErrMountPoint = errors.New("Error Accessing Mount point")
+)
+
func (p Procs) Len() int { return len(p) }
func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
@@ -43,7 +49,7 @@ func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
// Self returns a process for the current process read via /proc/self.
func Self() (Proc, error) {
fs, err := NewFS(DefaultMountPoint)
- if err != nil {
+ if err != nil || errors.Unwrap(err) == ErrMountPoint {
return Proc{}, err
}
return fs.Self()
@@ -92,7 +98,7 @@ func (fs FS) Proc(pid int) (Proc, error) {
if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err
}
- return Proc{PID: pid, fs: fs.proc}, nil
+ return Proc{PID: pid, fs: fs}, nil
}
// AllProcs returns a list of all currently available processes.
@@ -105,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
+ return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
}
p := Procs{}
@@ -114,7 +120,7 @@ func (fs FS) AllProcs() (Procs, error) {
if err != nil {
continue
}
- p = append(p, Proc{PID: int(pid), fs: fs.proc})
+ p = append(p, Proc{PID: int(pid), fs: fs})
}
return p, nil
@@ -131,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) {
return []string{}, nil
}
- return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
+ return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
}
// Wchan returns the wchan (wait channel) of a process.
@@ -206,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
for i, n := range names {
fd, err := strconv.ParseInt(n, 10, 32)
if err != nil {
- return nil, fmt.Errorf("could not parse fd %q: %w", n, err)
+ return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
}
fds[i] = uintptr(fd)
}
@@ -237,6 +243,19 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
// FileDescriptorsLen returns the number of currently open file descriptors of
// a process.
func (p Proc) FileDescriptorsLen() (int, error) {
+ // Use fast path if available (Linux v6.2): /~https://github.com/torvalds/linux/commit/f1f1f2569901
+ if p.fs.isReal {
+ stat, err := os.Stat(p.path("fd"))
+ if err != nil {
+ return 0, err
+ }
+
+ size := stat.Size()
+ if size > 0 {
+ return int(size), nil
+ }
+ }
+
fds, err := p.fileDescriptors()
if err != nil {
return 0, err
@@ -278,14 +297,14 @@ func (p Proc) fileDescriptors() ([]string, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return nil, fmt.Errorf("could not read %q: %w", d.Name(), err)
+ return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
}
return names, nil
}
func (p Proc) path(pa ...string) string {
- return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+ return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
// FileDescriptorsInfo retrieves information about all file descriptors of
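
The FileDescriptorsLen fast path relies on a Linux 6.2 change that makes stat(2) on /proc/[pid]/fd report the open-descriptor count in st_size; the isReal guard keeps the readdir fallback for test fixtures and older kernels. Callers are unaffected:

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	p, err := fs.Proc(os.Getpid())
	if err != nil {
		panic(err)
	}
	n, err := p.FileDescriptorsLen() // O(1) on Linux >= 6.2, readdir otherwise
	if err != nil {
		panic(err)
	}
	fmt.Println("open fds:", n)
}
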
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
index cca03327c3fe..daeed7f571af 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -23,7 +23,7 @@ import (
"github.com/prometheus/procfs/internal/util"
)
-// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a
+// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
@@ -51,7 +51,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
fields := strings.SplitN(cgroupStr, ":", 3)
if len(fields) < 3 {
- return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
+ return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr)
}
cgroup := &Cgroup{
@@ -60,7 +60,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
}
cgroup.HierarchyID, err = strconv.Atoi(fields[0])
if err != nil {
- return nil, fmt.Errorf("failed to parse hierarchy ID")
+ return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID)
}
if fields[1] != "" {
ssNames := strings.Split(fields[1], ",")
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go
index 24d4dce9cfc7..5dd4938999ad 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroups.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go
@@ -46,7 +46,7 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
fields := strings.Fields(CgroupSummaryStr)
// require at least 4 fields
if len(fields) < 4 {
- return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr)
+ return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr)
}
CgroupSummary := &CgroupSummary{
@@ -54,15 +54,15 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
}
CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
if err != nil {
- return nil, fmt.Errorf("failed to parse hierarchy ID")
+ return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1])
}
CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
if err != nil {
- return nil, fmt.Errorf("failed to parse Cgroup Num")
+ return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2])
}
CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
if err != nil {
- return nil, fmt.Errorf("failed to parse Enabled")
+ return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3])
}
return CgroupSummary, nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
index 1bbdd4a8e998..fa761b35295e 100644
--- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go
+++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
@@ -26,6 +26,7 @@ var (
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
+ rIno = regexp.MustCompile(`^ino:\s+(\d+)$`)
rInotify = regexp.MustCompile(`^inotify`)
rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
)
@@ -40,6 +41,8 @@ type ProcFDInfo struct {
Flags string
// Mount point ID
MntID string
+ // Inode number
+ Ino string
// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
InotifyInfos []InotifyInfo
}
@@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
return nil, err
}
- var text, pos, flags, mntid string
+ var text, pos, flags, mntid, ino string
var inotify []InotifyInfo
scanner := bufio.NewScanner(bytes.NewReader(data))
@@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
flags = rFlags.FindStringSubmatch(text)[1]
} else if rMntID.MatchString(text) {
mntid = rMntID.FindStringSubmatch(text)[1]
+ } else if rIno.MatchString(text) {
+ ino = rIno.FindStringSubmatch(text)[1]
} else if rInotify.MatchString(text) {
newInotify, err := parseInotifyInfo(text)
if err != nil {
@@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
Pos: pos,
Flags: flags,
MntID: mntid,
+ Ino: ino,
InotifyInfos: inotify,
}
@@ -111,7 +117,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) {
}
return i, nil
}
- return nil, fmt.Errorf("invalid inode entry: %q", line)
+ return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line)
}
// ProcFDInfos represents a list of ProcFDInfo structs.
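
The new Ino field is threaded through exactly like the existing pos/flags/mnt_id strings. A sketch reading it for stdin:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	info, err := p.FDInfo("0") // reads /proc/self/fdinfo/0
	if err != nil {
		panic(err)
	}
	fmt.Printf("fd 0: pos=%s flags=%s mnt_id=%s ino=%s\n",
		info.Pos, info.Flags, info.MntID, info.Ino)
}
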
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go
new file mode 100644
index 000000000000..86b4b4524632
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Interrupt represents a single interrupt line.
+type Interrupt struct {
+ // Info is the type of interrupt.
+ Info string
+ // Devices is the name of the device that is located at that IRQ.
+ Devices string
+ // Values is the number of interrupts per CPU.
+ Values []string
+}
+
+// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
+// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
+// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
+type Interrupts map[string]Interrupt
+
+// Interrupts creates a new instance from a given Proc instance.
+func (p Proc) Interrupts() (Interrupts, error) {
+ data, err := util.ReadFileNoStat(p.path("interrupts"))
+ if err != nil {
+ return nil, err
+ }
+ return parseInterrupts(bytes.NewReader(data))
+}
+
+func parseInterrupts(r io.Reader) (Interrupts, error) {
+ var (
+ interrupts = Interrupts{}
+ scanner = bufio.NewScanner(r)
+ )
+
+ if !scanner.Scan() {
+ return nil, errors.New("interrupts empty")
+ }
+ cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu
+
+ for scanner.Scan() {
+ parts := strings.Fields(scanner.Text())
+ if len(parts) == 0 { // skip empty lines
+ continue
+ }
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts)
+ }
+ intName := parts[0][:len(parts[0])-1] // remove trailing :
+
+ if len(parts) == 2 {
+ interrupts[intName] = Interrupt{
+ Info: "",
+ Devices: "",
+ Values: []string{
+ parts[1],
+ },
+ }
+ continue
+ }
+
+ intr := Interrupt{
+ Values: parts[1 : cpuNum+1],
+ }
+
+ if _, err := strconv.Atoi(intName); err == nil { // numeric interrupt
+ intr.Info = parts[cpuNum+1]
+ intr.Devices = strings.Join(parts[cpuNum+2:], " ")
+ } else {
+ intr.Info = strings.Join(parts[cpuNum+1:], " ")
+ }
+ interrupts[intName] = intr
+ }
+
+ return interrupts, scanner.Err()
+}
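
Values stay as strings because column counts differ per row; callers convert as needed. A sketch that sums the per-CPU counts for each IRQ (note the accessor resolves the file under the Proc's directory, matching the fixture layout):

package main

import (
	"fmt"
	"strconv"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	interrupts, err := p.Interrupts()
	if err != nil {
		panic(err)
	}
	for irq, in := range interrupts {
		var total uint64
		for _, v := range in.Values {
			n, err := strconv.ParseUint(v, 10, 64)
			if err != nil {
				continue // skip malformed values
			}
			total += n
		}
		fmt.Printf("%s (%s %s): %d\n", irq, in.Info, in.Devices, total)
	}
}
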
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
index 7a1388185a97..9530b14bc681 100644
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -103,7 +103,7 @@ func (p Proc) Limits() (ProcLimits, error) {
//fields := limitsMatch.Split(s.Text(), limitsFields)
fields := limitsMatch.FindStringSubmatch(s.Text())
if len(fields) != limitsFields {
- return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text())
+ return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text())
}
switch fields[1] {
@@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) {
}
i, err := strconv.ParseUint(s, 10, 64)
if err != nil {
- return 0, fmt.Errorf("couldn't parse value %q: %w", s, err)
+ return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
}
return i, nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
index f1bcbf32bb3d..7e75c286b5b4 100644
--- a/vendor/github.com/prometheus/procfs/proc_maps.go
+++ b/vendor/github.com/prometheus/procfs/proc_maps.go
@@ -63,17 +63,17 @@ type ProcMap struct {
// parseDevice parses the device token of a line and converts it to a dev_t
// (mkdev) like structure.
func parseDevice(s string) (uint64, error) {
- toks := strings.Split(s, ":")
- if len(toks) < 2 {
- return 0, fmt.Errorf("unexpected number of fields")
+ i := strings.Index(s, ":")
+ if i == -1 {
+ return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s)
}
- major, err := strconv.ParseUint(toks[0], 16, 0)
+ major, err := strconv.ParseUint(s[0:i], 16, 0)
if err != nil {
return 0, err
}
- minor, err := strconv.ParseUint(toks[1], 16, 0)
+ minor, err := strconv.ParseUint(s[i+1:], 16, 0)
if err != nil {
return 0, err
}
@@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) {
// parseAddresses parses the start-end address.
func parseAddresses(s string) (uintptr, uintptr, error) {
- toks := strings.Split(s, "-")
- if len(toks) < 2 {
- return 0, 0, fmt.Errorf("invalid address")
+ idx := strings.Index(s, "-")
+ if idx == -1 {
+ return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s)
}
- saddr, err := parseAddress(toks[0])
+ saddr, err := parseAddress(s[0:idx])
if err != nil {
return 0, 0, err
}
- eaddr, err := parseAddress(toks[1])
+ eaddr, err := parseAddress(s[idx+1:])
if err != nil {
return 0, 0, err
}
@@ -114,7 +114,7 @@ func parseAddresses(s string) (uintptr, uintptr, error) {
// parsePermissions parses a token and returns any that are set.
func parsePermissions(s string) (*ProcMapPermissions, error) {
if len(s) < 4 {
- return nil, fmt.Errorf("invalid permissions token")
+ return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse)
}
perms := ProcMapPermissions{}
@@ -141,7 +141,7 @@ func parsePermissions(s string) (*ProcMapPermissions, error) {
func parseProcMap(text string) (*ProcMap, error) {
fields := strings.Fields(text)
if len(fields) < 5 {
- return nil, fmt.Errorf("truncated procmap entry")
+ return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse)
}
saddr, eaddr, err := parseAddresses(fields[0])
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
index 48b5238194e8..8e3ff4d794ba 100644
--- a/vendor/github.com/prometheus/procfs/proc_netstat.go
+++ b/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -33,139 +33,140 @@ type ProcNetstat struct {
}
type TcpExt struct { // nolint:revive
- SyncookiesSent float64
- SyncookiesRecv float64
- SyncookiesFailed float64
- EmbryonicRsts float64
- PruneCalled float64
- RcvPruned float64
- OfoPruned float64
- OutOfWindowIcmps float64
- LockDroppedIcmps float64
- ArpFilter float64
- TW float64
- TWRecycled float64
- TWKilled float64
- PAWSActive float64
- PAWSEstab float64
- DelayedACKs float64
- DelayedACKLocked float64
- DelayedACKLost float64
- ListenOverflows float64
- ListenDrops float64
- TCPHPHits float64
- TCPPureAcks float64
- TCPHPAcks float64
- TCPRenoRecovery float64
- TCPSackRecovery float64
- TCPSACKReneging float64
- TCPSACKReorder float64
- TCPRenoReorder float64
- TCPTSReorder float64
- TCPFullUndo float64
- TCPPartialUndo float64
- TCPDSACKUndo float64
- TCPLossUndo float64
- TCPLostRetransmit float64
- TCPRenoFailures float64
- TCPSackFailures float64
- TCPLossFailures float64
- TCPFastRetrans float64
- TCPSlowStartRetrans float64
- TCPTimeouts float64
- TCPLossProbes float64
- TCPLossProbeRecovery float64
- TCPRenoRecoveryFail float64
- TCPSackRecoveryFail float64
- TCPRcvCollapsed float64
- TCPDSACKOldSent float64
- TCPDSACKOfoSent float64
- TCPDSACKRecv float64
- TCPDSACKOfoRecv float64
- TCPAbortOnData float64
- TCPAbortOnClose float64
- TCPAbortOnMemory float64
- TCPAbortOnTimeout float64
- TCPAbortOnLinger float64
- TCPAbortFailed float64
- TCPMemoryPressures float64
- TCPMemoryPressuresChrono float64
- TCPSACKDiscard float64
- TCPDSACKIgnoredOld float64
- TCPDSACKIgnoredNoUndo float64
- TCPSpuriousRTOs float64
- TCPMD5NotFound float64
- TCPMD5Unexpected float64
- TCPMD5Failure float64
- TCPSackShifted float64
- TCPSackMerged float64
- TCPSackShiftFallback float64
- TCPBacklogDrop float64
- PFMemallocDrop float64
- TCPMinTTLDrop float64
- TCPDeferAcceptDrop float64
- IPReversePathFilter float64
- TCPTimeWaitOverflow float64
- TCPReqQFullDoCookies float64
- TCPReqQFullDrop float64
- TCPRetransFail float64
- TCPRcvCoalesce float64
- TCPOFOQueue float64
- TCPOFODrop float64
- TCPOFOMerge float64
- TCPChallengeACK float64
- TCPSYNChallenge float64
- TCPFastOpenActive float64
- TCPFastOpenActiveFail float64
- TCPFastOpenPassive float64
- TCPFastOpenPassiveFail float64
- TCPFastOpenListenOverflow float64
- TCPFastOpenCookieReqd float64
- TCPFastOpenBlackhole float64
- TCPSpuriousRtxHostQueues float64
- BusyPollRxPackets float64
- TCPAutoCorking float64
- TCPFromZeroWindowAdv float64
- TCPToZeroWindowAdv float64
- TCPWantZeroWindowAdv float64
- TCPSynRetrans float64
- TCPOrigDataSent float64
- TCPHystartTrainDetect float64
- TCPHystartTrainCwnd float64
- TCPHystartDelayDetect float64
- TCPHystartDelayCwnd float64
- TCPACKSkippedSynRecv float64
- TCPACKSkippedPAWS float64
- TCPACKSkippedSeq float64
- TCPACKSkippedFinWait2 float64
- TCPACKSkippedTimeWait float64
- TCPACKSkippedChallenge float64
- TCPWinProbe float64
- TCPKeepAlive float64
- TCPMTUPFail float64
- TCPMTUPSuccess float64
- TCPWqueueTooBig float64
+ SyncookiesSent *float64
+ SyncookiesRecv *float64
+ SyncookiesFailed *float64
+ EmbryonicRsts *float64
+ PruneCalled *float64
+ RcvPruned *float64
+ OfoPruned *float64
+ OutOfWindowIcmps *float64
+ LockDroppedIcmps *float64
+ ArpFilter *float64
+ TW *float64
+ TWRecycled *float64
+ TWKilled *float64
+ PAWSActive *float64
+ PAWSEstab *float64
+ DelayedACKs *float64
+ DelayedACKLocked *float64
+ DelayedACKLost *float64
+ ListenOverflows *float64
+ ListenDrops *float64
+ TCPHPHits *float64
+ TCPPureAcks *float64
+ TCPHPAcks *float64
+ TCPRenoRecovery *float64
+ TCPSackRecovery *float64
+ TCPSACKReneging *float64
+ TCPSACKReorder *float64
+ TCPRenoReorder *float64
+ TCPTSReorder *float64
+ TCPFullUndo *float64
+ TCPPartialUndo *float64
+ TCPDSACKUndo *float64
+ TCPLossUndo *float64
+ TCPLostRetransmit *float64
+ TCPRenoFailures *float64
+ TCPSackFailures *float64
+ TCPLossFailures *float64
+ TCPFastRetrans *float64
+ TCPSlowStartRetrans *float64
+ TCPTimeouts *float64
+ TCPLossProbes *float64
+ TCPLossProbeRecovery *float64
+ TCPRenoRecoveryFail *float64
+ TCPSackRecoveryFail *float64
+ TCPRcvCollapsed *float64
+ TCPDSACKOldSent *float64
+ TCPDSACKOfoSent *float64
+ TCPDSACKRecv *float64
+ TCPDSACKOfoRecv *float64
+ TCPAbortOnData *float64
+ TCPAbortOnClose *float64
+ TCPAbortOnMemory *float64
+ TCPAbortOnTimeout *float64
+ TCPAbortOnLinger *float64
+ TCPAbortFailed *float64
+ TCPMemoryPressures *float64
+ TCPMemoryPressuresChrono *float64
+ TCPSACKDiscard *float64
+ TCPDSACKIgnoredOld *float64
+ TCPDSACKIgnoredNoUndo *float64
+ TCPSpuriousRTOs *float64
+ TCPMD5NotFound *float64
+ TCPMD5Unexpected *float64
+ TCPMD5Failure *float64
+ TCPSackShifted *float64
+ TCPSackMerged *float64
+ TCPSackShiftFallback *float64
+ TCPBacklogDrop *float64
+ PFMemallocDrop *float64
+ TCPMinTTLDrop *float64
+ TCPDeferAcceptDrop *float64
+ IPReversePathFilter *float64
+ TCPTimeWaitOverflow *float64
+ TCPReqQFullDoCookies *float64
+ TCPReqQFullDrop *float64
+ TCPRetransFail *float64
+ TCPRcvCoalesce *float64
+ TCPRcvQDrop *float64
+ TCPOFOQueue *float64
+ TCPOFODrop *float64
+ TCPOFOMerge *float64
+ TCPChallengeACK *float64
+ TCPSYNChallenge *float64
+ TCPFastOpenActive *float64
+ TCPFastOpenActiveFail *float64
+ TCPFastOpenPassive *float64
+ TCPFastOpenPassiveFail *float64
+ TCPFastOpenListenOverflow *float64
+ TCPFastOpenCookieReqd *float64
+ TCPFastOpenBlackhole *float64
+ TCPSpuriousRtxHostQueues *float64
+ BusyPollRxPackets *float64
+ TCPAutoCorking *float64
+ TCPFromZeroWindowAdv *float64
+ TCPToZeroWindowAdv *float64
+ TCPWantZeroWindowAdv *float64
+ TCPSynRetrans *float64
+ TCPOrigDataSent *float64
+ TCPHystartTrainDetect *float64
+ TCPHystartTrainCwnd *float64
+ TCPHystartDelayDetect *float64
+ TCPHystartDelayCwnd *float64
+ TCPACKSkippedSynRecv *float64
+ TCPACKSkippedPAWS *float64
+ TCPACKSkippedSeq *float64
+ TCPACKSkippedFinWait2 *float64
+ TCPACKSkippedTimeWait *float64
+ TCPACKSkippedChallenge *float64
+ TCPWinProbe *float64
+ TCPKeepAlive *float64
+ TCPMTUPFail *float64
+ TCPMTUPSuccess *float64
+ TCPWqueueTooBig *float64
}
type IpExt struct { // nolint:revive
- InNoRoutes float64
- InTruncatedPkts float64
- InMcastPkts float64
- OutMcastPkts float64
- InBcastPkts float64
- OutBcastPkts float64
- InOctets float64
- OutOctets float64
- InMcastOctets float64
- OutMcastOctets float64
- InBcastOctets float64
- OutBcastOctets float64
- InCsumErrors float64
- InNoECTPkts float64
- InECT1Pkts float64
- InECT0Pkts float64
- InCEPkts float64
- ReasmOverlaps float64
+ InNoRoutes *float64
+ InTruncatedPkts *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InBcastPkts *float64
+ OutBcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InCsumErrors *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
+ ReasmOverlaps *float64
}
func (p Proc) Netstat() (ProcNetstat, error) {
@@ -174,14 +175,14 @@ func (p Proc) Netstat() (ProcNetstat, error) {
if err != nil {
return ProcNetstat{PID: p.PID}, err
}
- procNetstat, err := parseNetstat(bytes.NewReader(data), filename)
+ procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
procNetstat.PID = p.PID
return procNetstat, err
}
-// parseNetstat parses the metrics from proc/<pid>/net/netstat file
+// parseProcNetstat parses the metrics from proc/<pid>/net/netstat file
// and returns a ProcNetstat structure.
-func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
+func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
var (
scanner = bufio.NewScanner(r)
procNetstat = ProcNetstat{}
@@ -194,8 +195,8 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
// Remove trailing :.
protocol := strings.TrimSuffix(nameParts[0], ":")
if len(nameParts) != len(valueParts) {
- return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s",
- fileName, protocol)
+ return procNetstat, fmt.Errorf("%w: field count mismatch in %s: %s",
+ ErrFileParse, fileName, protocol)
}
for i := 1; i < len(nameParts); i++ {
value, err := strconv.ParseFloat(valueParts[i], 64)
@@ -208,230 +209,232 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
case "TcpExt":
switch key {
case "SyncookiesSent":
- procNetstat.TcpExt.SyncookiesSent = value
+ procNetstat.TcpExt.SyncookiesSent = &value
case "SyncookiesRecv":
- procNetstat.TcpExt.SyncookiesRecv = value
+ procNetstat.TcpExt.SyncookiesRecv = &value
case "SyncookiesFailed":
- procNetstat.TcpExt.SyncookiesFailed = value
+ procNetstat.TcpExt.SyncookiesFailed = &value
case "EmbryonicRsts":
- procNetstat.TcpExt.EmbryonicRsts = value
+ procNetstat.TcpExt.EmbryonicRsts = &value
case "PruneCalled":
- procNetstat.TcpExt.PruneCalled = value
+ procNetstat.TcpExt.PruneCalled = &value
case "RcvPruned":
- procNetstat.TcpExt.RcvPruned = value
+ procNetstat.TcpExt.RcvPruned = &value
case "OfoPruned":
- procNetstat.TcpExt.OfoPruned = value
+ procNetstat.TcpExt.OfoPruned = &value
case "OutOfWindowIcmps":
- procNetstat.TcpExt.OutOfWindowIcmps = value
+ procNetstat.TcpExt.OutOfWindowIcmps = &value
case "LockDroppedIcmps":
- procNetstat.TcpExt.LockDroppedIcmps = value
+ procNetstat.TcpExt.LockDroppedIcmps = &value
case "ArpFilter":
- procNetstat.TcpExt.ArpFilter = value
+ procNetstat.TcpExt.ArpFilter = &value
case "TW":
- procNetstat.TcpExt.TW = value
+ procNetstat.TcpExt.TW = &value
case "TWRecycled":
- procNetstat.TcpExt.TWRecycled = value
+ procNetstat.TcpExt.TWRecycled = &value
case "TWKilled":
- procNetstat.TcpExt.TWKilled = value
+ procNetstat.TcpExt.TWKilled = &value
case "PAWSActive":
- procNetstat.TcpExt.PAWSActive = value
+ procNetstat.TcpExt.PAWSActive = &value
case "PAWSEstab":
- procNetstat.TcpExt.PAWSEstab = value
+ procNetstat.TcpExt.PAWSEstab = &value
case "DelayedACKs":
- procNetstat.TcpExt.DelayedACKs = value
+ procNetstat.TcpExt.DelayedACKs = &value
case "DelayedACKLocked":
- procNetstat.TcpExt.DelayedACKLocked = value
+ procNetstat.TcpExt.DelayedACKLocked = &value
case "DelayedACKLost":
- procNetstat.TcpExt.DelayedACKLost = value
+ procNetstat.TcpExt.DelayedACKLost = &value
case "ListenOverflows":
- procNetstat.TcpExt.ListenOverflows = value
+ procNetstat.TcpExt.ListenOverflows = &value
case "ListenDrops":
- procNetstat.TcpExt.ListenDrops = value
+ procNetstat.TcpExt.ListenDrops = &value
case "TCPHPHits":
- procNetstat.TcpExt.TCPHPHits = value
+ procNetstat.TcpExt.TCPHPHits = &value
case "TCPPureAcks":
- procNetstat.TcpExt.TCPPureAcks = value
+ procNetstat.TcpExt.TCPPureAcks = &value
case "TCPHPAcks":
- procNetstat.TcpExt.TCPHPAcks = value
+ procNetstat.TcpExt.TCPHPAcks = &value
case "TCPRenoRecovery":
- procNetstat.TcpExt.TCPRenoRecovery = value
+ procNetstat.TcpExt.TCPRenoRecovery = &value
case "TCPSackRecovery":
- procNetstat.TcpExt.TCPSackRecovery = value
+ procNetstat.TcpExt.TCPSackRecovery = &value
case "TCPSACKReneging":
- procNetstat.TcpExt.TCPSACKReneging = value
+ procNetstat.TcpExt.TCPSACKReneging = &value
case "TCPSACKReorder":
- procNetstat.TcpExt.TCPSACKReorder = value
+ procNetstat.TcpExt.TCPSACKReorder = &value
case "TCPRenoReorder":
- procNetstat.TcpExt.TCPRenoReorder = value
+ procNetstat.TcpExt.TCPRenoReorder = &value
case "TCPTSReorder":
- procNetstat.TcpExt.TCPTSReorder = value
+ procNetstat.TcpExt.TCPTSReorder = &value
case "TCPFullUndo":
- procNetstat.TcpExt.TCPFullUndo = value
+ procNetstat.TcpExt.TCPFullUndo = &value
case "TCPPartialUndo":
- procNetstat.TcpExt.TCPPartialUndo = value
+ procNetstat.TcpExt.TCPPartialUndo = &value
case "TCPDSACKUndo":
- procNetstat.TcpExt.TCPDSACKUndo = value
+ procNetstat.TcpExt.TCPDSACKUndo = &value
case "TCPLossUndo":
- procNetstat.TcpExt.TCPLossUndo = value
+ procNetstat.TcpExt.TCPLossUndo = &value
case "TCPLostRetransmit":
- procNetstat.TcpExt.TCPLostRetransmit = value
+ procNetstat.TcpExt.TCPLostRetransmit = &value
case "TCPRenoFailures":
- procNetstat.TcpExt.TCPRenoFailures = value
+ procNetstat.TcpExt.TCPRenoFailures = &value
case "TCPSackFailures":
- procNetstat.TcpExt.TCPSackFailures = value
+ procNetstat.TcpExt.TCPSackFailures = &value
case "TCPLossFailures":
- procNetstat.TcpExt.TCPLossFailures = value
+ procNetstat.TcpExt.TCPLossFailures = &value
case "TCPFastRetrans":
- procNetstat.TcpExt.TCPFastRetrans = value
+ procNetstat.TcpExt.TCPFastRetrans = &value
case "TCPSlowStartRetrans":
- procNetstat.TcpExt.TCPSlowStartRetrans = value
+ procNetstat.TcpExt.TCPSlowStartRetrans = &value
case "TCPTimeouts":
- procNetstat.TcpExt.TCPTimeouts = value
+ procNetstat.TcpExt.TCPTimeouts = &value
case "TCPLossProbes":
- procNetstat.TcpExt.TCPLossProbes = value
+ procNetstat.TcpExt.TCPLossProbes = &value
case "TCPLossProbeRecovery":
- procNetstat.TcpExt.TCPLossProbeRecovery = value
+ procNetstat.TcpExt.TCPLossProbeRecovery = &value
case "TCPRenoRecoveryFail":
- procNetstat.TcpExt.TCPRenoRecoveryFail = value
+ procNetstat.TcpExt.TCPRenoRecoveryFail = &value
case "TCPSackRecoveryFail":
- procNetstat.TcpExt.TCPSackRecoveryFail = value
+ procNetstat.TcpExt.TCPSackRecoveryFail = &value
case "TCPRcvCollapsed":
- procNetstat.TcpExt.TCPRcvCollapsed = value
+ procNetstat.TcpExt.TCPRcvCollapsed = &value
case "TCPDSACKOldSent":
- procNetstat.TcpExt.TCPDSACKOldSent = value
+ procNetstat.TcpExt.TCPDSACKOldSent = &value
case "TCPDSACKOfoSent":
- procNetstat.TcpExt.TCPDSACKOfoSent = value
+ procNetstat.TcpExt.TCPDSACKOfoSent = &value
case "TCPDSACKRecv":
- procNetstat.TcpExt.TCPDSACKRecv = value
+ procNetstat.TcpExt.TCPDSACKRecv = &value
case "TCPDSACKOfoRecv":
- procNetstat.TcpExt.TCPDSACKOfoRecv = value
+ procNetstat.TcpExt.TCPDSACKOfoRecv = &value
case "TCPAbortOnData":
- procNetstat.TcpExt.TCPAbortOnData = value
+ procNetstat.TcpExt.TCPAbortOnData = &value
case "TCPAbortOnClose":
- procNetstat.TcpExt.TCPAbortOnClose = value
+ procNetstat.TcpExt.TCPAbortOnClose = &value
case "TCPDeferAcceptDrop":
- procNetstat.TcpExt.TCPDeferAcceptDrop = value
+ procNetstat.TcpExt.TCPDeferAcceptDrop = &value
case "IPReversePathFilter":
- procNetstat.TcpExt.IPReversePathFilter = value
+ procNetstat.TcpExt.IPReversePathFilter = &value
case "TCPTimeWaitOverflow":
- procNetstat.TcpExt.TCPTimeWaitOverflow = value
+ procNetstat.TcpExt.TCPTimeWaitOverflow = &value
case "TCPReqQFullDoCookies":
- procNetstat.TcpExt.TCPReqQFullDoCookies = value
+ procNetstat.TcpExt.TCPReqQFullDoCookies = &value
case "TCPReqQFullDrop":
- procNetstat.TcpExt.TCPReqQFullDrop = value
+ procNetstat.TcpExt.TCPReqQFullDrop = &value
case "TCPRetransFail":
- procNetstat.TcpExt.TCPRetransFail = value
+ procNetstat.TcpExt.TCPRetransFail = &value
case "TCPRcvCoalesce":
- procNetstat.TcpExt.TCPRcvCoalesce = value
+ procNetstat.TcpExt.TCPRcvCoalesce = &value
+ case "TCPRcvQDrop":
+ procNetstat.TcpExt.TCPRcvQDrop = &value
case "TCPOFOQueue":
- procNetstat.TcpExt.TCPOFOQueue = value
+ procNetstat.TcpExt.TCPOFOQueue = &value
case "TCPOFODrop":
- procNetstat.TcpExt.TCPOFODrop = value
+ procNetstat.TcpExt.TCPOFODrop = &value
case "TCPOFOMerge":
- procNetstat.TcpExt.TCPOFOMerge = value
+ procNetstat.TcpExt.TCPOFOMerge = &value
case "TCPChallengeACK":
- procNetstat.TcpExt.TCPChallengeACK = value
+ procNetstat.TcpExt.TCPChallengeACK = &value
case "TCPSYNChallenge":
- procNetstat.TcpExt.TCPSYNChallenge = value
+ procNetstat.TcpExt.TCPSYNChallenge = &value
case "TCPFastOpenActive":
- procNetstat.TcpExt.TCPFastOpenActive = value
+ procNetstat.TcpExt.TCPFastOpenActive = &value
case "TCPFastOpenActiveFail":
- procNetstat.TcpExt.TCPFastOpenActiveFail = value
+ procNetstat.TcpExt.TCPFastOpenActiveFail = &value
case "TCPFastOpenPassive":
- procNetstat.TcpExt.TCPFastOpenPassive = value
+ procNetstat.TcpExt.TCPFastOpenPassive = &value
case "TCPFastOpenPassiveFail":
- procNetstat.TcpExt.TCPFastOpenPassiveFail = value
+ procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
case "TCPFastOpenListenOverflow":
- procNetstat.TcpExt.TCPFastOpenListenOverflow = value
+ procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
case "TCPFastOpenCookieReqd":
- procNetstat.TcpExt.TCPFastOpenCookieReqd = value
+ procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
case "TCPFastOpenBlackhole":
- procNetstat.TcpExt.TCPFastOpenBlackhole = value
+ procNetstat.TcpExt.TCPFastOpenBlackhole = &value
case "TCPSpuriousRtxHostQueues":
- procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value
+ procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
case "BusyPollRxPackets":
- procNetstat.TcpExt.BusyPollRxPackets = value
+ procNetstat.TcpExt.BusyPollRxPackets = &value
case "TCPAutoCorking":
- procNetstat.TcpExt.TCPAutoCorking = value
+ procNetstat.TcpExt.TCPAutoCorking = &value
case "TCPFromZeroWindowAdv":
- procNetstat.TcpExt.TCPFromZeroWindowAdv = value
+ procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
case "TCPToZeroWindowAdv":
- procNetstat.TcpExt.TCPToZeroWindowAdv = value
+ procNetstat.TcpExt.TCPToZeroWindowAdv = &value
case "TCPWantZeroWindowAdv":
- procNetstat.TcpExt.TCPWantZeroWindowAdv = value
+ procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
case "TCPSynRetrans":
- procNetstat.TcpExt.TCPSynRetrans = value
+ procNetstat.TcpExt.TCPSynRetrans = &value
case "TCPOrigDataSent":
- procNetstat.TcpExt.TCPOrigDataSent = value
+ procNetstat.TcpExt.TCPOrigDataSent = &value
case "TCPHystartTrainDetect":
- procNetstat.TcpExt.TCPHystartTrainDetect = value
+ procNetstat.TcpExt.TCPHystartTrainDetect = &value
case "TCPHystartTrainCwnd":
- procNetstat.TcpExt.TCPHystartTrainCwnd = value
+ procNetstat.TcpExt.TCPHystartTrainCwnd = &value
case "TCPHystartDelayDetect":
- procNetstat.TcpExt.TCPHystartDelayDetect = value
+ procNetstat.TcpExt.TCPHystartDelayDetect = &value
case "TCPHystartDelayCwnd":
- procNetstat.TcpExt.TCPHystartDelayCwnd = value
+ procNetstat.TcpExt.TCPHystartDelayCwnd = &value
case "TCPACKSkippedSynRecv":
- procNetstat.TcpExt.TCPACKSkippedSynRecv = value
+ procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
case "TCPACKSkippedPAWS":
- procNetstat.TcpExt.TCPACKSkippedPAWS = value
+ procNetstat.TcpExt.TCPACKSkippedPAWS = &value
case "TCPACKSkippedSeq":
- procNetstat.TcpExt.TCPACKSkippedSeq = value
+ procNetstat.TcpExt.TCPACKSkippedSeq = &value
case "TCPACKSkippedFinWait2":
- procNetstat.TcpExt.TCPACKSkippedFinWait2 = value
+ procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
case "TCPACKSkippedTimeWait":
- procNetstat.TcpExt.TCPACKSkippedTimeWait = value
+ procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
case "TCPACKSkippedChallenge":
- procNetstat.TcpExt.TCPACKSkippedChallenge = value
+ procNetstat.TcpExt.TCPACKSkippedChallenge = &value
case "TCPWinProbe":
- procNetstat.TcpExt.TCPWinProbe = value
+ procNetstat.TcpExt.TCPWinProbe = &value
case "TCPKeepAlive":
- procNetstat.TcpExt.TCPKeepAlive = value
+ procNetstat.TcpExt.TCPKeepAlive = &value
case "TCPMTUPFail":
- procNetstat.TcpExt.TCPMTUPFail = value
+ procNetstat.TcpExt.TCPMTUPFail = &value
case "TCPMTUPSuccess":
- procNetstat.TcpExt.TCPMTUPSuccess = value
+ procNetstat.TcpExt.TCPMTUPSuccess = &value
case "TCPWqueueTooBig":
- procNetstat.TcpExt.TCPWqueueTooBig = value
+ procNetstat.TcpExt.TCPWqueueTooBig = &value
}
case "IpExt":
switch key {
case "InNoRoutes":
- procNetstat.IpExt.InNoRoutes = value
+ procNetstat.IpExt.InNoRoutes = &value
case "InTruncatedPkts":
- procNetstat.IpExt.InTruncatedPkts = value
+ procNetstat.IpExt.InTruncatedPkts = &value
case "InMcastPkts":
- procNetstat.IpExt.InMcastPkts = value
+ procNetstat.IpExt.InMcastPkts = &value
case "OutMcastPkts":
- procNetstat.IpExt.OutMcastPkts = value
+ procNetstat.IpExt.OutMcastPkts = &value
case "InBcastPkts":
- procNetstat.IpExt.InBcastPkts = value
+ procNetstat.IpExt.InBcastPkts = &value
case "OutBcastPkts":
- procNetstat.IpExt.OutBcastPkts = value
+ procNetstat.IpExt.OutBcastPkts = &value
case "InOctets":
- procNetstat.IpExt.InOctets = value
+ procNetstat.IpExt.InOctets = &value
case "OutOctets":
- procNetstat.IpExt.OutOctets = value
+ procNetstat.IpExt.OutOctets = &value
case "InMcastOctets":
- procNetstat.IpExt.InMcastOctets = value
+ procNetstat.IpExt.InMcastOctets = &value
case "OutMcastOctets":
- procNetstat.IpExt.OutMcastOctets = value
+ procNetstat.IpExt.OutMcastOctets = &value
case "InBcastOctets":
- procNetstat.IpExt.InBcastOctets = value
+ procNetstat.IpExt.InBcastOctets = &value
case "OutBcastOctets":
- procNetstat.IpExt.OutBcastOctets = value
+ procNetstat.IpExt.OutBcastOctets = &value
case "InCsumErrors":
- procNetstat.IpExt.InCsumErrors = value
+ procNetstat.IpExt.InCsumErrors = &value
case "InNoECTPkts":
- procNetstat.IpExt.InNoECTPkts = value
+ procNetstat.IpExt.InNoECTPkts = &value
case "InECT1Pkts":
- procNetstat.IpExt.InECT1Pkts = value
+ procNetstat.IpExt.InECT1Pkts = &value
case "InECT0Pkts":
- procNetstat.IpExt.InECT0Pkts = value
+ procNetstat.IpExt.InECT0Pkts = &value
case "InCEPkts":
- procNetstat.IpExt.InCEPkts = value
+ procNetstat.IpExt.InCEPkts = &value
case "ReasmOverlaps":
- procNetstat.IpExt.ReasmOverlaps = value
+ procNetstat.IpExt.ReasmOverlaps = &value
}
}
}
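
Switching every TcpExt and IpExt field to *float64 is a breaking change: nil now means the kernel did not export that counter, which a plain zero could not express. A minimal sketch of the nil guard callers need; TCPSynRetrans is an illustrative choice:

package procstats

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// printSynRetrans shows the nil guard every pointer field now requires:
// nil means the counter was absent from this kernel's netstat output.
func printSynRetrans(p procfs.Proc) error {
	stats, err := p.Netstat()
	if err != nil {
		return err
	}
	if v := stats.TcpExt.TCPSynRetrans; v != nil {
		fmt.Printf("TCPSynRetrans: %.0f\n", *v)
	} else {
		fmt.Println("TCPSynRetrans not reported by this kernel")
	}
	return nil
}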
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
index 391b4cbd11b9..0f8f847f954b 100644
--- a/vendor/github.com/prometheus/procfs/proc_ns.go
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return nil, fmt.Errorf("failed to read contents of ns dir: %w", err)
+ return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
}
ns := make(Namespaces, len(names))
@@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) {
fields := strings.SplitN(target, ":", 2)
if len(fields) != 2 {
- return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target)
+ return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target)
}
typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil {
- return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err)
+ return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
}
ns[name] = Namespace{typ, uint32(inode)}
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
index a68fe15290a8..ccd35f153a05 100644
--- a/vendor/github.com/prometheus/procfs/proc_psi.go
+++ b/vendor/github.com/prometheus/procfs/proc_psi.go
@@ -61,14 +61,14 @@ type PSIStats struct {
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
- return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err)
+ return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
}
- return parsePSIStats(resource, bytes.NewReader(data))
+ return parsePSIStats(bytes.NewReader(data))
}
// parsePSIStats parses the specified file for pressure stall information.
-func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
+func parsePSIStats(r io.Reader) (PSIStats, error) {
psiStats := PSIStats{}
scanner := bufio.NewScanner(r)
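
Dropping the unused resource parameter is internal only; consumers still go through FS.PSIStatsForResource. A minimal sketch, assuming the default /proc mount:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	// Valid resources are "cpu", "memory" and "io" (/proc/pressure/*).
	stats, err := fs.PSIStatsForResource("cpu")
	if err != nil {
		log.Fatal(err)
	}
	if stats.Some != nil {
		fmt.Printf("cpu some avg10=%.2f total=%d\n", stats.Some.Avg10, stats.Some.Total)
	}
}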
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
index 0e97d99575e7..09060e820803 100644
--- a/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
}
v := strings.TrimSpace(kv[1])
- v = strings.TrimRight(v, " kB")
+ v = strings.TrimSuffix(v, " kB")
vKBytes, err := strconv.ParseUint(v, 10, 64)
if err != nil {
@@ -135,12 +135,12 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
}
vBytes := vKBytes * 1024
- s.addValue(k, v, vKBytes, vBytes)
+ s.addValue(k, vBytes)
return nil
}
-func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
+func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) {
switch k {
case "Rss":
s.Rss += vUintBytes
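
Likewise, trimming addValue to the one argument it uses changes nothing for callers, who read rollups via Proc.ProcSMapsRollup. A minimal sketch; it assumes a kernel that exposes /proc/<pid>/smaps_rollup (4.14+):

package procstats

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// printRollup reads the process's smaps_rollup aggregate memory stats.
func printRollup(p procfs.Proc) error {
	rollup, err := p.ProcSMapsRollup()
	if err != nil {
		return err
	}
	fmt.Printf("RSS %d B, PSS %d B\n", rollup.Rss, rollup.Pss)
	return nil
}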
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
index ae191896cbd7..b9d2cf642a74 100644
--- a/vendor/github.com/prometheus/procfs/proc_snmp.go
+++ b/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -37,100 +37,100 @@ type ProcSnmp struct {
}
type Ip struct { // nolint:revive
- Forwarding float64
- DefaultTTL float64
- InReceives float64
- InHdrErrors float64
- InAddrErrors float64
- ForwDatagrams float64
- InUnknownProtos float64
- InDiscards float64
- InDelivers float64
- OutRequests float64
- OutDiscards float64
- OutNoRoutes float64
- ReasmTimeout float64
- ReasmReqds float64
- ReasmOKs float64
- ReasmFails float64
- FragOKs float64
- FragFails float64
- FragCreates float64
+ Forwarding *float64
+ DefaultTTL *float64
+ InReceives *float64
+ InHdrErrors *float64
+ InAddrErrors *float64
+ ForwDatagrams *float64
+ InUnknownProtos *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
}
-type Icmp struct {
- InMsgs float64
- InErrors float64
- InCsumErrors float64
- InDestUnreachs float64
- InTimeExcds float64
- InParmProbs float64
- InSrcQuenchs float64
- InRedirects float64
- InEchos float64
- InEchoReps float64
- InTimestamps float64
- InTimestampReps float64
- InAddrMasks float64
- InAddrMaskReps float64
- OutMsgs float64
- OutErrors float64
- OutDestUnreachs float64
- OutTimeExcds float64
- OutParmProbs float64
- OutSrcQuenchs float64
- OutRedirects float64
- OutEchos float64
- OutEchoReps float64
- OutTimestamps float64
- OutTimestampReps float64
- OutAddrMasks float64
- OutAddrMaskReps float64
+type Icmp struct { // nolint:revive
+ InMsgs *float64
+ InErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InTimeExcds *float64
+ InParmProbs *float64
+ InSrcQuenchs *float64
+ InRedirects *float64
+ InEchos *float64
+ InEchoReps *float64
+ InTimestamps *float64
+ InTimestampReps *float64
+ InAddrMasks *float64
+ InAddrMaskReps *float64
+ OutMsgs *float64
+ OutErrors *float64
+ OutDestUnreachs *float64
+ OutTimeExcds *float64
+ OutParmProbs *float64
+ OutSrcQuenchs *float64
+ OutRedirects *float64
+ OutEchos *float64
+ OutEchoReps *float64
+ OutTimestamps *float64
+ OutTimestampReps *float64
+ OutAddrMasks *float64
+ OutAddrMaskReps *float64
}
type IcmpMsg struct {
- InType3 float64
- OutType3 float64
+ InType3 *float64
+ OutType3 *float64
}
type Tcp struct { // nolint:revive
- RtoAlgorithm float64
- RtoMin float64
- RtoMax float64
- MaxConn float64
- ActiveOpens float64
- PassiveOpens float64
- AttemptFails float64
- EstabResets float64
- CurrEstab float64
- InSegs float64
- OutSegs float64
- RetransSegs float64
- InErrs float64
- OutRsts float64
- InCsumErrors float64
+ RtoAlgorithm *float64
+ RtoMin *float64
+ RtoMax *float64
+ MaxConn *float64
+ ActiveOpens *float64
+ PassiveOpens *float64
+ AttemptFails *float64
+ EstabResets *float64
+ CurrEstab *float64
+ InSegs *float64
+ OutSegs *float64
+ RetransSegs *float64
+ InErrs *float64
+ OutRsts *float64
+ InCsumErrors *float64
}
type Udp struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
type UdpLite struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
func (p Proc) Snmp() (ProcSnmp, error) {
@@ -159,8 +159,8 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
// Remove trailing :.
protocol := strings.TrimSuffix(nameParts[0], ":")
if len(nameParts) != len(valueParts) {
- return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s",
- fileName, protocol)
+ return procSnmp, fmt.Errorf("%w: field count mismatch in %s: %s",
+ ErrFileParse, fileName, protocol)
}
for i := 1; i < len(nameParts); i++ {
value, err := strconv.ParseFloat(valueParts[i], 64)
@@ -173,178 +173,178 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
case "Ip":
switch key {
case "Forwarding":
- procSnmp.Ip.Forwarding = value
+ procSnmp.Ip.Forwarding = &value
case "DefaultTTL":
- procSnmp.Ip.DefaultTTL = value
+ procSnmp.Ip.DefaultTTL = &value
case "InReceives":
- procSnmp.Ip.InReceives = value
+ procSnmp.Ip.InReceives = &value
case "InHdrErrors":
- procSnmp.Ip.InHdrErrors = value
+ procSnmp.Ip.InHdrErrors = &value
case "InAddrErrors":
- procSnmp.Ip.InAddrErrors = value
+ procSnmp.Ip.InAddrErrors = &value
case "ForwDatagrams":
- procSnmp.Ip.ForwDatagrams = value
+ procSnmp.Ip.ForwDatagrams = &value
case "InUnknownProtos":
- procSnmp.Ip.InUnknownProtos = value
+ procSnmp.Ip.InUnknownProtos = &value
case "InDiscards":
- procSnmp.Ip.InDiscards = value
+ procSnmp.Ip.InDiscards = &value
case "InDelivers":
- procSnmp.Ip.InDelivers = value
+ procSnmp.Ip.InDelivers = &value
case "OutRequests":
- procSnmp.Ip.OutRequests = value
+ procSnmp.Ip.OutRequests = &value
case "OutDiscards":
- procSnmp.Ip.OutDiscards = value
+ procSnmp.Ip.OutDiscards = &value
case "OutNoRoutes":
- procSnmp.Ip.OutNoRoutes = value
+ procSnmp.Ip.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp.Ip.ReasmTimeout = value
+ procSnmp.Ip.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp.Ip.ReasmReqds = value
+ procSnmp.Ip.ReasmReqds = &value
case "ReasmOKs":
- procSnmp.Ip.ReasmOKs = value
+ procSnmp.Ip.ReasmOKs = &value
case "ReasmFails":
- procSnmp.Ip.ReasmFails = value
+ procSnmp.Ip.ReasmFails = &value
case "FragOKs":
- procSnmp.Ip.FragOKs = value
+ procSnmp.Ip.FragOKs = &value
case "FragFails":
- procSnmp.Ip.FragFails = value
+ procSnmp.Ip.FragFails = &value
case "FragCreates":
- procSnmp.Ip.FragCreates = value
+ procSnmp.Ip.FragCreates = &value
}
case "Icmp":
switch key {
case "InMsgs":
- procSnmp.Icmp.InMsgs = value
+ procSnmp.Icmp.InMsgs = &value
case "InErrors":
- procSnmp.Icmp.InErrors = value
+ procSnmp.Icmp.InErrors = &value
case "InCsumErrors":
- procSnmp.Icmp.InCsumErrors = value
+ procSnmp.Icmp.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp.Icmp.InDestUnreachs = value
+ procSnmp.Icmp.InDestUnreachs = &value
case "InTimeExcds":
- procSnmp.Icmp.InTimeExcds = value
+ procSnmp.Icmp.InTimeExcds = &value
case "InParmProbs":
- procSnmp.Icmp.InParmProbs = value
+ procSnmp.Icmp.InParmProbs = &value
case "InSrcQuenchs":
- procSnmp.Icmp.InSrcQuenchs = value
+ procSnmp.Icmp.InSrcQuenchs = &value
case "InRedirects":
- procSnmp.Icmp.InRedirects = value
+ procSnmp.Icmp.InRedirects = &value
case "InEchos":
- procSnmp.Icmp.InEchos = value
+ procSnmp.Icmp.InEchos = &value
case "InEchoReps":
- procSnmp.Icmp.InEchoReps = value
+ procSnmp.Icmp.InEchoReps = &value
case "InTimestamps":
- procSnmp.Icmp.InTimestamps = value
+ procSnmp.Icmp.InTimestamps = &value
case "InTimestampReps":
- procSnmp.Icmp.InTimestampReps = value
+ procSnmp.Icmp.InTimestampReps = &value
case "InAddrMasks":
- procSnmp.Icmp.InAddrMasks = value
+ procSnmp.Icmp.InAddrMasks = &value
case "InAddrMaskReps":
- procSnmp.Icmp.InAddrMaskReps = value
+ procSnmp.Icmp.InAddrMaskReps = &value
case "OutMsgs":
- procSnmp.Icmp.OutMsgs = value
+ procSnmp.Icmp.OutMsgs = &value
case "OutErrors":
- procSnmp.Icmp.OutErrors = value
+ procSnmp.Icmp.OutErrors = &value
case "OutDestUnreachs":
- procSnmp.Icmp.OutDestUnreachs = value
+ procSnmp.Icmp.OutDestUnreachs = &value
case "OutTimeExcds":
- procSnmp.Icmp.OutTimeExcds = value
+ procSnmp.Icmp.OutTimeExcds = &value
case "OutParmProbs":
- procSnmp.Icmp.OutParmProbs = value
+ procSnmp.Icmp.OutParmProbs = &value
case "OutSrcQuenchs":
- procSnmp.Icmp.OutSrcQuenchs = value
+ procSnmp.Icmp.OutSrcQuenchs = &value
case "OutRedirects":
- procSnmp.Icmp.OutRedirects = value
+ procSnmp.Icmp.OutRedirects = &value
case "OutEchos":
- procSnmp.Icmp.OutEchos = value
+ procSnmp.Icmp.OutEchos = &value
case "OutEchoReps":
- procSnmp.Icmp.OutEchoReps = value
+ procSnmp.Icmp.OutEchoReps = &value
case "OutTimestamps":
- procSnmp.Icmp.OutTimestamps = value
+ procSnmp.Icmp.OutTimestamps = &value
case "OutTimestampReps":
- procSnmp.Icmp.OutTimestampReps = value
+ procSnmp.Icmp.OutTimestampReps = &value
case "OutAddrMasks":
- procSnmp.Icmp.OutAddrMasks = value
+ procSnmp.Icmp.OutAddrMasks = &value
case "OutAddrMaskReps":
- procSnmp.Icmp.OutAddrMaskReps = value
+ procSnmp.Icmp.OutAddrMaskReps = &value
}
case "IcmpMsg":
switch key {
case "InType3":
- procSnmp.IcmpMsg.InType3 = value
+ procSnmp.IcmpMsg.InType3 = &value
case "OutType3":
- procSnmp.IcmpMsg.OutType3 = value
+ procSnmp.IcmpMsg.OutType3 = &value
}
case "Tcp":
switch key {
case "RtoAlgorithm":
- procSnmp.Tcp.RtoAlgorithm = value
+ procSnmp.Tcp.RtoAlgorithm = &value
case "RtoMin":
- procSnmp.Tcp.RtoMin = value
+ procSnmp.Tcp.RtoMin = &value
case "RtoMax":
- procSnmp.Tcp.RtoMax = value
+ procSnmp.Tcp.RtoMax = &value
case "MaxConn":
- procSnmp.Tcp.MaxConn = value
+ procSnmp.Tcp.MaxConn = &value
case "ActiveOpens":
- procSnmp.Tcp.ActiveOpens = value
+ procSnmp.Tcp.ActiveOpens = &value
case "PassiveOpens":
- procSnmp.Tcp.PassiveOpens = value
+ procSnmp.Tcp.PassiveOpens = &value
case "AttemptFails":
- procSnmp.Tcp.AttemptFails = value
+ procSnmp.Tcp.AttemptFails = &value
case "EstabResets":
- procSnmp.Tcp.EstabResets = value
+ procSnmp.Tcp.EstabResets = &value
case "CurrEstab":
- procSnmp.Tcp.CurrEstab = value
+ procSnmp.Tcp.CurrEstab = &value
case "InSegs":
- procSnmp.Tcp.InSegs = value
+ procSnmp.Tcp.InSegs = &value
case "OutSegs":
- procSnmp.Tcp.OutSegs = value
+ procSnmp.Tcp.OutSegs = &value
case "RetransSegs":
- procSnmp.Tcp.RetransSegs = value
+ procSnmp.Tcp.RetransSegs = &value
case "InErrs":
- procSnmp.Tcp.InErrs = value
+ procSnmp.Tcp.InErrs = &value
case "OutRsts":
- procSnmp.Tcp.OutRsts = value
+ procSnmp.Tcp.OutRsts = &value
case "InCsumErrors":
- procSnmp.Tcp.InCsumErrors = value
+ procSnmp.Tcp.InCsumErrors = &value
}
case "Udp":
switch key {
case "InDatagrams":
- procSnmp.Udp.InDatagrams = value
+ procSnmp.Udp.InDatagrams = &value
case "NoPorts":
- procSnmp.Udp.NoPorts = value
+ procSnmp.Udp.NoPorts = &value
case "InErrors":
- procSnmp.Udp.InErrors = value
+ procSnmp.Udp.InErrors = &value
case "OutDatagrams":
- procSnmp.Udp.OutDatagrams = value
+ procSnmp.Udp.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp.Udp.RcvbufErrors = value
+ procSnmp.Udp.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp.Udp.SndbufErrors = value
+ procSnmp.Udp.SndbufErrors = &value
case "InCsumErrors":
- procSnmp.Udp.InCsumErrors = value
+ procSnmp.Udp.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp.Udp.IgnoredMulti = value
+ procSnmp.Udp.IgnoredMulti = &value
}
case "UdpLite":
switch key {
case "InDatagrams":
- procSnmp.UdpLite.InDatagrams = value
+ procSnmp.UdpLite.InDatagrams = &value
case "NoPorts":
- procSnmp.UdpLite.NoPorts = value
+ procSnmp.UdpLite.NoPorts = &value
case "InErrors":
- procSnmp.UdpLite.InErrors = value
+ procSnmp.UdpLite.InErrors = &value
case "OutDatagrams":
- procSnmp.UdpLite.OutDatagrams = value
+ procSnmp.UdpLite.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp.UdpLite.RcvbufErrors = value
+ procSnmp.UdpLite.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp.UdpLite.SndbufErrors = value
+ procSnmp.UdpLite.SndbufErrors = &value
case "InCsumErrors":
- procSnmp.UdpLite.InCsumErrors = value
+ procSnmp.UdpLite.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp.UdpLite.IgnoredMulti = value
+ procSnmp.UdpLite.IgnoredMulti = &value
}
}
}
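
ProcSnmp gets the same pointer migration, so derived metrics must check both operands before dividing. A short sketch computing a TCP retransmit ratio; the metric choice is illustrative:

package procstats

import "github.com/prometheus/procfs"

// tcpRetransRatio returns RetransSegs/OutSegs, and false when either
// counter is nil (absent on this kernel) or OutSegs is zero.
func tcpRetransRatio(p procfs.Proc) (float64, bool) {
	snmp, err := p.Snmp()
	if err != nil {
		return 0, false
	}
	out, retrans := snmp.Tcp.OutSegs, snmp.Tcp.RetransSegs
	if out == nil || retrans == nil || *out == 0 {
		return 0, false
	}
	return *retrans / *out, true
}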
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
index f611992d52ca..3059cc6a1367 100644
--- a/vendor/github.com/prometheus/procfs/proc_snmp6.go
+++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -36,106 +36,106 @@ type ProcSnmp6 struct {
}
type Ip6 struct { // nolint:revive
- InReceives float64
- InHdrErrors float64
- InTooBigErrors float64
- InNoRoutes float64
- InAddrErrors float64
- InUnknownProtos float64
- InTruncatedPkts float64
- InDiscards float64
- InDelivers float64
- OutForwDatagrams float64
- OutRequests float64
- OutDiscards float64
- OutNoRoutes float64
- ReasmTimeout float64
- ReasmReqds float64
- ReasmOKs float64
- ReasmFails float64
- FragOKs float64
- FragFails float64
- FragCreates float64
- InMcastPkts float64
- OutMcastPkts float64
- InOctets float64
- OutOctets float64
- InMcastOctets float64
- OutMcastOctets float64
- InBcastOctets float64
- OutBcastOctets float64
- InNoECTPkts float64
- InECT1Pkts float64
- InECT0Pkts float64
- InCEPkts float64
+ InReceives *float64
+ InHdrErrors *float64
+ InTooBigErrors *float64
+ InNoRoutes *float64
+ InAddrErrors *float64
+ InUnknownProtos *float64
+ InTruncatedPkts *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutForwDatagrams *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
}
type Icmp6 struct {
- InMsgs float64
- InErrors float64
- OutMsgs float64
- OutErrors float64
- InCsumErrors float64
- InDestUnreachs float64
- InPktTooBigs float64
- InTimeExcds float64
- InParmProblems float64
- InEchos float64
- InEchoReplies float64
- InGroupMembQueries float64
- InGroupMembResponses float64
- InGroupMembReductions float64
- InRouterSolicits float64
- InRouterAdvertisements float64
- InNeighborSolicits float64
- InNeighborAdvertisements float64
- InRedirects float64
- InMLDv2Reports float64
- OutDestUnreachs float64
- OutPktTooBigs float64
- OutTimeExcds float64
- OutParmProblems float64
- OutEchos float64
- OutEchoReplies float64
- OutGroupMembQueries float64
- OutGroupMembResponses float64
- OutGroupMembReductions float64
- OutRouterSolicits float64
- OutRouterAdvertisements float64
- OutNeighborSolicits float64
- OutNeighborAdvertisements float64
- OutRedirects float64
- OutMLDv2Reports float64
- InType1 float64
- InType134 float64
- InType135 float64
- InType136 float64
- InType143 float64
- OutType133 float64
- OutType135 float64
- OutType136 float64
- OutType143 float64
+ InMsgs *float64
+ InErrors *float64
+ OutMsgs *float64
+ OutErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InPktTooBigs *float64
+ InTimeExcds *float64
+ InParmProblems *float64
+ InEchos *float64
+ InEchoReplies *float64
+ InGroupMembQueries *float64
+ InGroupMembResponses *float64
+ InGroupMembReductions *float64
+ InRouterSolicits *float64
+ InRouterAdvertisements *float64
+ InNeighborSolicits *float64
+ InNeighborAdvertisements *float64
+ InRedirects *float64
+ InMLDv2Reports *float64
+ OutDestUnreachs *float64
+ OutPktTooBigs *float64
+ OutTimeExcds *float64
+ OutParmProblems *float64
+ OutEchos *float64
+ OutEchoReplies *float64
+ OutGroupMembQueries *float64
+ OutGroupMembResponses *float64
+ OutGroupMembReductions *float64
+ OutRouterSolicits *float64
+ OutRouterAdvertisements *float64
+ OutNeighborSolicits *float64
+ OutNeighborAdvertisements *float64
+ OutRedirects *float64
+ OutMLDv2Reports *float64
+ InType1 *float64
+ InType134 *float64
+ InType135 *float64
+ InType136 *float64
+ InType143 *float64
+ OutType133 *float64
+ OutType135 *float64
+ OutType136 *float64
+ OutType143 *float64
}
type Udp6 struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
type UdpLite6 struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
}
func (p Proc) Snmp6() (ProcSnmp6, error) {
@@ -182,197 +182,197 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "Ip6":
switch key {
case "InReceives":
- procSnmp6.Ip6.InReceives = value
+ procSnmp6.Ip6.InReceives = &value
case "InHdrErrors":
- procSnmp6.Ip6.InHdrErrors = value
+ procSnmp6.Ip6.InHdrErrors = &value
case "InTooBigErrors":
- procSnmp6.Ip6.InTooBigErrors = value
+ procSnmp6.Ip6.InTooBigErrors = &value
case "InNoRoutes":
- procSnmp6.Ip6.InNoRoutes = value
+ procSnmp6.Ip6.InNoRoutes = &value
case "InAddrErrors":
- procSnmp6.Ip6.InAddrErrors = value
+ procSnmp6.Ip6.InAddrErrors = &value
case "InUnknownProtos":
- procSnmp6.Ip6.InUnknownProtos = value
+ procSnmp6.Ip6.InUnknownProtos = &value
case "InTruncatedPkts":
- procSnmp6.Ip6.InTruncatedPkts = value
+ procSnmp6.Ip6.InTruncatedPkts = &value
case "InDiscards":
- procSnmp6.Ip6.InDiscards = value
+ procSnmp6.Ip6.InDiscards = &value
case "InDelivers":
- procSnmp6.Ip6.InDelivers = value
+ procSnmp6.Ip6.InDelivers = &value
case "OutForwDatagrams":
- procSnmp6.Ip6.OutForwDatagrams = value
+ procSnmp6.Ip6.OutForwDatagrams = &value
case "OutRequests":
- procSnmp6.Ip6.OutRequests = value
+ procSnmp6.Ip6.OutRequests = &value
case "OutDiscards":
- procSnmp6.Ip6.OutDiscards = value
+ procSnmp6.Ip6.OutDiscards = &value
case "OutNoRoutes":
- procSnmp6.Ip6.OutNoRoutes = value
+ procSnmp6.Ip6.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp6.Ip6.ReasmTimeout = value
+ procSnmp6.Ip6.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp6.Ip6.ReasmReqds = value
+ procSnmp6.Ip6.ReasmReqds = &value
case "ReasmOKs":
- procSnmp6.Ip6.ReasmOKs = value
+ procSnmp6.Ip6.ReasmOKs = &value
case "ReasmFails":
- procSnmp6.Ip6.ReasmFails = value
+ procSnmp6.Ip6.ReasmFails = &value
case "FragOKs":
- procSnmp6.Ip6.FragOKs = value
+ procSnmp6.Ip6.FragOKs = &value
case "FragFails":
- procSnmp6.Ip6.FragFails = value
+ procSnmp6.Ip6.FragFails = &value
case "FragCreates":
- procSnmp6.Ip6.FragCreates = value
+ procSnmp6.Ip6.FragCreates = &value
case "InMcastPkts":
- procSnmp6.Ip6.InMcastPkts = value
+ procSnmp6.Ip6.InMcastPkts = &value
case "OutMcastPkts":
- procSnmp6.Ip6.OutMcastPkts = value
+ procSnmp6.Ip6.OutMcastPkts = &value
case "InOctets":
- procSnmp6.Ip6.InOctets = value
+ procSnmp6.Ip6.InOctets = &value
case "OutOctets":
- procSnmp6.Ip6.OutOctets = value
+ procSnmp6.Ip6.OutOctets = &value
case "InMcastOctets":
- procSnmp6.Ip6.InMcastOctets = value
+ procSnmp6.Ip6.InMcastOctets = &value
case "OutMcastOctets":
- procSnmp6.Ip6.OutMcastOctets = value
+ procSnmp6.Ip6.OutMcastOctets = &value
case "InBcastOctets":
- procSnmp6.Ip6.InBcastOctets = value
+ procSnmp6.Ip6.InBcastOctets = &value
case "OutBcastOctets":
- procSnmp6.Ip6.OutBcastOctets = value
+ procSnmp6.Ip6.OutBcastOctets = &value
case "InNoECTPkts":
- procSnmp6.Ip6.InNoECTPkts = value
+ procSnmp6.Ip6.InNoECTPkts = &value
case "InECT1Pkts":
- procSnmp6.Ip6.InECT1Pkts = value
+ procSnmp6.Ip6.InECT1Pkts = &value
case "InECT0Pkts":
- procSnmp6.Ip6.InECT0Pkts = value
+ procSnmp6.Ip6.InECT0Pkts = &value
case "InCEPkts":
- procSnmp6.Ip6.InCEPkts = value
+ procSnmp6.Ip6.InCEPkts = &value
}
case "Icmp6":
switch key {
case "InMsgs":
- procSnmp6.Icmp6.InMsgs = value
+ procSnmp6.Icmp6.InMsgs = &value
case "InErrors":
- procSnmp6.Icmp6.InErrors = value
+ procSnmp6.Icmp6.InErrors = &value
case "OutMsgs":
- procSnmp6.Icmp6.OutMsgs = value
+ procSnmp6.Icmp6.OutMsgs = &value
case "OutErrors":
- procSnmp6.Icmp6.OutErrors = value
+ procSnmp6.Icmp6.OutErrors = &value
case "InCsumErrors":
- procSnmp6.Icmp6.InCsumErrors = value
+ procSnmp6.Icmp6.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp6.Icmp6.InDestUnreachs = value
+ procSnmp6.Icmp6.InDestUnreachs = &value
case "InPktTooBigs":
- procSnmp6.Icmp6.InPktTooBigs = value
+ procSnmp6.Icmp6.InPktTooBigs = &value
case "InTimeExcds":
- procSnmp6.Icmp6.InTimeExcds = value
+ procSnmp6.Icmp6.InTimeExcds = &value
case "InParmProblems":
- procSnmp6.Icmp6.InParmProblems = value
+ procSnmp6.Icmp6.InParmProblems = &value
case "InEchos":
- procSnmp6.Icmp6.InEchos = value
+ procSnmp6.Icmp6.InEchos = &value
case "InEchoReplies":
- procSnmp6.Icmp6.InEchoReplies = value
+ procSnmp6.Icmp6.InEchoReplies = &value
case "InGroupMembQueries":
- procSnmp6.Icmp6.InGroupMembQueries = value
+ procSnmp6.Icmp6.InGroupMembQueries = &value
case "InGroupMembResponses":
- procSnmp6.Icmp6.InGroupMembResponses = value
+ procSnmp6.Icmp6.InGroupMembResponses = &value
case "InGroupMembReductions":
- procSnmp6.Icmp6.InGroupMembReductions = value
+ procSnmp6.Icmp6.InGroupMembReductions = &value
case "InRouterSolicits":
- procSnmp6.Icmp6.InRouterSolicits = value
+ procSnmp6.Icmp6.InRouterSolicits = &value
case "InRouterAdvertisements":
- procSnmp6.Icmp6.InRouterAdvertisements = value
+ procSnmp6.Icmp6.InRouterAdvertisements = &value
case "InNeighborSolicits":
- procSnmp6.Icmp6.InNeighborSolicits = value
+ procSnmp6.Icmp6.InNeighborSolicits = &value
case "InNeighborAdvertisements":
- procSnmp6.Icmp6.InNeighborAdvertisements = value
+ procSnmp6.Icmp6.InNeighborAdvertisements = &value
case "InRedirects":
- procSnmp6.Icmp6.InRedirects = value
+ procSnmp6.Icmp6.InRedirects = &value
case "InMLDv2Reports":
- procSnmp6.Icmp6.InMLDv2Reports = value
+ procSnmp6.Icmp6.InMLDv2Reports = &value
case "OutDestUnreachs":
- procSnmp6.Icmp6.OutDestUnreachs = value
+ procSnmp6.Icmp6.OutDestUnreachs = &value
case "OutPktTooBigs":
- procSnmp6.Icmp6.OutPktTooBigs = value
+ procSnmp6.Icmp6.OutPktTooBigs = &value
case "OutTimeExcds":
- procSnmp6.Icmp6.OutTimeExcds = value
+ procSnmp6.Icmp6.OutTimeExcds = &value
case "OutParmProblems":
- procSnmp6.Icmp6.OutParmProblems = value
+ procSnmp6.Icmp6.OutParmProblems = &value
case "OutEchos":
- procSnmp6.Icmp6.OutEchos = value
+ procSnmp6.Icmp6.OutEchos = &value
case "OutEchoReplies":
- procSnmp6.Icmp6.OutEchoReplies = value
+ procSnmp6.Icmp6.OutEchoReplies = &value
case "OutGroupMembQueries":
- procSnmp6.Icmp6.OutGroupMembQueries = value
+ procSnmp6.Icmp6.OutGroupMembQueries = &value
case "OutGroupMembResponses":
- procSnmp6.Icmp6.OutGroupMembResponses = value
+ procSnmp6.Icmp6.OutGroupMembResponses = &value
case "OutGroupMembReductions":
- procSnmp6.Icmp6.OutGroupMembReductions = value
+ procSnmp6.Icmp6.OutGroupMembReductions = &value
case "OutRouterSolicits":
- procSnmp6.Icmp6.OutRouterSolicits = value
+ procSnmp6.Icmp6.OutRouterSolicits = &value
case "OutRouterAdvertisements":
- procSnmp6.Icmp6.OutRouterAdvertisements = value
+ procSnmp6.Icmp6.OutRouterAdvertisements = &value
case "OutNeighborSolicits":
- procSnmp6.Icmp6.OutNeighborSolicits = value
+ procSnmp6.Icmp6.OutNeighborSolicits = &value
case "OutNeighborAdvertisements":
- procSnmp6.Icmp6.OutNeighborAdvertisements = value
+ procSnmp6.Icmp6.OutNeighborAdvertisements = &value
case "OutRedirects":
- procSnmp6.Icmp6.OutRedirects = value
+ procSnmp6.Icmp6.OutRedirects = &value
case "OutMLDv2Reports":
- procSnmp6.Icmp6.OutMLDv2Reports = value
+ procSnmp6.Icmp6.OutMLDv2Reports = &value
case "InType1":
- procSnmp6.Icmp6.InType1 = value
+ procSnmp6.Icmp6.InType1 = &value
case "InType134":
- procSnmp6.Icmp6.InType134 = value
+ procSnmp6.Icmp6.InType134 = &value
case "InType135":
- procSnmp6.Icmp6.InType135 = value
+ procSnmp6.Icmp6.InType135 = &value
case "InType136":
- procSnmp6.Icmp6.InType136 = value
+ procSnmp6.Icmp6.InType136 = &value
case "InType143":
- procSnmp6.Icmp6.InType143 = value
+ procSnmp6.Icmp6.InType143 = &value
case "OutType133":
- procSnmp6.Icmp6.OutType133 = value
+ procSnmp6.Icmp6.OutType133 = &value
case "OutType135":
- procSnmp6.Icmp6.OutType135 = value
+ procSnmp6.Icmp6.OutType135 = &value
case "OutType136":
- procSnmp6.Icmp6.OutType136 = value
+ procSnmp6.Icmp6.OutType136 = &value
case "OutType143":
- procSnmp6.Icmp6.OutType143 = value
+ procSnmp6.Icmp6.OutType143 = &value
}
case "Udp6":
switch key {
case "InDatagrams":
- procSnmp6.Udp6.InDatagrams = value
+ procSnmp6.Udp6.InDatagrams = &value
case "NoPorts":
- procSnmp6.Udp6.NoPorts = value
+ procSnmp6.Udp6.NoPorts = &value
case "InErrors":
- procSnmp6.Udp6.InErrors = value
+ procSnmp6.Udp6.InErrors = &value
case "OutDatagrams":
- procSnmp6.Udp6.OutDatagrams = value
+ procSnmp6.Udp6.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp6.Udp6.RcvbufErrors = value
+ procSnmp6.Udp6.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp6.Udp6.SndbufErrors = value
+ procSnmp6.Udp6.SndbufErrors = &value
case "InCsumErrors":
- procSnmp6.Udp6.InCsumErrors = value
+ procSnmp6.Udp6.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp6.Udp6.IgnoredMulti = value
+ procSnmp6.Udp6.IgnoredMulti = &value
}
case "UdpLite6":
switch key {
case "InDatagrams":
- procSnmp6.UdpLite6.InDatagrams = value
+ procSnmp6.UdpLite6.InDatagrams = &value
case "NoPorts":
- procSnmp6.UdpLite6.NoPorts = value
+ procSnmp6.UdpLite6.NoPorts = &value
case "InErrors":
- procSnmp6.UdpLite6.InErrors = value
+ procSnmp6.UdpLite6.InErrors = &value
case "OutDatagrams":
- procSnmp6.UdpLite6.OutDatagrams = value
+ procSnmp6.UdpLite6.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp6.UdpLite6.RcvbufErrors = value
+ procSnmp6.UdpLite6.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp6.UdpLite6.SndbufErrors = value
+ procSnmp6.UdpLite6.SndbufErrors = &value
case "InCsumErrors":
- procSnmp6.UdpLite6.InCsumErrors = value
+ procSnmp6.UdpLite6.InCsumErrors = &value
}
}
}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
index 06c556ef9623..06a8d931c983 100644
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -18,7 +18,6 @@ import (
"fmt"
"os"
- "github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
@@ -102,6 +101,8 @@ type ProcStat struct {
RSS int
// Soft limit in bytes on the rss of the process.
RSSLimit uint64
+ // CPU number last executed on.
+ Processor uint
// Real-time scheduling priority, a number in the range 1 to 99 for processes
// scheduled under a real-time policy, or 0, for non-real-time processes.
RTPriority uint
@@ -109,8 +110,13 @@ type ProcStat struct {
Policy uint
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64
+ // Guest time of the process (time spent running a virtual CPU for a guest
+ // operating system), measured in clock ticks.
+ GuestTime int
+ // Guest time of the process's children, measured in clock ticks.
+ CGuestTime int
- proc fs.FS
+ proc FS
}
// NewStat returns the current status information of the process.
@@ -137,7 +143,7 @@ func (p Proc) Stat() (ProcStat, error) {
)
if l < 0 || r < 0 {
- return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data)
+ return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data)
}
s.Comm = string(data[l+1 : r])
@@ -184,10 +190,12 @@ func (p Proc) Stat() (ProcStat, error) {
&ignoreUint64,
&ignoreUint64,
&ignoreInt64,
- &ignoreInt64,
+ &s.Processor,
&s.RTPriority,
&s.Policy,
&s.DelayAcctBlkIOTicks,
+ &s.GuestTime,
+ &s.CGuestTime,
)
if err != nil {
return ProcStat{}, err
@@ -208,8 +216,7 @@ func (s ProcStat) ResidentMemory() int {
// StartTime returns the unix timestamp of the process in seconds.
func (s ProcStat) StartTime() (float64, error) {
- fs := FS{proc: s.proc}
- stat, err := fs.Stat()
+ stat, err := s.proc.Stat()
if err != nil {
return 0, err
}
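
A short sketch reading the fields Stat now fills in: Processor (the CPU the task last ran on) and the guest-time counters, which are in clock ticks like the other ProcStat times. The helper shape is illustrative:

package procstats

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// describeSched prints the newly populated scheduler fields.
func describeSched(p procfs.Proc) error {
	stat, err := p.Stat()
	if err != nil {
		return err
	}
	// Guest times are clock ticks, consistent with the other counters.
	fmt.Printf("last CPU %d, guest ticks %d (children %d)\n",
		stat.Processor, stat.GuestTime, stat.CGuestTime)
	return nil
}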
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index 594022ded48a..a055197c63e8 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -15,6 +15,8 @@ package procfs
import (
"bytes"
+ "math/bits"
+ "sort"
"strconv"
"strings"
@@ -22,7 +24,7 @@ import (
)
// ProcStatus provides status information about the process,
-// read from /proc/[pid]/stat.
+// read from /proc/[pid]/status.
type ProcStatus struct {
// The process ID.
PID int
@@ -31,6 +33,8 @@ type ProcStatus struct {
// Thread group ID.
TGID int
+ // PID of the process in each PID namespace it belongs to, outermost first.
+ NSpids []uint64
// Peak virtual memory size.
VmPeak uint64 // nolint:revive
@@ -73,9 +77,12 @@ type ProcStatus struct {
NonVoluntaryCtxtSwitches uint64
// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
- UIDs [4]string
+ UIDs [4]uint64
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
- GIDs [4]string
+ GIDs [4]uint64
+
+ // CpusAllowedList: List of CPU cores the process is allowed to run on.
+ CpusAllowedList []uint64
}
// NewStatus returns the current status information of the process.
@@ -96,10 +103,10 @@ func (p Proc) NewStatus() (ProcStatus, error) {
kv := strings.SplitN(line, ":", 2)
// removes spaces
- k := string(strings.TrimSpace(kv[0]))
- v := string(strings.TrimSpace(kv[1]))
+ k := strings.TrimSpace(kv[0])
+ v := strings.TrimSpace(kv[1])
// removes "kB"
- v = string(bytes.Trim([]byte(v), " kB"))
+ v = strings.TrimSuffix(v, " kB")
// value to int when possible
// we can skip error check here, 'cause vKBytes is not used when value is a string
@@ -107,22 +114,39 @@ func (p Proc) NewStatus() (ProcStatus, error) {
// convert kB to B
vBytes := vKBytes * 1024
- s.fillStatus(k, v, vKBytes, vBytes)
+ err = s.fillStatus(k, v, vKBytes, vBytes)
+ if err != nil {
+ return ProcStatus{}, err
+ }
}
return s, nil
}
-func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
+func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
switch k {
case "Tgid":
s.TGID = int(vUint)
case "Name":
s.Name = vString
case "Uid":
- copy(s.UIDs[:], strings.Split(vString, "\t"))
+ var err error
+ for i, v := range strings.Split(vString, "\t") {
+ s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+ if err != nil {
+ return err
+ }
+ }
case "Gid":
- copy(s.GIDs[:], strings.Split(vString, "\t"))
+ var err error
+ for i, v := range strings.Split(vString, "\t") {
+ s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+ if err != nil {
+ return err
+ }
+ }
+ case "NSpid":
+ s.NSpids = calcNSPidsList(vString)
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":
@@ -161,10 +185,54 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.VoluntaryCtxtSwitches = vUint
case "nonvoluntary_ctxt_switches":
s.NonVoluntaryCtxtSwitches = vUint
+ case "Cpus_allowed_list":
+ s.CpusAllowedList = calcCpusAllowedList(vString)
}
+
+ return nil
}
// TotalCtxtSwitches returns the total number of context switches.
func (s ProcStatus) TotalCtxtSwitches() uint64 {
return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
}
+
+func calcCpusAllowedList(cpuString string) []uint64 {
+ s := strings.Split(cpuString, ",")
+
+ var g []uint64
+
+ for _, cpu := range s {
+ // parse CPU ranges, e.g. "1-3" expands to [1, 2, 3]
+ if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
+ startCPU, _ := strconv.ParseUint(l[0], 10, 64)
+ endCPU, _ := strconv.ParseUint(l[1], 10, 64)
+
+ for i := startCPU; i <= endCPU; i++ {
+ g = append(g, i)
+ }
+ } else if len(l) == 1 {
+ cpu, _ := strconv.ParseUint(l[0], 10, 64)
+ g = append(g, cpu)
+ }
+
+ }
+
+ sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
+ return g
+}
+
+func calcNSPidsList(nspidsString string) []uint64 {
+ s := strings.Split(nspidsString, " ")
+ var nspids []uint64
+
+ for _, nspid := range s {
+ nspid, _ := strconv.ParseUint(nspid, 10, 64)
+ if nspid == 0 {
+ continue
+ }
+ nspids = append(nspids, nspid)
+ }
+
+ return nspids
+}
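For context, `calcCpusAllowedList` above expands `Cpus_allowed_list` strings such as `0-2,7` into explicit core IDs. A standalone sketch of the same range-expansion technique (simplified, and unlike the vendored helper it surfaces parse errors instead of silently ignoring them):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// expandCPUList turns "0-2,7" into [0 1 2 7].
func expandCPUList(s string) ([]uint64, error) {
	var cpus []uint64
	for _, part := range strings.Split(s, ",") {
		part = strings.TrimSpace(part)
		if lo, hi, ok := strings.Cut(part, "-"); ok {
			// A range like "0-2": expand every core ID in [start, end].
			start, err := strconv.ParseUint(lo, 10, 64)
			if err != nil {
				return nil, err
			}
			end, err := strconv.ParseUint(hi, 10, 64)
			if err != nil {
				return nil, err
			}
			for i := start; i <= end; i++ {
				cpus = append(cpus, i)
			}
		} else {
			// A single core ID like "7".
			cpu, err := strconv.ParseUint(part, 10, 64)
			if err != nil {
				return nil, err
			}
			cpus = append(cpus, cpu)
		}
	}
	return cpus, nil
}

func main() {
	cpus, err := expandCPUList("0-2,7")
	fmt.Println(cpus, err) // [0 1 2 7] <nil>
}
```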
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go
index d46533ebf419..5eefbe2ef8b3 100644
--- a/vendor/github.com/prometheus/procfs/proc_sys.go
+++ b/vendor/github.com/prometheus/procfs/proc_sys.go
@@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) {
vp := util.NewValueParser(f)
values[i] = vp.Int()
if err := vp.Err(); err != nil {
- return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err)
+ return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
}
}
return values, nil
diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go
index bc9aaf5c2889..8611c901770a 100644
--- a/vendor/github.com/prometheus/procfs/slab.go
+++ b/vendor/github.com/prometheus/procfs/slab.go
@@ -68,7 +68,7 @@ func parseV21SlabEntry(line string) (*Slab, error) {
l := slabSpace.ReplaceAllString(line, " ")
s := strings.Split(l, " ")
if len(s) != 16 {
- return nil, fmt.Errorf("unable to parse: %q", line)
+ return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line)
}
var err error
i := &Slab{Name: s[0]}
diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go
index 559129cbca3c..28708e074595 100644
--- a/vendor/github.com/prometheus/procfs/softirqs.go
+++ b/vendor/github.com/prometheus/procfs/softirqs.go
@@ -57,7 +57,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
)
if !scanner.Scan() {
- return Softirqs{}, fmt.Errorf("softirqs empty")
+ return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead)
}
for scanner.Scan() {
@@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Hi = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TIMER:":
@@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Timer = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_TX:":
@@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.NetTx = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_RX:":
@@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.NetRx = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "BLOCK:":
@@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Block = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "IRQ_POLL:":
@@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.IRQPoll = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TASKLET:":
@@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Tasklet = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "SCHED:":
@@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Sched = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "HRTIMER:":
@@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.HRTimer = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "RCU:":
@@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.RCU = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
}
}
}
}
if err := scanner.Err(); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
}
return softirqs, scanner.Err()
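Every error message in these procfs hunks (proc_sys.go, slab.go, softirqs.go) gains a `%w:`-wrapped sentinel, `ErrFileParse` or `ErrFileRead`, so callers can branch with `errors.Is` instead of matching strings. Go 1.20+ allows multiple `%w` verbs in one `fmt.Errorf`, which is what lets the sentinel and the underlying cause both stay inspectable. A minimal sketch of the pattern (the sentinel name mirrors the one used above):

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

var ErrFileParse = errors.New("error parsing file")

func parseCount(s string) (uint64, error) {
	n, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		// Wrap both the sentinel and the concrete cause (Go 1.20+).
		return 0, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, s, err)
	}
	return n, nil
}

func main() {
	_, err := parseCount("not-a-number")
	fmt.Println(errors.Is(err, ErrFileParse)) // true: category is matchable
	var numErr *strconv.NumError
	fmt.Println(errors.As(err, &numErr)) // true: the cause survives too
}
```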
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
index 33f97caa08da..e36b41c18a90 100644
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -62,7 +62,7 @@ type Stat struct {
// Summed up cpu statistics.
CPUTotal CPUStat
// Per-CPU statistics.
- CPU []CPUStat
+ CPU map[int64]CPUStat
// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
IRQTotal uint64
// Number of times a numbered IRQ was triggered.
@@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
&cpuStat.Guest, &cpuStat.GuestNice)
if err != nil && err != io.EOF {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err)
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
}
if count == 0 {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line)
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
}
cpuStat.User /= userHZ
@@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
if err != nil {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err)
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
}
return cpuStat, cpuID, nil
@@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
if err != nil {
- return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err)
+ return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
}
return softIRQStat, total, nil
@@ -170,10 +170,27 @@ func (fs FS) Stat() (Stat, error) {
if err != nil {
return Stat{}, err
}
+ procStat, err := parseStat(bytes.NewReader(data), fileName)
+ if err != nil {
+ return Stat{}, err
+ }
+ return procStat, nil
+}
- stat := Stat{}
+// parseStat parses the metrics from /proc/[pid]/stat.
+func parseStat(r io.Reader, fileName string) (Stat, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ stat = Stat{
+ CPU: make(map[int64]CPUStat),
+ }
+ err error
+ )
+
+ // Increase default scanner buffer to handle very long `intr` lines.
+ buf := make([]byte, 0, 8*1024)
+ scanner.Buffer(buf, 1024*1024)
- scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
@@ -184,34 +201,34 @@ func (fs FS) Stat() (Stat, error) {
switch {
case parts[0] == "btime":
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "intr":
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
}
numberedIRQs := parts[2:]
stat.IRQ = make([]uint64, len(numberedIRQs))
for i, count := range numberedIRQs {
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "ctxt":
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "processes":
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "procs_running":
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "procs_blocked":
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "softirq":
softIRQStats, total, err := parseSoftIRQStat(line)
@@ -228,16 +245,13 @@ func (fs FS) Stat() (Stat, error) {
if cpuID == -1 {
stat.CPUTotal = cpuStat
} else {
- for int64(len(stat.CPU)) <= cpuID {
- stat.CPU = append(stat.CPU, CPUStat{})
- }
stat.CPU[cpuID] = cpuStat
}
}
}
if err := scanner.Err(); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
}
return stat, nil
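Changing `Stat.CPU` from a slice to `map[int64]CPUStat` removes the need to pad with zero-valued entries when CPU IDs are sparse (offline or hot-unplugged cores), at the cost of callers iterating a map instead of indexing. A sketch of the consumer-side difference, with `CPUStat` reduced to one field for brevity:

```go
package main

import "fmt"

type CPUStat struct{ User float64 }

func main() {
	// With sparse IDs (cpu0 and cpu2 online, cpu1 offline), a map stores
	// only the CPUs that actually appeared in /proc/stat.
	cpu := map[int64]CPUStat{
		0: {User: 12.5},
		2: {User: 3.1},
	}
	for id, st := range cpu {
		fmt.Printf("cpu%d user=%.1f\n", id, st.User)
	}
	// The old slice representation had to be padded up to the largest ID:
	// []CPUStat{{12.5}, {}, {3.1}}, where index 1 is an indistinguishable
	// zero value rather than an explicitly absent CPU.
}
```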
diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go
index 15edc2212b67..65fec834bf41 100644
--- a/vendor/github.com/prometheus/procfs/swaps.go
+++ b/vendor/github.com/prometheus/procfs/swaps.go
@@ -64,7 +64,7 @@ func parseSwapString(swapString string) (*Swap, error) {
swapFields := strings.Fields(swapString)
swapLength := len(swapFields)
if swapLength < 5 {
- return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
+ return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString)
}
swap := &Swap{
@@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) {
swap.Size, err = strconv.Atoi(swapFields[2])
if err != nil {
- return nil, fmt.Errorf("invalid swap size: %s", swapFields[2])
+ return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
}
swap.Used, err = strconv.Atoi(swapFields[3])
if err != nil {
- return nil, fmt.Errorf("invalid swap used: %s", swapFields[3])
+ return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
}
swap.Priority, err = strconv.Atoi(swapFields[4])
if err != nil {
- return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4])
+ return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
}
return swap, nil
diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go
new file mode 100644
index 000000000000..80e0e947be78
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/thread.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ fsi "github.com/prometheus/procfs/internal/fs"
+)
+
+// Provide access to /proc/PID/task/TID files, for thread-specific values. Since
+// such files have the same structure as /proc/PID/ ones, the data structures
+// and the parsers for the latter may be reused.
+
+// AllThreads returns a list of all currently available threads under /proc/PID.
+func AllThreads(pid int) (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllThreads(pid)
+}
+
+// AllThreads returns a list of all currently available threads for PID.
+func (fs FS) AllThreads(pid int) (Procs, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ d, err := os.Open(taskPath)
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
+ }
+
+ t := Procs{}
+ for _, n := range names {
+ tid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+
+ t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}})
+ }
+
+ return t, nil
+}
+
+// Thread returns a process for a given PID, TID.
+func (fs FS) Thread(pid, tid int) (Proc, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ if _, err := os.Stat(taskPath); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil
+}
+
+// Thread returns a process for a given TID of Proc.
+func (proc Proc) Thread(tid int) (Proc, error) {
+ tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal}
+ if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: tfs}, nil
+}
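thread.go is new: it reuses the per-process parsers against `/proc/PID/task/TID`. A usage sketch of the added API (PID 1 is only an example target; `Comm` via `Stat()` is the standard procfs accessor):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	threads, err := procfs.AllThreads(1) // all TIDs under /proc/1/task
	if err != nil {
		log.Fatal(err)
	}
	for _, th := range threads {
		stat, err := th.Stat() // same parser as /proc/PID/stat
		if err != nil {
			continue // thread may have exited between Readdirnames and Stat
		}
		fmt.Printf("tid=%d comm=%s\n", th.PID, stat.Comm)
	}
}
```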
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
index 20ceb77e2df7..51c49d89e81b 100644
--- a/vendor/github.com/prometheus/procfs/vm.go
+++ b/vendor/github.com/prometheus/procfs/vm.go
@@ -26,7 +26,9 @@ import (
)
// The VM interface is described at
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
+// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
// and numa_zonelist_order (deprecated) which is a string.
@@ -84,7 +86,7 @@ func (fs FS) VM() (*VM, error) {
return nil, err
}
if !file.Mode().IsDir() {
- return nil, fmt.Errorf("%s is not a directory", path)
+ return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path)
}
files, err := os.ReadDir(path)
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
index c745a4c04ff1..e54d94b09039 100644
--- a/vendor/github.com/prometheus/procfs/zoneinfo.go
+++ b/vendor/github.com/prometheus/procfs/zoneinfo.go
@@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil {
- return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
}
zoneinfo, err := parseZoneinfo(data)
if err != nil {
- return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
}
return zoneinfo, nil
}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index b774da88d86c..4d4b4aad6fe8 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -28,6 +28,8 @@ var (
uint32Type = reflect.TypeOf(uint32(1))
uint64Type = reflect.TypeOf(uint64(1))
+ uintptrType = reflect.TypeOf(uintptr(1))
+
float32Type = reflect.TypeOf(float32(1))
float64Type = reflect.TypeOf(float64(1))
@@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
case reflect.Struct:
{
// All structs enter here. We're not interested in most types.
- if !canConvert(obj1Value, timeType) {
+ if !obj1Value.CanConvert(timeType) {
break
}
- // time.Time can compared!
+ // time.Time can be compared!
timeObj1, ok := obj1.(time.Time)
if !ok {
timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
@@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
case reflect.Slice:
{
// We only care about the []byte type.
- if !canConvert(obj1Value, bytesType) {
+ if !obj1Value.CanConvert(bytesType) {
break
}
@@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
}
+ case reflect.Uintptr:
+ {
+ uintptrObj1, ok := obj1.(uintptr)
+ if !ok {
+ uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr)
+ }
+ uintptrObj2, ok := obj2.(uintptr)
+ if !ok {
+ uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr)
+ }
+ if uintptrObj1 > uintptrObj2 {
+ return compareGreater, true
+ }
+ if uintptrObj1 == uintptrObj2 {
+ return compareEqual, true
+ }
+ if uintptrObj1 < uintptrObj2 {
+ return compareLess, true
+ }
+ }
}
return compareEqual, false
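With the `reflect.Uintptr` case added above, the ordering assertions (`Greater`, `Less`, and friends, which all route through `compare`) accept `uintptr` operands. A short sketch:

```go
package compare_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestUintptrOrdering(t *testing.T) {
	a, b := uintptr(0x1000), uintptr(0x2000)
	assert.Less(t, a, b)    // handled by the new reflect.Uintptr branch
	assert.Greater(t, b, a) // previously unsupported for uintptr
}
```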
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
deleted file mode 100644
index da867903e2fa..000000000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build go1.17
-// +build go1.17
-
-// TODO: once support for Go 1.16 is dropped, this file can be
-// merged/removed with assertion_compare_go1.17_test.go and
-// assertion_compare_legacy.go
-
-package assert
-
-import "reflect"
-
-// Wrapper around reflect.Value.CanConvert, for compatibility
-// reasons.
-func canConvert(value reflect.Value, to reflect.Type) bool {
- return value.CanConvert(to)
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
deleted file mode 100644
index 1701af2a3c89..000000000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build !go1.17
-// +build !go1.17
-
-// TODO: once support for Go 1.16 is dropped, this file can be
-// merged/removed with assertion_compare_go1.17_test.go and
-// assertion_compare_can_convert.go
-
-package assert
-
-import "reflect"
-
-// Older versions of Go does not have the reflect.Value.CanConvert
-// method.
-func canConvert(value reflect.Value, to reflect.Type) bool {
- return false
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 84dbd6c790b9..3ddab109ad9e 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -1,7 +1,4 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
+// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
package assert
@@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// EqualValuesf asserts that two objects are equal or convertable to the same types
+// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
@@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf
return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
}
+// NotImplementsf asserts that an object does not implement the specified interface.
+//
+// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
// NotNilf asserts that the specified object is not nil.
//
// assert.NotNilf(t, err, "error message %s", "formatted")
@@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
//
-// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
+// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subsetf asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
//
-// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
+// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
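The regenerated docs above reflect that `Subset`/`NotSubset` now accept maps on both sides: a map subset passes when every key/value pair also occurs in the list map. A sketch, per the updated documentation:

```go
package subset_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubsetWithMaps(t *testing.T) {
	m := map[string]int{"x": 1, "y": 2}
	assert.Subset(t, m, map[string]int{"x": 1})    // key and value both match
	assert.NotSubset(t, m, map[string]int{"z": 3}) // key absent
	assert.NotSubset(t, m, map[string]int{"x": 9}) // key present, value differs
}
```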
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index b1d94aec53cc..a84e09bd4090 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -1,7 +1,4 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
+// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
package assert
@@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
return EqualExportedValuesf(a.t, expected, actual, msg, args...)
}
-// EqualValues asserts that two objects are equal or convertable to the same types
+// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
// a.EqualValues(uint32(123), int32(123))
@@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
return EqualValues(a.t, expected, actual, msgAndArgs...)
}
-// EqualValuesf asserts that two objects are equal or convertable to the same types
+// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
@@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in
return NotErrorIsf(a.t, err, target, msg, args...)
}
+// NotImplements asserts that an object does not implement the specified interface.
+//
+// a.NotImplements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotImplements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// NotImplementsf asserts that an object does not implement the specified interface.
+//
+// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotImplementsf(a.t, interfaceObject, object, msg, args...)
+}
+
// NotNil asserts that the specified object is not nil.
//
// a.NotNil(err)
@@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
return NotSamef(a.t, expected, actual, msg, args...)
}
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubset asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
//
-// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+// a.NotSubset([1, 3, 4], [1, 2])
+// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
return NotSubset(a.t, list, subset, msgAndArgs...)
}
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
//
-// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
+// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
return Samef(a.t, expected, actual, msg, args...)
}
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subset asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
//
-// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+// a.Subset([1, 2, 3], [1, 2])
+// a.Subset({"x": 1, "y": 2}, {"x": 1})
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
return Subset(a.t, list, subset, msgAndArgs...)
}
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subsetf asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
//
-// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
+// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
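assertion_forward.go picks up the new `NotImplements`/`NotImplementsf` wrappers. Usage mirrors `Implements`: pass a nil pointer to the interface type so reflection can recover the interface. A sketch:

```go
package impl_test

import (
	"io"
	"testing"

	"github.com/stretchr/testify/assert"
)

type plain struct{}

func TestNotImplements(t *testing.T) {
	// (*io.Closer)(nil) carries the interface type; plain has no Close method.
	assert.NotImplements(t, (*io.Closer)(nil), plain{})
}
```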
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index a55d1bba926c..0b7570f21c63 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -19,7 +19,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib"
- yaml "gopkg.in/yaml.v3"
+ "gopkg.in/yaml.v3"
)
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
@@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} {
return result.Interface()
case reflect.Array, reflect.Slice:
- result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len())
+ var result reflect.Value
+ if expectedKind == reflect.Array {
+ result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem()
+ } else {
+ result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len())
+ }
for i := 0; i < expectedValue.Len(); i++ {
index := expectedValue.Index(i)
if isNil(index) {
@@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} {
// structures.
//
// This function does no assertion of any kind.
+//
+// Deprecated: Use [EqualExportedValues] instead.
func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool {
expectedCleaned := copyExportedFields(expected)
actualCleaned := copyExportedFields(actual)
@@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool {
return true
}
- actualType := reflect.TypeOf(actual)
- if actualType == nil {
+ expectedValue := reflect.ValueOf(expected)
+ actualValue := reflect.ValueOf(actual)
+ if !expectedValue.IsValid() || !actualValue.IsValid() {
return false
}
- expectedValue := reflect.ValueOf(expected)
- if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
+
+ expectedType := expectedValue.Type()
+ actualType := actualValue.Type()
+ if !expectedType.ConvertibleTo(actualType) {
+ return false
+ }
+
+ if !isNumericType(expectedType) || !isNumericType(actualType) {
// Attempt comparison after type conversion
- return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
+ return reflect.DeepEqual(
+ expectedValue.Convert(actualType).Interface(), actual,
+ )
}
- return false
+ // If BOTH values are numeric, there are chances of false positives due
+ // to overflow or underflow. So, we need to make sure to always convert
+ // the smaller type to a larger type before comparing.
+ if expectedType.Size() >= actualType.Size() {
+ return actualValue.Convert(expectedType).Interface() == expected
+ }
+
+ return expectedValue.Convert(actualType).Interface() == actual
+}
+
+// isNumericType returns true if the type is one of:
+// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64,
+// float32, float64, complex64, complex128
+func isNumericType(t reflect.Type) bool {
+ return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128
}
/* CallerInfo is necessary because the assert functions use the testing object
@@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
// Aligns the provided message so that all lines after the first line start at the same location as the first line.
// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
-// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the
+// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
// basis on which the alignment occurs).
func indentMessageLines(message string, longestLabelLen int) string {
outBuf := new(bytes.Buffer)
@@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg
return true
}
+// NotImplements asserts that an object does not implement the specified interface.
+//
+// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
+func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+ if object == nil {
+ return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...)
+ }
+ if reflect.TypeOf(object).Implements(interfaceType) {
+ return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...)
+ }
+
+ return true
+}
+
// IsType asserts that the specified objects are of the same type.
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool {
// representations appropriate to be presented to the user.
//
// If the values are not of like type, the returned strings will be prefixed
-// with the type name, and the value will be enclosed in parenthesis similar
+// with the type name, and the value will be enclosed in parentheses similar
// to a type conversion in the Go grammar.
func formatUnequalValues(expected, actual interface{}) (e string, a string) {
if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
@@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string {
return value
}
-// EqualValues asserts that two objects are equal or convertable to the same types
+// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
// assert.EqualValues(t, uint32(123), int32(123))
@@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ..
return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
}
+ if aType.Kind() == reflect.Ptr {
+ aType = aType.Elem()
+ }
+ if bType.Kind() == reflect.Ptr {
+ bType = bType.Elem()
+ }
+
if aType.Kind() != reflect.Struct {
- return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...)
}
if bType.Kind() != reflect.Struct {
- return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...)
}
expected = copyExportedFields(expected)
@@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return Fail(t, "Expected value not to be nil.", msgAndArgs...)
}
-// containsKind checks if a specified kind in the slice of kinds.
-func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
- for i := 0; i < len(kinds); i++ {
- if kind == kinds[i] {
- return true
- }
- }
-
- return false
-}
-
// isNil checks if a specified object is nil or not, without Failing.
func isNil(object interface{}) bool {
if object == nil {
@@ -638,16 +683,13 @@ func isNil(object interface{}) bool {
}
value := reflect.ValueOf(object)
- kind := value.Kind()
- isNilableKind := containsKind(
- []reflect.Kind{
- reflect.Chan, reflect.Func,
- reflect.Interface, reflect.Map,
- reflect.Ptr, reflect.Slice, reflect.UnsafePointer},
- kind)
-
- if isNilableKind && value.IsNil() {
- return true
+ switch value.Kind() {
+ case
+ reflect.Chan, reflect.Func,
+ reflect.Interface, reflect.Map,
+ reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+
+ return value.IsNil()
}
return false
@@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
}
-// getLen try to get length of object.
-// return (false, 0) if impossible.
-func getLen(x interface{}) (ok bool, length int) {
+// getLen tries to get the length of an object.
+// It returns (0, false) if impossible.
+func getLen(x interface{}) (length int, ok bool) {
v := reflect.ValueOf(x)
defer func() {
- if e := recover(); e != nil {
- ok = false
- }
+ ok = recover() == nil
}()
- return true, v.Len()
+ return v.Len(), true
}
// Len asserts that the specified object has specific length.
@@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
if h, ok := t.(tHelper); ok {
h.Helper()
}
- ok, l := getLen(object)
+ l, ok := getLen(object)
if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...)
}
if l != length {
- return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
}
return true
}
@@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
}
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subset asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
//
-// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+// assert.Subset(t, [1, 2, 3], [1, 2])
+// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true
}
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubset asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
//
-// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+// assert.NotSubset(t, [1, 3, 4], [1, 2])
+// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
h.Helper()
}
if math.IsNaN(epsilon) {
- return Fail(t, "epsilon must not be NaN")
+ return Fail(t, "epsilon must not be NaN", msgAndArgs...)
}
actualEpsilon, err := calcRelativeError(expected, actual)
if err != nil {
@@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
if h, ok := t.(tHelper); ok {
h.Helper()
}
- if expected == nil || actual == nil ||
- reflect.TypeOf(actual).Kind() != reflect.Slice ||
- reflect.TypeOf(expected).Kind() != reflect.Slice {
+
+ if expected == nil || actual == nil {
return Fail(t, "Parameters must be slice", msgAndArgs...)
}
- actualSlice := reflect.ValueOf(actual)
expectedSlice := reflect.ValueOf(expected)
+ actualSlice := reflect.ValueOf(actual)
- for i := 0; i < actualSlice.Len(); i++ {
- result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)
- if !result {
- return result
+ if expectedSlice.Type().Kind() != reflect.Slice {
+ return Fail(t, "Expected value must be slice", msgAndArgs...)
+ }
+
+ expectedLen := expectedSlice.Len()
+ if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) {
+ return false
+ }
+
+ for i := 0; i < expectedLen; i++ {
+ if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) {
+ return false
}
}
@@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) {
}
// FailNow panics.
-func (c *CollectT) FailNow() {
+func (*CollectT) FailNow() {
panic("Assertion failed")
}
-// Reset clears the collected errors.
-func (c *CollectT) Reset() {
- c.errors = nil
+// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
+func (*CollectT) Reset() {
+ panic("Reset() is deprecated")
}
-// Copy copies the collected errors to the supplied t.
-func (c *CollectT) Copy(t TestingT) {
- if tt, ok := t.(tHelper); ok {
- tt.Helper()
- }
- for _, err := range c.errors {
- t.Errorf("%v", err)
- }
+// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
+func (*CollectT) Copy(TestingT) {
+ panic("Copy() is deprecated")
}
// EventuallyWithT asserts that given condition will be met in waitFor time,
@@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
h.Helper()
}
- collect := new(CollectT)
- ch := make(chan bool, 1)
+ var lastFinishedTickErrs []error
+ ch := make(chan []error, 1)
timer := time.NewTimer(waitFor)
defer timer.Stop()
@@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
for tick := ticker.C; ; {
select {
case <-timer.C:
- collect.Copy(t)
+ for _, err := range lastFinishedTickErrs {
+ t.Errorf("%v", err)
+ }
return Fail(t, "Condition never satisfied", msgAndArgs...)
case <-tick:
tick = nil
- collect.Reset()
go func() {
+ collect := new(CollectT)
+ defer func() {
+ ch <- collect.errors
+ }()
condition(collect)
- ch <- len(collect.errors) == 0
}()
- case v := <-ch:
- if v {
+ case errs := <-ch:
+ if len(errs) == 0 {
return true
}
+ // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
+ lastFinishedTickErrs = errs
tick = ticker.C
}
}
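The reworked `EventuallyWithT` above spawns a fresh `CollectT` per tick and keeps only the last finished tick's errors, so a timeout reports the most recent failure rather than a stale or reset one. The calling convention is unchanged; a usage sketch:

```go
package eventually_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestEventuallyWithT(t *testing.T) {
	var n atomic.Int32
	go func() {
		time.Sleep(50 * time.Millisecond)
		n.Store(42)
	}()

	assert.EventuallyWithT(t, func(c *assert.CollectT) {
		// CollectT satisfies TestingT, so nested assertions record errors
		// for this tick instead of failing the test outright.
		assert.Equal(c, int32(42), n.Load())
	}, time.Second, 10*time.Millisecond)
}
```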
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
index d8038c28a758..861ed4b7ced0 100644
--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -12,7 +12,7 @@ import (
// an error if building a new request fails.
func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
w := httptest.NewRecorder()
- req, err := http.NewRequest(method, url, nil)
+ req, err := http.NewRequest(method, url, http.NoBody)
if err != nil {
return -1, err
}
@@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value
}
code, err := httpCode(handler, method, url, values)
if err != nil {
- Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
}
isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
if !isSuccessCode {
- Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
+ Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
}
return isSuccessCode
@@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu
}
code, err := httpCode(handler, method, url, values)
if err != nil {
- Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
}
isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
if !isRedirectCode {
- Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
+ Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
}
return isRedirectCode
@@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
}
code, err := httpCode(handler, method, url, values)
if err != nil {
- Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
}
isErrorCode := code >= http.StatusBadRequest
if !isErrorCode {
- Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
+ Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
}
return isErrorCode
@@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va
}
code, err := httpCode(handler, method, url, values)
if err != nil {
- Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
}
successful := code == statuscode
if !successful {
- Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
+ Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...)
}
return successful
@@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va
// empty string if building a new request fails.
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
w := httptest.NewRecorder()
- req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+ if len(values) > 0 {
+ url += "?" + values.Encode()
+ }
+ req, err := http.NewRequest(method, url, http.NoBody)
if err != nil {
return ""
}
@@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string,
contains := strings.Contains(body, fmt.Sprint(str))
if !contains {
- Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
}
return contains
@@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin
contains := strings.Contains(body, fmt.Sprint(str))
if contains {
- Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
}
return !contains
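`HTTPBody` above now appends `"?"+values.Encode()` only when values is non-empty, so handlers that inspect the raw URL no longer see a stray trailing `?` for nil values. A sketch of the observable difference:

```go
package httpbody_test

import (
	"fmt"
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestHTTPBodyNoStrayQuery(t *testing.T) {
	echo := func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, r.URL.String()) // echoes exactly what was requested
	}
	body := assert.HTTPBody(echo, http.MethodGet, "/ping", nil)
	assert.Equal(t, "/ping", body) // previously "/ping?"
}
```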
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index 63f852147675..506a82f80777 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -1,7 +1,4 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
+// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
package require
@@ -235,7 +232,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
t.FailNow()
}
-// EqualValues asserts that two objects are equal or convertable to the same types
+// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
// assert.EqualValues(t, uint32(123), int32(123))
@@ -249,7 +246,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg
t.FailNow()
}
-// EqualValuesf asserts that two objects are equal or convertable to the same types
+// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
@@ -1546,6 +1543,32 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf
t.FailNow()
}
+// NotImplements asserts that an object does not implement the specified interface.
+//
+// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
+func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotImplements(t, interfaceObject, object, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NotImplementsf asserts that an object does not implement the specified interface.
+//
+// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotImplementsf(t, interfaceObject, object, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// NotNil asserts that the specified object is not nil.
//
// assert.NotNil(t, err)
@@ -1658,10 +1681,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
t.FailNow()
}
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubset asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
//
-// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+// assert.NotSubset(t, [1, 3, 4], [1, 2])
+// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1672,10 +1697,12 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i
t.FailNow()
}
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
//
-// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
+// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1880,10 +1907,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
t.FailNow()
}
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subset asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
//
-// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+// assert.Subset(t, [1, 2, 3], [1, 2])
+// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1894,10 +1922,11 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte
t.FailNow()
}
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subsetf asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
//
-// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
+// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
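require mirrors every assert function but calls `t.FailNow()` on failure, as the generated `NotImplements` wrapper above shows. A sketch of the practical difference in a test (the `compute` helper is hypothetical):

```go
package require_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func compute() (int, error) { return 42, nil }

func TestFailureSemantics(t *testing.T) {
	v, err := compute()
	require.NoError(t, err) // a failure aborts the test here (FailNow)
	assert.Equal(t, 42, v)  // a failure records an error and continues
}
```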
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
index 3b5b09330a43..eee8310a5fa9 100644
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -1,7 +1,4 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
+// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
package require
@@ -190,7 +187,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
EqualExportedValuesf(a.t, expected, actual, msg, args...)
}
-// EqualValues asserts that two objects are equal or convertable to the same types
+// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
// a.EqualValues(uint32(123), int32(123))
@@ -201,7 +198,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
EqualValues(a.t, expected, actual, msgAndArgs...)
}
-// EqualValuesf asserts that two objects are equal or convertable to the same types
+// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
@@ -1222,6 +1219,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in
NotErrorIsf(a.t, err, target, msg, args...)
}
+// NotImplements asserts that an object does not implement the specified interface.
+//
+// a.NotImplements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotImplements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// NotImplementsf asserts that an object does not implement the specified interface.
+//
+// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotImplementsf(a.t, interfaceObject, object, msg, args...)
+}
+
// NotNil asserts that the specified object is not nil.
//
// a.NotNil(err)
@@ -1310,10 +1327,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
NotSamef(a.t, expected, actual, msg, args...)
}
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubset asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
//
-// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+// a.NotSubset([1, 3, 4], [1, 2])
+// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1321,10 +1340,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
NotSubset(a.t, list, subset, msgAndArgs...)
}
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
//
-// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
+// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1484,10 +1505,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
Samef(a.t, expected, actual, msg, args...)
}
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subset asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
//
-// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+// a.Subset([1, 2, 3], [1, 2])
+// a.Subset({"x": 1, "y": 2}, {"x": 1})
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1495,10 +1517,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
Subset(a.t, list, subset, msgAndArgs...)
}
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subsetf asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
//
-// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
+// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
diff --git a/vendor/github.com/vbatts/tar-split/LICENSE b/vendor/github.com/vbatts/tar-split/LICENSE
new file mode 100644
index 000000000000..ca03685b1586
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
new file mode 100644
index 000000000000..dee9e47e4ae4
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
@@ -0,0 +1,723 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tar implements access to tar archives.
+//
+// Tape archives (tar) are a file format for storing a sequence of files that
+// can be read and written in a streaming manner.
+// This package aims to cover most variations of the format,
+// including those produced by GNU and BSD tar tools.
+package tar
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit
+// architectures. If a large value is encountered when decoding, the result
+// stored in Header will be the truncated version.
+
+var (
+ ErrHeader = errors.New("archive/tar: invalid tar header")
+ ErrWriteTooLong = errors.New("archive/tar: write too long")
+ ErrFieldTooLong = errors.New("archive/tar: header field too long")
+ ErrWriteAfterClose = errors.New("archive/tar: write after close")
+ errMissData = errors.New("archive/tar: sparse file references non-existent data")
+ errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data")
+ errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole")
+)
+
+type headerError []string
+
+func (he headerError) Error() string {
+ const prefix = "archive/tar: cannot encode header"
+ var ss []string
+ for _, s := range he {
+ if s != "" {
+ ss = append(ss, s)
+ }
+ }
+ if len(ss) == 0 {
+ return prefix
+ }
+ return fmt.Sprintf("%s: %v", prefix, strings.Join(ss, "; and "))
+}
+
+// Type flags for Header.Typeflag.
+const (
+ // Type '0' indicates a regular file.
+ TypeReg = '0'
+ TypeRegA = '\x00' // Deprecated: Use TypeReg instead.
+
+ // Type '1' to '6' are header-only flags and may not have a data body.
+ TypeLink = '1' // Hard link
+ TypeSymlink = '2' // Symbolic link
+ TypeChar = '3' // Character device node
+ TypeBlock = '4' // Block device node
+ TypeDir = '5' // Directory
+ TypeFifo = '6' // FIFO node
+
+ // Type '7' is reserved.
+ TypeCont = '7'
+
+ // Type 'x' is used by the PAX format to store key-value records that
+ // are only relevant to the next file.
+ // This package transparently handles these types.
+ TypeXHeader = 'x'
+
+ // Type 'g' is used by the PAX format to store key-value records that
+ // are relevant to all subsequent files.
+ // This package only supports parsing and composing such headers,
+ // but does not currently support persisting the global state across files.
+ TypeXGlobalHeader = 'g'
+
+ // Type 'S' indicates a sparse file in the GNU format.
+ TypeGNUSparse = 'S'
+
+ // Types 'L' and 'K' are used by the GNU format for a meta file
+ // used to store the path or link name for the next file.
+ // This package transparently handles these types.
+ TypeGNULongName = 'L'
+ TypeGNULongLink = 'K'
+)
+
+// Keywords for PAX extended header records.
+const (
+ paxNone = "" // Indicates that no PAX key is suitable
+ paxPath = "path"
+ paxLinkpath = "linkpath"
+ paxSize = "size"
+ paxUid = "uid"
+ paxGid = "gid"
+ paxUname = "uname"
+ paxGname = "gname"
+ paxMtime = "mtime"
+ paxAtime = "atime"
+ paxCtime = "ctime" // Removed from later revision of PAX spec, but was valid
+ paxCharset = "charset" // Currently unused
+ paxComment = "comment" // Currently unused
+
+ paxSchilyXattr = "SCHILY.xattr."
+
+ // Keywords for GNU sparse files in a PAX extended header.
+ paxGNUSparse = "GNU.sparse."
+ paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
+ paxGNUSparseOffset = "GNU.sparse.offset"
+ paxGNUSparseNumBytes = "GNU.sparse.numbytes"
+ paxGNUSparseMap = "GNU.sparse.map"
+ paxGNUSparseName = "GNU.sparse.name"
+ paxGNUSparseMajor = "GNU.sparse.major"
+ paxGNUSparseMinor = "GNU.sparse.minor"
+ paxGNUSparseSize = "GNU.sparse.size"
+ paxGNUSparseRealSize = "GNU.sparse.realsize"
+)
+
+// basicKeys is a set of the PAX keys for which we have built-in support.
+// This does not contain "charset" or "comment", which are both PAX-specific,
+// so adding them as first-class features of Header is unlikely.
+// Users can set them via the PAXRecords field instead.
+var basicKeys = map[string]bool{
+ paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true,
+ paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true,
+}
+
+// A Header represents a single header in a tar archive.
+// Some fields may not be populated.
+//
+// For forward compatibility, users that retrieve a Header from Reader.Next,
+// mutate it in some ways, and then pass it back to Writer.WriteHeader
+// should do so by creating a new Header and copying the fields
+// that they are interested in preserving.
+type Header struct {
+ // Typeflag is the type of header entry.
+ // The zero value is automatically promoted to either TypeReg or TypeDir
+ // depending on the presence of a trailing slash in Name.
+ Typeflag byte
+
+ Name string // Name of file entry
+ Linkname string // Target name of link (valid for TypeLink or TypeSymlink)
+
+ Size int64 // Logical file size in bytes
+ Mode int64 // Permission and mode bits
+ Uid int // User ID of owner
+ Gid int // Group ID of owner
+ Uname string // User name of owner
+ Gname string // Group name of owner
+
+ // If the Format is unspecified, then Writer.WriteHeader rounds ModTime
+ // to the nearest second and ignores the AccessTime and ChangeTime fields.
+ //
+ // To use AccessTime or ChangeTime, specify the Format as PAX or GNU.
+ // To use sub-second resolution, specify the Format as PAX.
+ ModTime time.Time // Modification time
+ AccessTime time.Time // Access time (requires either PAX or GNU support)
+ ChangeTime time.Time // Change time (requires either PAX or GNU support)
+
+ Devmajor int64 // Major device number (valid for TypeChar or TypeBlock)
+ Devminor int64 // Minor device number (valid for TypeChar or TypeBlock)
+
+ // Xattrs stores extended attributes as PAX records under the
+ // "SCHILY.xattr." namespace.
+ //
+ // The following are semantically equivalent:
+ // h.Xattrs[key] = value
+ // h.PAXRecords["SCHILY.xattr."+key] = value
+ //
+ // When Writer.WriteHeader is called, the contents of Xattrs will take
+ // precedence over those in PAXRecords.
+ //
+ // Deprecated: Use PAXRecords instead.
+ Xattrs map[string]string
+
+ // PAXRecords is a map of PAX extended header records.
+ //
+ // User-defined records should have keys of the following form:
+ // VENDOR.keyword
+ // Where VENDOR is some namespace in all uppercase, and keyword may
+ // not contain the '=' character (e.g., "GOLANG.pkg.version").
+ // The key and value should be non-empty UTF-8 strings.
+ //
+ // When Writer.WriteHeader is called, PAX records derived from the
+ // other fields in Header take precedence over PAXRecords.
+ PAXRecords map[string]string
+
+ // Format specifies the format of the tar header.
+ //
+ // This is set by Reader.Next as a best-effort guess at the format.
+ // Since the Reader liberally reads some non-compliant files,
+ // it is possible for this to be FormatUnknown.
+ //
+ // If the format is unspecified when Writer.WriteHeader is called,
+ // then it uses the first format (in the order of USTAR, PAX, GNU)
+ // capable of encoding this Header (see Format).
+ Format Format
+}
+
+// sparseEntry represents a Length-sized fragment at Offset in the file.
+type sparseEntry struct{ Offset, Length int64 }
+
+func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length }
+
+// A sparse file can be represented as either a sparseDatas or a sparseHoles.
+// As long as the total size is known, they are equivalent and one can be
+// converted to the other form and back. The various tar formats with sparse
+// file support represent sparse files in the sparseDatas form. That is, they
+// specify the fragments in the file that have data, and treat everything else as
+// having zero bytes. As such, the encoding and decoding logic in this package
+// deals with sparseDatas.
+//
+// However, the external API uses sparseHoles instead of sparseDatas because the
+// zero value of sparseHoles logically represents a normal file (i.e., there are
+// no holes in it). On the other hand, the zero value of sparseDatas implies
+// that the file has no data in it, which is rather odd.
+//
+// As an example, if the underlying raw file contains the 8-byte data:
+// var compactFile = "abcdefgh"
+//
+// And the sparse map has the following entries:
+// var spd sparseDatas = []sparseEntry{
+// {Offset: 2, Length: 5}, // Data fragment for 2..6
+// {Offset: 18, Length: 3}, // Data fragment for 18..20
+// }
+// var sph sparseHoles = []sparseEntry{
+// {Offset: 0, Length: 2}, // Hole fragment for 0..1
+// {Offset: 7, Length: 11}, // Hole fragment for 7..17
+// {Offset: 21, Length: 4}, // Hole fragment for 21..24
+// }
+//
+// Then the content of the resulting sparse file with a Header.Size of 25 is:
+// var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
+type (
+ sparseDatas []sparseEntry
+ sparseHoles []sparseEntry
+)
+
+// validateSparseEntries reports whether sp is a valid sparse map.
+// It does not matter whether sp represents data fragments or hole fragments.
+func validateSparseEntries(sp []sparseEntry, size int64) bool {
+ // Validate all sparse entries. These are the same checks as performed by
+ // the BSD tar utility.
+ if size < 0 {
+ return false
+ }
+ var pre sparseEntry
+ for _, cur := range sp {
+ switch {
+ case cur.Offset < 0 || cur.Length < 0:
+ return false // Negative values are never okay
+ case cur.Offset > math.MaxInt64-cur.Length:
+ return false // Integer overflow with large length
+ case cur.endOffset() > size:
+ return false // Region extends beyond the actual size
+ case pre.endOffset() > cur.Offset:
+ return false // Regions cannot overlap and must be in order
+ }
+ pre = cur
+ }
+ return true
+}
+
+// alignSparseEntries mutates src and returns dst where each fragment's
+// starting offset is aligned up to the nearest block edge, and each
+// ending offset is aligned down to the nearest block edge.
+//
+// Even though the Go tar Reader and the BSD tar utility can handle entries
+// with arbitrary offsets and lengths, the GNU tar utility can only handle
+// offsets and lengths that are multiples of blockSize.
+func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry {
+ dst := src[:0]
+ for _, s := range src {
+ pos, end := s.Offset, s.endOffset()
+ pos += blockPadding(+pos) // Round-up to nearest blockSize
+ if end != size {
+ end -= blockPadding(-end) // Round-down to nearest blockSize
+ }
+ if pos < end {
+ dst = append(dst, sparseEntry{Offset: pos, Length: end - pos})
+ }
+ }
+ return dst
+}
+
+// invertSparseEntries converts a sparse map from one form to the other.
+// If the input is sparseHoles, then it will output sparseDatas and vice-versa.
+// The input must have been already validated.
+//
+// This function mutates src and returns a normalized map where:
+// * adjacent fragments are coalesced together
+// * only the last fragment may be empty
+// * the endOffset of the last fragment is the total size
+func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry {
+ dst := src[:0]
+ var pre sparseEntry
+ for _, cur := range src {
+ if cur.Length == 0 {
+ continue // Skip empty fragments
+ }
+ pre.Length = cur.Offset - pre.Offset
+ if pre.Length > 0 {
+ dst = append(dst, pre) // Only add non-empty fragments
+ }
+ pre.Offset = cur.endOffset()
+ }
+ pre.Length = size - pre.Offset // Possibly the only empty fragment
+ return append(dst, pre)
+}
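+
+// As a worked check (using the sparse map from the doc comment above with a
+// total size of 25), inverting the sparseDatas yields the matching sparseHoles:
+//
+//	invertSparseEntries([]sparseEntry{{2, 5}, {18, 3}}, 25)
+//	// => [{0, 2}, {7, 11}, {21, 4}]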
+
+// fileState tracks the number of logical (includes sparse holes) and physical
+// (actual in tar archive) bytes remaining for the current file.
+//
+// Invariant: LogicalRemaining >= PhysicalRemaining
+type fileState interface {
+ LogicalRemaining() int64
+ PhysicalRemaining() int64
+}
+
+// allowedFormats determines which formats can be used.
+// The value returned is the logical OR of multiple possible formats.
+// If the value is FormatUnknown, then the input Header cannot be encoded
+// and an error is returned explaining why.
+//
+// As a by-product of checking the fields, this function returns paxHdrs, which
+// contain all fields that could not be directly encoded.
+// A value receiver ensures that this method does not mutate the source Header.
+func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) {
+ format = FormatUSTAR | FormatPAX | FormatGNU
+ paxHdrs = make(map[string]string)
+
+ var whyNoUSTAR, whyNoPAX, whyNoGNU string
+ var preferPAX bool // Prefer PAX over USTAR
+ verifyString := func(s string, size int, name, paxKey string) {
+ // NUL-terminator is optional for path and linkpath.
+ // Technically, it is required for uname and gname,
+ // but neither GNU nor BSD tar checks for it.
+ tooLong := len(s) > size
+ allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath
+ if hasNUL(s) || (tooLong && !allowLongGNU) {
+ whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s)
+ format.mustNotBe(FormatGNU)
+ }
+ if !isASCII(s) || tooLong {
+ canSplitUSTAR := paxKey == paxPath
+ if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok {
+ whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s)
+ format.mustNotBe(FormatUSTAR)
+ }
+ if paxKey == paxNone {
+ whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s)
+ format.mustNotBe(FormatPAX)
+ } else {
+ paxHdrs[paxKey] = s
+ }
+ }
+ if v, ok := h.PAXRecords[paxKey]; ok && v == s {
+ paxHdrs[paxKey] = v
+ }
+ }
+ verifyNumeric := func(n int64, size int, name, paxKey string) {
+ if !fitsInBase256(size, n) {
+ whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n)
+ format.mustNotBe(FormatGNU)
+ }
+ if !fitsInOctal(size, n) {
+ whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n)
+ format.mustNotBe(FormatUSTAR)
+ if paxKey == paxNone {
+ whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n)
+ format.mustNotBe(FormatPAX)
+ } else {
+ paxHdrs[paxKey] = strconv.FormatInt(n, 10)
+ }
+ }
+ if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) {
+ paxHdrs[paxKey] = v
+ }
+ }
+ verifyTime := func(ts time.Time, size int, name, paxKey string) {
+ if ts.IsZero() {
+ return // Always okay
+ }
+ if !fitsInBase256(size, ts.Unix()) {
+ whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts)
+ format.mustNotBe(FormatGNU)
+ }
+ isMtime := paxKey == paxMtime
+ fitsOctal := fitsInOctal(size, ts.Unix())
+ if (isMtime && !fitsOctal) || !isMtime {
+ whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts)
+ format.mustNotBe(FormatUSTAR)
+ }
+ needsNano := ts.Nanosecond() != 0
+ if !isMtime || !fitsOctal || needsNano {
+ preferPAX = true // USTAR may truncate sub-second measurements
+ if paxKey == paxNone {
+ whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts)
+ format.mustNotBe(FormatPAX)
+ } else {
+ paxHdrs[paxKey] = formatPAXTime(ts)
+ }
+ }
+ if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) {
+ paxHdrs[paxKey] = v
+ }
+ }
+
+ // Check basic fields.
+ var blk block
+ v7 := blk.V7()
+ ustar := blk.USTAR()
+ gnu := blk.GNU()
+ verifyString(h.Name, len(v7.Name()), "Name", paxPath)
+ verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath)
+ verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname)
+ verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname)
+ verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone)
+ verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid)
+ verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid)
+ verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize)
+ verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone)
+ verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone)
+ verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime)
+ verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime)
+ verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime)
+
+ // Check for header-only types.
+ var whyOnlyPAX, whyOnlyGNU string
+ switch h.Typeflag {
+ case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse:
+ // Exclude TypeLink and TypeSymlink, since they may reference directories.
+ if strings.HasSuffix(h.Name, "/") {
+ return FormatUnknown, nil, headerError{"filename may not have trailing slash"}
+ }
+ case TypeXHeader, TypeGNULongName, TypeGNULongLink:
+ return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"}
+ case TypeXGlobalHeader:
+ h2 := Header{Name: h.Name, Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format}
+ if !reflect.DeepEqual(h, h2) {
+ return FormatUnknown, nil, headerError{"only PAXRecords should be set for TypeXGlobalHeader"}
+ }
+ whyOnlyPAX = "only PAX supports TypeXGlobalHeader"
+ format.mayOnlyBe(FormatPAX)
+ }
+ if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 {
+ return FormatUnknown, nil, headerError{"negative size on header-only type"}
+ }
+
+ // Check PAX records.
+ if len(h.Xattrs) > 0 {
+ for k, v := range h.Xattrs {
+ paxHdrs[paxSchilyXattr+k] = v
+ }
+ whyOnlyPAX = "only PAX supports Xattrs"
+ format.mayOnlyBe(FormatPAX)
+ }
+ if len(h.PAXRecords) > 0 {
+ for k, v := range h.PAXRecords {
+ switch _, exists := paxHdrs[k]; {
+ case exists:
+ continue // Do not overwrite existing records
+ case h.Typeflag == TypeXGlobalHeader:
+ paxHdrs[k] = v // Copy all records
+ case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse):
+ paxHdrs[k] = v // Ignore local records that may conflict
+ }
+ }
+ whyOnlyPAX = "only PAX supports PAXRecords"
+ format.mayOnlyBe(FormatPAX)
+ }
+ for k, v := range paxHdrs {
+ if !validPAXRecord(k, v) {
+ return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)}
+ }
+ }
+
+ // TODO(dsnet): Re-enable this when adding sparse support.
+ // See https://golang.org/issue/22735
+ /*
+ // Check sparse files.
+ if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse {
+ if isHeaderOnlyType(h.Typeflag) {
+ return FormatUnknown, nil, headerError{"header-only type cannot be sparse"}
+ }
+ if !validateSparseEntries(h.SparseHoles, h.Size) {
+ return FormatUnknown, nil, headerError{"invalid sparse holes"}
+ }
+ if h.Typeflag == TypeGNUSparse {
+ whyOnlyGNU = "only GNU supports TypeGNUSparse"
+ format.mayOnlyBe(FormatGNU)
+ } else {
+ whyNoGNU = "GNU supports sparse files only with TypeGNUSparse"
+ format.mustNotBe(FormatGNU)
+ }
+ whyNoUSTAR = "USTAR does not support sparse files"
+ format.mustNotBe(FormatUSTAR)
+ }
+ */
+
+ // Check desired format.
+ if wantFormat := h.Format; wantFormat != FormatUnknown {
+ if wantFormat.has(FormatPAX) && !preferPAX {
+ wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too
+ }
+ format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted
+ }
+ if format == FormatUnknown {
+ switch h.Format {
+ case FormatUSTAR:
+ err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU}
+ case FormatPAX:
+ err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU}
+ case FormatGNU:
+ err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX}
+ default:
+ err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU}
+ }
+ }
+ return format, paxHdrs, err
+}
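+
+// For example, a Header whose Size is 9GiB cannot fit in USTAR's 12-byte
+// octal Size field (maximum 8GiB-1), so allowedFormats would clear
+// FormatUSTAR, record the value in paxHdrs["size"] for PAX, and (all other
+// fields being encodable) return FormatPAX|FormatGNU.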
+
+// FileInfo returns an os.FileInfo for the Header.
+func (h *Header) FileInfo() os.FileInfo {
+ return headerFileInfo{h}
+}
+
+// headerFileInfo implements os.FileInfo.
+type headerFileInfo struct {
+ h *Header
+}
+
+func (fi headerFileInfo) Size() int64 { return fi.h.Size }
+func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
+func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
+func (fi headerFileInfo) Sys() interface{} { return fi.h }
+
+// Name returns the base name of the file.
+func (fi headerFileInfo) Name() string {
+ if fi.IsDir() {
+ return path.Base(path.Clean(fi.h.Name))
+ }
+ return path.Base(fi.h.Name)
+}
+
+// Mode returns the permission and mode bits for the headerFileInfo.
+func (fi headerFileInfo) Mode() (mode os.FileMode) {
+ // Set file permission bits.
+ mode = os.FileMode(fi.h.Mode).Perm()
+
+ // Set setuid, setgid and sticky bits.
+ if fi.h.Mode&c_ISUID != 0 {
+ mode |= os.ModeSetuid
+ }
+ if fi.h.Mode&c_ISGID != 0 {
+ mode |= os.ModeSetgid
+ }
+ if fi.h.Mode&c_ISVTX != 0 {
+ mode |= os.ModeSticky
+ }
+
+ // Set file mode bits; clear perm, setuid, setgid, and sticky bits.
+ switch m := os.FileMode(fi.h.Mode) &^ 07777; m {
+ case c_ISDIR:
+ mode |= os.ModeDir
+ case c_ISFIFO:
+ mode |= os.ModeNamedPipe
+ case c_ISLNK:
+ mode |= os.ModeSymlink
+ case c_ISBLK:
+ mode |= os.ModeDevice
+ case c_ISCHR:
+ mode |= os.ModeDevice
+ mode |= os.ModeCharDevice
+ case c_ISSOCK:
+ mode |= os.ModeSocket
+ }
+
+ switch fi.h.Typeflag {
+ case TypeSymlink:
+ mode |= os.ModeSymlink
+ case TypeChar:
+ mode |= os.ModeDevice
+ mode |= os.ModeCharDevice
+ case TypeBlock:
+ mode |= os.ModeDevice
+ case TypeDir:
+ mode |= os.ModeDir
+ case TypeFifo:
+ mode |= os.ModeNamedPipe
+ }
+
+ return mode
+}
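+
+// For example, a Header with Typeflag TypeDir and Mode 0755 produces
+// FileInfo().Mode() == os.ModeDir | 0755.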
+
+// sysStat, if non-nil, populates h from system-dependent fields of fi.
+var sysStat func(fi os.FileInfo, h *Header) error
+
+const (
+ // Mode constants from the USTAR spec:
+ // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
+ c_ISUID = 04000 // Set uid
+ c_ISGID = 02000 // Set gid
+ c_ISVTX = 01000 // Save text (sticky bit)
+
+ // Common Unix mode constants; these are not defined in any common tar standard.
+ // Header.FileInfo understands these, but FileInfoHeader will never produce these.
+ c_ISDIR = 040000 // Directory
+ c_ISFIFO = 010000 // FIFO
+ c_ISREG = 0100000 // Regular file
+ c_ISLNK = 0120000 // Symbolic link
+ c_ISBLK = 060000 // Block special file
+ c_ISCHR = 020000 // Character special file
+ c_ISSOCK = 0140000 // Socket
+)
+
+// FileInfoHeader creates a partially-populated Header from fi.
+// If fi describes a symlink, FileInfoHeader records link as the link target.
+// If fi describes a directory, a slash is appended to the name.
+//
+// Since os.FileInfo's Name method only returns the base name of
+// the file it describes, it may be necessary to modify Header.Name
+// to provide the full path name of the file.
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
+ if fi == nil {
+ return nil, errors.New("archive/tar: FileInfo is nil")
+ }
+ fm := fi.Mode()
+ h := &Header{
+ Name: fi.Name(),
+ ModTime: fi.ModTime(),
+ Mode: int64(fm.Perm()), // or'd with c_IS* constants later
+ }
+ switch {
+ case fm.IsRegular():
+ h.Typeflag = TypeReg
+ h.Size = fi.Size()
+ case fi.IsDir():
+ h.Typeflag = TypeDir
+ h.Name += "/"
+ case fm&os.ModeSymlink != 0:
+ h.Typeflag = TypeSymlink
+ h.Linkname = link
+ case fm&os.ModeDevice != 0:
+ if fm&os.ModeCharDevice != 0 {
+ h.Typeflag = TypeChar
+ } else {
+ h.Typeflag = TypeBlock
+ }
+ case fm&os.ModeNamedPipe != 0:
+ h.Typeflag = TypeFifo
+ case fm&os.ModeSocket != 0:
+ return nil, fmt.Errorf("archive/tar: sockets not supported")
+ default:
+ return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
+ }
+ if fm&os.ModeSetuid != 0 {
+ h.Mode |= c_ISUID
+ }
+ if fm&os.ModeSetgid != 0 {
+ h.Mode |= c_ISGID
+ }
+ if fm&os.ModeSticky != 0 {
+ h.Mode |= c_ISVTX
+ }
+ // If possible, populate additional fields from OS-specific
+ // FileInfo fields.
+ if sys, ok := fi.Sys().(*Header); ok {
+ // This FileInfo came from a Header (not the OS). Use the
+ // original Header to populate all remaining fields.
+ h.Uid = sys.Uid
+ h.Gid = sys.Gid
+ h.Uname = sys.Uname
+ h.Gname = sys.Gname
+ h.AccessTime = sys.AccessTime
+ h.ChangeTime = sys.ChangeTime
+ if sys.Xattrs != nil {
+ h.Xattrs = make(map[string]string)
+ for k, v := range sys.Xattrs {
+ h.Xattrs[k] = v
+ }
+ }
+ if sys.Typeflag == TypeLink {
+ // hard link
+ h.Typeflag = TypeLink
+ h.Size = 0
+ h.Linkname = sys.Linkname
+ }
+ if sys.PAXRecords != nil {
+ h.PAXRecords = make(map[string]string)
+ for k, v := range sys.PAXRecords {
+ h.PAXRecords[k] = v
+ }
+ }
+ }
+ if sysStat != nil {
+ return h, sysStat(fi, h)
+ }
+ return h, nil
+}
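+
+// A typical use (sketch; fi and dir are placeholders for a stat result and
+// the directory it came from) restores the full path, since os.FileInfo only
+// carries the base name:
+//
+//	hdr, err := FileInfoHeader(fi, "")
+//	if err != nil {
+//		return err
+//	}
+//	hdr.Name = path.Join(dir, fi.Name())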
+
+// isHeaderOnlyType checks if the given type flag is of the type that has no
+// data section even if a size is specified.
+func isHeaderOnlyType(flag byte) bool {
+ switch flag {
+ case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
+ return true
+ default:
+ return false
+ }
+}
+
+func min(a, b int64) int64 {
+ if a < b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/format.go b/vendor/github.com/vbatts/tar-split/archive/tar/format.go
new file mode 100644
index 000000000000..1f89d0c59a15
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/format.go
@@ -0,0 +1,303 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import "strings"
+
+// Format represents the tar archive format.
+//
+// The original tar format was introduced in Unix V7.
+// Since then, there have been multiple competing formats attempting to
+// standardize or extend the V7 format to overcome its limitations.
+// The most common formats are the USTAR, PAX, and GNU formats,
+// each with their own advantages and limitations.
+//
+// The following table captures the capabilities of each format:
+//
+// | USTAR | PAX | GNU
+// ------------------+--------+-----------+----------
+// Name | 256B | unlimited | unlimited
+// Linkname | 100B | unlimited | unlimited
+// Size | uint33 | unlimited | uint89
+// Mode | uint21 | uint21 | uint57
+// Uid/Gid | uint21 | unlimited | uint57
+// Uname/Gname | 32B | unlimited | 32B
+// ModTime | uint33 | unlimited | int89
+// AccessTime | n/a | unlimited | int89
+// ChangeTime | n/a | unlimited | int89
+// Devmajor/Devminor | uint21 | uint21 | uint57
+// ------------------+--------+-----------+----------
+// string encoding | ASCII | UTF-8 | binary
+// sub-second times | no | yes | no
+// sparse files | no | yes | yes
+//
+// The table's upper portion shows the Header fields, where each format reports
+// the maximum number of bytes allowed for each string field and
+// the integer type used to store each numeric field
+// (where timestamps are stored as the number of seconds since the Unix epoch).
+//
+// The table's lower portion shows specialized features of each format,
+// such as supported string encodings, support for sub-second timestamps,
+// or support for sparse files.
+//
+// The Writer currently provides no support for sparse files.
+type Format int
+
+// Constants to identify various tar formats.
+const (
+ // Deliberately hide the meaning of constants from public API.
+ _ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc...
+
+ // FormatUnknown indicates that the format is unknown.
+ FormatUnknown
+
+ // The format of the original Unix V7 tar tool prior to standardization.
+ formatV7
+
+ // FormatUSTAR represents the USTAR header format defined in POSIX.1-1988.
+ //
+ // While this format is compatible with most tar readers,
+ // the format has several limitations making it unsuitable for some usages.
+ // Most notably, it cannot support sparse files, files larger than 8GiB,
+ // filenames larger than 256 characters, and non-ASCII filenames.
+ //
+ // Reference:
+ // http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
+ FormatUSTAR
+
+ // FormatPAX represents the PAX header format defined in POSIX.1-2001.
+ //
+ // PAX extends USTAR by writing a special file with Typeflag TypeXHeader
+ // preceding the original header. This file contains a set of key-value
+ // records, which are used to overcome USTAR's shortcomings, in addition to
+ // providing the ability to have sub-second resolution for timestamps.
+ //
+ // Some newer formats add their own extensions to PAX by defining their
+ // own keys and assigning certain semantic meaning to the associated values.
+ // For example, sparse file support in PAX is implemented using keys
+ // defined by the GNU manual (e.g., "GNU.sparse.map").
+ //
+ // Reference:
+ // http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html
+ FormatPAX
+
+ // FormatGNU represents the GNU header format.
+ //
+ // The GNU header format is older than the USTAR and PAX standards and
+ // is not compatible with them. The GNU format supports
+ // arbitrary file sizes, filenames of arbitrary encoding and length,
+ // sparse files, and other features.
+ //
+ // It is recommended that PAX be chosen over GNU unless the target
+ // application can only parse GNU formatted archives.
+ //
+ // Reference:
+ // https://www.gnu.org/software/tar/manual/html_node/Standard.html
+ FormatGNU
+
+ // Schily's tar format, which is incompatible with USTAR.
+ // This does not cover STAR extensions to the PAX format; these fall under
+ // the PAX format.
+ formatSTAR
+
+ formatMax
+)
+
+func (f Format) has(f2 Format) bool { return f&f2 != 0 }
+func (f *Format) mayBe(f2 Format) { *f |= f2 }
+func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 }
+func (f *Format) mustNotBe(f2 Format) { *f &^= f2 }
+
+var formatNames = map[Format]string{
+ formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR",
+}
+
+func (f Format) String() string {
+ var ss []string
+ for f2 := Format(1); f2 < formatMax; f2 <<= 1 {
+ if f.has(f2) {
+ ss = append(ss, formatNames[f2])
+ }
+ }
+ switch len(ss) {
+ case 0:
+ return ""
+ case 1:
+ return ss[0]
+ default:
+ return "(" + strings.Join(ss, " | ") + ")"
+ }
+}
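+
+// For example, (FormatUSTAR | FormatPAX).String() returns "(USTAR | PAX)".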
+
+// Magics used to identify various formats.
+const (
+ magicGNU, versionGNU = "ustar ", " \x00"
+ magicUSTAR, versionUSTAR = "ustar\x00", "00"
+ trailerSTAR = "tar\x00"
+)
+
+// Size constants from various tar specifications.
+const (
+ blockSize = 512 // Size of each block in a tar stream
+ nameSize = 100 // Max length of the name field in USTAR format
+ prefixSize = 155 // Max length of the prefix field in USTAR format
+)
+
+// blockPadding computes the number of bytes needed to pad offset up to the
+// nearest block edge where 0 <= n < blockSize.
+func blockPadding(offset int64) (n int64) {
+ return -offset & (blockSize - 1)
+}
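+
+// For example, blockPadding(100) == 412 (so that 100+412 == 512) and
+// blockPadding(512) == 0. The bitmask is valid because blockSize is a
+// power of two.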
+
+var zeroBlock block
+
+type block [blockSize]byte
+
+// Convert block to any number of formats.
+func (b *block) V7() *headerV7 { return (*headerV7)(b) }
+func (b *block) GNU() *headerGNU { return (*headerGNU)(b) }
+func (b *block) STAR() *headerSTAR { return (*headerSTAR)(b) }
+func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) }
+func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) }
+
+// GetFormat checks that the block is a valid tar header based on the checksum.
+// It then attempts to guess the specific format based on magic values.
+// If the checksum fails, then FormatUnknown is returned.
+func (b *block) GetFormat() Format {
+ // Verify checksum.
+ var p parser
+ value := p.parseOctal(b.V7().Chksum())
+ chksum1, chksum2 := b.ComputeChecksum()
+ if p.err != nil || (value != chksum1 && value != chksum2) {
+ return FormatUnknown
+ }
+
+ // Guess the magic values.
+ magic := string(b.USTAR().Magic())
+ version := string(b.USTAR().Version())
+ trailer := string(b.STAR().Trailer())
+ switch {
+ case magic == magicUSTAR && trailer == trailerSTAR:
+ return formatSTAR
+ case magic == magicUSTAR:
+ return FormatUSTAR | FormatPAX
+ case magic == magicGNU && version == versionGNU:
+ return FormatGNU
+ default:
+ return formatV7
+ }
+}
+
+// SetFormat writes the magic values necessary for specified format
+// and then updates the checksum accordingly.
+func (b *block) SetFormat(format Format) {
+ // Set the magic values.
+ switch {
+ case format.has(formatV7):
+ // Do nothing.
+ case format.has(FormatGNU):
+ copy(b.GNU().Magic(), magicGNU)
+ copy(b.GNU().Version(), versionGNU)
+ case format.has(formatSTAR):
+ copy(b.STAR().Magic(), magicUSTAR)
+ copy(b.STAR().Version(), versionUSTAR)
+ copy(b.STAR().Trailer(), trailerSTAR)
+ case format.has(FormatUSTAR | FormatPAX):
+ copy(b.USTAR().Magic(), magicUSTAR)
+ copy(b.USTAR().Version(), versionUSTAR)
+ default:
+ panic("invalid format")
+ }
+
+ // Update checksum.
+ // This field is special in that it is terminated by a NULL then space.
+ var f formatter
+ field := b.V7().Chksum()
+ chksum, _ := b.ComputeChecksum() // Possible values are 256..128776
+ f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143
+ field[7] = ' '
+}
+
+// ComputeChecksum computes the checksum for the header block.
+// POSIX specifies a sum of the unsigned byte values, but the Sun tar used
+// signed byte values.
+// We compute and return both.
+func (b *block) ComputeChecksum() (unsigned, signed int64) {
+ for i, c := range b {
+ if 148 <= i && i < 156 {
+ c = ' ' // Treat the checksum field itself as all spaces.
+ }
+ unsigned += int64(c)
+ signed += int64(int8(c))
+ }
+ return unsigned, signed
+}
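+
+// For example, an all-zero block sums to 256 unsigned (eight ' ' bytes for
+// the checksum field itself), which is the lower bound of the 256..128776
+// range noted in SetFormat.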
+
+// Reset clears the block with all zeros.
+func (b *block) Reset() {
+ *b = block{}
+}
+
+type headerV7 [blockSize]byte
+
+func (h *headerV7) Name() []byte { return h[000:][:100] }
+func (h *headerV7) Mode() []byte { return h[100:][:8] }
+func (h *headerV7) UID() []byte { return h[108:][:8] }
+func (h *headerV7) GID() []byte { return h[116:][:8] }
+func (h *headerV7) Size() []byte { return h[124:][:12] }
+func (h *headerV7) ModTime() []byte { return h[136:][:12] }
+func (h *headerV7) Chksum() []byte { return h[148:][:8] }
+func (h *headerV7) TypeFlag() []byte { return h[156:][:1] }
+func (h *headerV7) LinkName() []byte { return h[157:][:100] }
+
+type headerGNU [blockSize]byte
+
+func (h *headerGNU) V7() *headerV7 { return (*headerV7)(h) }
+func (h *headerGNU) Magic() []byte { return h[257:][:6] }
+func (h *headerGNU) Version() []byte { return h[263:][:2] }
+func (h *headerGNU) UserName() []byte { return h[265:][:32] }
+func (h *headerGNU) GroupName() []byte { return h[297:][:32] }
+func (h *headerGNU) DevMajor() []byte { return h[329:][:8] }
+func (h *headerGNU) DevMinor() []byte { return h[337:][:8] }
+func (h *headerGNU) AccessTime() []byte { return h[345:][:12] }
+func (h *headerGNU) ChangeTime() []byte { return h[357:][:12] }
+func (h *headerGNU) Sparse() sparseArray { return (sparseArray)(h[386:][:24*4+1]) }
+func (h *headerGNU) RealSize() []byte { return h[483:][:12] }
+
+type headerSTAR [blockSize]byte
+
+func (h *headerSTAR) V7() *headerV7 { return (*headerV7)(h) }
+func (h *headerSTAR) Magic() []byte { return h[257:][:6] }
+func (h *headerSTAR) Version() []byte { return h[263:][:2] }
+func (h *headerSTAR) UserName() []byte { return h[265:][:32] }
+func (h *headerSTAR) GroupName() []byte { return h[297:][:32] }
+func (h *headerSTAR) DevMajor() []byte { return h[329:][:8] }
+func (h *headerSTAR) DevMinor() []byte { return h[337:][:8] }
+func (h *headerSTAR) Prefix() []byte { return h[345:][:131] }
+func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] }
+func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] }
+func (h *headerSTAR) Trailer() []byte { return h[508:][:4] }
+
+type headerUSTAR [blockSize]byte
+
+func (h *headerUSTAR) V7() *headerV7 { return (*headerV7)(h) }
+func (h *headerUSTAR) Magic() []byte { return h[257:][:6] }
+func (h *headerUSTAR) Version() []byte { return h[263:][:2] }
+func (h *headerUSTAR) UserName() []byte { return h[265:][:32] }
+func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] }
+func (h *headerUSTAR) DevMajor() []byte { return h[329:][:8] }
+func (h *headerUSTAR) DevMinor() []byte { return h[337:][:8] }
+func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] }
+
+type sparseArray []byte
+
+func (s sparseArray) Entry(i int) sparseElem { return (sparseElem)(s[i*24:]) }
+func (s sparseArray) IsExtended() []byte { return s[24*s.MaxEntries():][:1] }
+func (s sparseArray) MaxEntries() int { return len(s) / 24 }
+
+type sparseElem []byte
+
+func (s sparseElem) Offset() []byte { return s[00:][:12] }
+func (s sparseElem) Length() []byte { return s[12:][:12] }
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
new file mode 100644
index 000000000000..af006fc92e8f
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
@@ -0,0 +1,924 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+ "bytes"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Reader provides sequential access to the contents of a tar archive.
+// Reader.Next advances to the next file in the archive (including the first),
+// and then Reader can be treated as an io.Reader to access the file's data.
+type Reader struct {
+ r io.Reader
+ pad int64 // Amount of padding (ignored) after current file entry
+ curr fileReader // Reader for current file entry
+ blk block // Buffer to use as temporary local storage
+
+ // err is a persistent error.
+ // It is only the responsibility of every exported method of Reader to
+ // ensure that this error is sticky.
+ err error
+
+	RawAccounting bool          // Whether to retain the raw bytes needed to reassemble the tar stream; this costs some performance and memory.
+ rawBytes *bytes.Buffer // last raw bits
+}
+
+type fileReader interface {
+ io.Reader
+ fileState
+
+ WriteTo(io.Writer) (int64, error)
+}
+
+// RawBytes accesses the raw bytes of the archive, apart from the file payload
+// itself. This includes the header and padding.
+//
+// This call resets the current rawBytes buffer.
+//
+// It returns nil unless RawAccounting is enabled.
+func (tr *Reader) RawBytes() []byte {
+ if !tr.RawAccounting {
+ return nil
+ }
+ if tr.rawBytes == nil {
+ tr.rawBytes = bytes.NewBuffer(nil)
+ }
+ defer tr.rawBytes.Reset() // if we've read them, then flush them.
+
+	return tr.rawBytes.Bytes()
+}
+
+// NewReader creates a new Reader reading from r.
+func NewReader(r io.Reader) *Reader {
+	return &Reader{r: r, curr: &regFileReader{r, 0}}
+}
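+
+// A typical read loop over an archive might look like this (sketch; f is a
+// placeholder for any io.Reader positioned at the start of a tar stream):
+//
+//	tr := NewReader(f)
+//	for {
+//		hdr, err := tr.Next()
+//		if err == io.EOF {
+//			break // End of archive
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		// hdr.Name, hdr.Size, etc. describe the entry; tr reads its body.
+//		if _, err := io.Copy(io.Discard, tr); err != nil {
+//			return err
+//		}
+//	}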
+
+// Next advances to the next entry in the tar archive.
+// The Header.Size determines how many bytes can be read for the next file.
+// Any remaining data in the current file is automatically discarded.
+//
+// io.EOF is returned at the end of the input.
+func (tr *Reader) Next() (*Header, error) {
+ if tr.err != nil {
+ return nil, tr.err
+ }
+ hdr, err := tr.next()
+ tr.err = err
+ return hdr, err
+}
+
+func (tr *Reader) next() (*Header, error) {
+ var paxHdrs map[string]string
+ var gnuLongName, gnuLongLink string
+
+ if tr.RawAccounting {
+ if tr.rawBytes == nil {
+ tr.rawBytes = bytes.NewBuffer(nil)
+ } else {
+ tr.rawBytes.Reset()
+ }
+ }
+
+ // Externally, Next iterates through the tar archive as if it is a series of
+ // files. Internally, the tar format often uses fake "files" to add meta
+ // data that describes the next file. These meta data "files" should not
+ // normally be visible to the outside. As such, this loop iterates through
+ // one or more "header files" until it finds a "normal file".
+ format := FormatUSTAR | FormatPAX | FormatGNU
+ for {
+ // Discard the remainder of the file and any padding.
+ if err := discard(tr, tr.curr.PhysicalRemaining()); err != nil {
+ return nil, err
+ }
+ n, err := tryReadFull(tr.r, tr.blk[:tr.pad])
+ if err != nil {
+ return nil, err
+ }
+ if tr.RawAccounting {
+ tr.rawBytes.Write(tr.blk[:n])
+ }
+ tr.pad = 0
+
+ hdr, rawHdr, err := tr.readHeader()
+ if err != nil {
+ return nil, err
+ }
+ if err := tr.handleRegularFile(hdr); err != nil {
+ return nil, err
+ }
+ format.mayOnlyBe(hdr.Format)
+
+ // Check for PAX/GNU special headers and files.
+ switch hdr.Typeflag {
+ case TypeXHeader, TypeXGlobalHeader:
+ format.mayOnlyBe(FormatPAX)
+ paxHdrs, err = parsePAX(tr)
+ if err != nil {
+ return nil, err
+ }
+ if hdr.Typeflag == TypeXGlobalHeader {
+ if err = mergePAX(hdr, paxHdrs); err != nil {
+ return nil, err
+ }
+ return &Header{
+ Name: hdr.Name,
+ Typeflag: hdr.Typeflag,
+ Xattrs: hdr.Xattrs,
+ PAXRecords: hdr.PAXRecords,
+ Format: format,
+ }, nil
+ }
+ continue // This is a meta header affecting the next header
+ case TypeGNULongName, TypeGNULongLink:
+ format.mayOnlyBe(FormatGNU)
+ realname, err := io.ReadAll(tr)
+ if err != nil {
+ return nil, err
+ }
+
+ if tr.RawAccounting {
+ tr.rawBytes.Write(realname)
+ }
+
+ var p parser
+ switch hdr.Typeflag {
+ case TypeGNULongName:
+ gnuLongName = p.parseString(realname)
+ case TypeGNULongLink:
+ gnuLongLink = p.parseString(realname)
+ }
+ continue // This is a meta header affecting the next header
+ default:
+ // The old GNU sparse format is handled here since it is technically
+ // just a regular file with additional attributes.
+
+ if err := mergePAX(hdr, paxHdrs); err != nil {
+ return nil, err
+ }
+ if gnuLongName != "" {
+ hdr.Name = gnuLongName
+ }
+ if gnuLongLink != "" {
+ hdr.Linkname = gnuLongLink
+ }
+ if hdr.Typeflag == TypeRegA {
+ if strings.HasSuffix(hdr.Name, "/") {
+ hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories
+ } else {
+ hdr.Typeflag = TypeReg
+ }
+ }
+
+ // The extended headers may have updated the size.
+ // Thus, setup the regFileReader again after merging PAX headers.
+ if err := tr.handleRegularFile(hdr); err != nil {
+ return nil, err
+ }
+
+ // Sparse formats rely on being able to read from the logical data
+ // section; there must be a preceding call to handleRegularFile.
+ if err := tr.handleSparseFile(hdr, rawHdr); err != nil {
+ return nil, err
+ }
+
+ // Set the final guess at the format.
+ if format.has(FormatUSTAR) && format.has(FormatPAX) {
+ format.mayOnlyBe(FormatUSTAR)
+ }
+ hdr.Format = format
+ return hdr, nil // This is a file, so stop
+ }
+ }
+}
+
+// handleRegularFile sets up the current file reader and padding such that it
+// can only read the following logical data section. It will properly handle
+// special headers that contain no data section.
+func (tr *Reader) handleRegularFile(hdr *Header) error {
+ nb := hdr.Size
+ if isHeaderOnlyType(hdr.Typeflag) {
+ nb = 0
+ }
+ if nb < 0 {
+ return ErrHeader
+ }
+
+ tr.pad = blockPadding(nb)
+	tr.curr = &regFileReader{r: tr.r, nb: nb}
+ return nil
+}
+
+// handleSparseFile checks if the current file is a sparse format of any type
+// and sets the curr reader appropriately.
+func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error {
+ var spd sparseDatas
+ var err error
+ if hdr.Typeflag == TypeGNUSparse {
+ spd, err = tr.readOldGNUSparseMap(hdr, rawHdr)
+ } else {
+ spd, err = tr.readGNUSparsePAXHeaders(hdr)
+ }
+
+ // If sp is non-nil, then this is a sparse file.
+ // Note that it is possible for len(sp) == 0.
+ if err == nil && spd != nil {
+ if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) {
+ return ErrHeader
+ }
+ sph := invertSparseEntries(spd, hdr.Size)
+ tr.curr = &sparseFileReader{tr.curr, sph, 0}
+ }
+ return err
+}
+
+// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers.
+// If they are found, then this function reads the sparse map and returns it.
+// This assumes that 0.0 headers have already been converted to 0.1 headers
+// by the PAX header parsing logic.
+func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) {
+ // Identify the version of GNU headers.
+ var is1x0 bool
+ major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor]
+ switch {
+ case major == "0" && (minor == "0" || minor == "1"):
+ is1x0 = false
+ case major == "1" && minor == "0":
+ is1x0 = true
+ case major != "" || minor != "":
+ return nil, nil // Unknown GNU sparse PAX version
+ case hdr.PAXRecords[paxGNUSparseMap] != "":
+ is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess
+ default:
+ return nil, nil // Not a PAX format GNU sparse file.
+ }
+ hdr.Format.mayOnlyBe(FormatPAX)
+
+ // Update hdr from GNU sparse PAX headers.
+ if name := hdr.PAXRecords[paxGNUSparseName]; name != "" {
+ hdr.Name = name
+ }
+ size := hdr.PAXRecords[paxGNUSparseSize]
+ if size == "" {
+ size = hdr.PAXRecords[paxGNUSparseRealSize]
+ }
+ if size != "" {
+ n, err := strconv.ParseInt(size, 10, 64)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ hdr.Size = n
+ }
+
+ // Read the sparse map according to the appropriate format.
+ if is1x0 {
+ return readGNUSparseMap1x0(tr.curr)
+ }
+ return readGNUSparseMap0x1(hdr.PAXRecords)
+}
+
+// mergePAX merges paxHdrs into hdr for all relevant fields of Header.
+func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
+ for k, v := range paxHdrs {
+ if v == "" {
+ continue // Keep the original USTAR value
+ }
+ var id64 int64
+ switch k {
+ case paxPath:
+ hdr.Name = v
+ case paxLinkpath:
+ hdr.Linkname = v
+ case paxUname:
+ hdr.Uname = v
+ case paxGname:
+ hdr.Gname = v
+ case paxUid:
+ id64, err = strconv.ParseInt(v, 10, 64)
+ hdr.Uid = int(id64) // Integer overflow possible
+ case paxGid:
+ id64, err = strconv.ParseInt(v, 10, 64)
+ hdr.Gid = int(id64) // Integer overflow possible
+ case paxAtime:
+ hdr.AccessTime, err = parsePAXTime(v)
+ case paxMtime:
+ hdr.ModTime, err = parsePAXTime(v)
+ case paxCtime:
+ hdr.ChangeTime, err = parsePAXTime(v)
+ case paxSize:
+ hdr.Size, err = strconv.ParseInt(v, 10, 64)
+ default:
+ if strings.HasPrefix(k, paxSchilyXattr) {
+ if hdr.Xattrs == nil {
+ hdr.Xattrs = make(map[string]string)
+ }
+ hdr.Xattrs[k[len(paxSchilyXattr):]] = v
+ }
+ }
+ if err != nil {
+ return ErrHeader
+ }
+ }
+ hdr.PAXRecords = paxHdrs
+ return nil
+}
+
+// parsePAX parses PAX headers.
+// If an extended header (type 'x') is invalid, ErrHeader is returned
+func parsePAX(r io.Reader) (map[string]string, error) {
+ buf, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+	// Keeping this function generic over io.Reader makes it easier to test.
+ if tr, ok := r.(*Reader); ok && tr.RawAccounting {
+ if _, err = tr.rawBytes.Write(buf); err != nil {
+ return nil, err
+ }
+ }
+ sbuf := string(buf)
+
+ // For GNU PAX sparse format 0.0 support.
+ // This function transforms the sparse format 0.0 headers into format 0.1
+ // headers since 0.0 headers were not PAX compliant.
+ var sparseMap []string
+
+ paxHdrs := make(map[string]string)
+ for len(sbuf) > 0 {
+ key, value, residual, err := parsePAXRecord(sbuf)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ sbuf = residual
+
+ switch key {
+ case paxGNUSparseOffset, paxGNUSparseNumBytes:
+ // Validate sparse header order and value.
+ if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) ||
+ (len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) ||
+ strings.Contains(value, ",") {
+ return nil, ErrHeader
+ }
+ sparseMap = append(sparseMap, value)
+ default:
+ paxHdrs[key] = value
+ }
+ }
+ if len(sparseMap) > 0 {
+ paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
+ }
+ return paxHdrs, nil
+}
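+
+// Each record parsed above has the wire form "%d %s=%s\n", where %d is the
+// length in bytes of the entire record, e.g. "30 mtime=1350244992.023960108\n".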
+
+// readHeader reads the next block header and assumes that the underlying reader
+// is already aligned to a block boundary. It returns the raw block of the
+// header in case further processing is required.
+//
+// The err will be set to io.EOF only when one of the following occurs:
+// - Exactly 0 bytes are read and EOF is hit.
+// - Exactly 1 block of zeros is read and EOF is hit.
+// - At least 2 blocks of zeros are read.
+func (tr *Reader) readHeader() (*Header, *block, error) {
+ // Two blocks of zero bytes marks the end of the archive.
+ n, err := io.ReadFull(tr.r, tr.blk[:])
+ if tr.RawAccounting && (err == nil || err == io.EOF) {
+ tr.rawBytes.Write(tr.blk[:n])
+ }
+ if err != nil {
+ return nil, nil, err // EOF is okay here; exactly 0 bytes read
+ }
+
+ if bytes.Equal(tr.blk[:], zeroBlock[:]) {
+ n, err = io.ReadFull(tr.r, tr.blk[:])
+ if tr.RawAccounting && (err == nil || err == io.EOF) {
+ tr.rawBytes.Write(tr.blk[:n])
+ }
+ if err != nil {
+ return nil, nil, err // EOF is okay here; exactly 1 block of zeros read
+ }
+ if bytes.Equal(tr.blk[:], zeroBlock[:]) {
+			return nil, nil, io.EOF // normal EOF; exactly 2 blocks of zeros read
+ }
+ return nil, nil, ErrHeader // Zero block and then non-zero block
+ }
+
+ // Verify the header matches a known format.
+ format := tr.blk.GetFormat()
+ if format == FormatUnknown {
+ return nil, nil, ErrHeader
+ }
+
+ var p parser
+ hdr := new(Header)
+
+ // Unpack the V7 header.
+ v7 := tr.blk.V7()
+ hdr.Typeflag = v7.TypeFlag()[0]
+ hdr.Name = p.parseString(v7.Name())
+ hdr.Linkname = p.parseString(v7.LinkName())
+ hdr.Size = p.parseNumeric(v7.Size())
+ hdr.Mode = p.parseNumeric(v7.Mode())
+ hdr.Uid = int(p.parseNumeric(v7.UID()))
+ hdr.Gid = int(p.parseNumeric(v7.GID()))
+ hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0)
+
+ // Unpack format specific fields.
+ if format > formatV7 {
+ ustar := tr.blk.USTAR()
+ hdr.Uname = p.parseString(ustar.UserName())
+ hdr.Gname = p.parseString(ustar.GroupName())
+ hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
+ hdr.Devminor = p.parseNumeric(ustar.DevMinor())
+
+ var prefix string
+ switch {
+ case format.has(FormatUSTAR | FormatPAX):
+ hdr.Format = format
+ ustar := tr.blk.USTAR()
+ prefix = p.parseString(ustar.Prefix())
+
+ // For Format detection, check if block is properly formatted since
+ // the parser is more liberal than what USTAR actually permits.
+ notASCII := func(r rune) bool { return r >= 0x80 }
+ if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 {
+ hdr.Format = FormatUnknown // Non-ASCII characters in block.
+ }
+ nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
+ if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) &&
+ nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) {
+ hdr.Format = FormatUnknown // Numeric fields must end in NUL
+ }
+ case format.has(formatSTAR):
+ star := tr.blk.STAR()
+ prefix = p.parseString(star.Prefix())
+ hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0)
+ hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0)
+ case format.has(FormatGNU):
+ hdr.Format = format
+ var p2 parser
+ gnu := tr.blk.GNU()
+ if b := gnu.AccessTime(); b[0] != 0 {
+ hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
+ }
+ if b := gnu.ChangeTime(); b[0] != 0 {
+ hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
+ }
+
+ // Prior to Go1.8, the Writer had a bug where it would output
+ // an invalid tar file in certain rare situations because the logic
+ // incorrectly believed that the old GNU format had a prefix field.
+ // This is wrong and leads to an output file that mangles the
+ // atime and ctime fields, which are often left unused.
+ //
+ // In order to continue reading tar files created by former, buggy
+ // versions of Go, we skeptically parse the atime and ctime fields.
+ // If we are unable to parse them and the prefix field looks like
+			// an ASCII string, then we fall back on the pre-Go1.8 behavior
+ // of treating these fields as the USTAR prefix field.
+ //
+ // Note that this will not use the fallback logic for all possible
+ // files generated by a pre-Go1.8 toolchain. If the generated file
+ // happened to have a prefix field that parses as valid
+ // atime and ctime fields (e.g., when they are valid octal strings),
+ // then it is impossible to distinguish between a valid GNU file
+ // and an invalid pre-Go1.8 file.
+ //
+ // See https://golang.org/issues/12594
+ // See https://golang.org/issues/21005
+ if p2.err != nil {
+ hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
+ ustar := tr.blk.USTAR()
+ if s := p.parseString(ustar.Prefix()); isASCII(s) {
+ prefix = s
+ }
+ hdr.Format = FormatUnknown // Buggy file is not GNU
+ }
+ }
+ if len(prefix) > 0 {
+ hdr.Name = prefix + "/" + hdr.Name
+ }
+ }
+ return hdr, &tr.blk, p.err
+}
+
+// readOldGNUSparseMap reads the sparse map from the old GNU sparse format.
+// The sparse map is stored in the tar header if it's small enough.
+// If it's larger than four entries, then one or more extension headers are used
+// to store the rest of the sparse map.
+//
+// The Header.Size does not reflect the size of any extended headers used.
+// Thus, this function will read from the raw io.Reader to fetch extra headers.
+// This method mutates blk in the process.
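+// In the classic GNU layout, the header block holds up to 4 sparse entries
+// and each extension block holds up to 21 more.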
+func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) {
+ // Make sure that the input format is GNU.
+ // Unfortunately, the STAR format also has a sparse header format that uses
+ // the same type flag but has a completely different layout.
+ if blk.GetFormat() != FormatGNU {
+ return nil, ErrHeader
+ }
+ hdr.Format.mayOnlyBe(FormatGNU)
+
+ var p parser
+ hdr.Size = p.parseNumeric(blk.GNU().RealSize())
+ if p.err != nil {
+ return nil, p.err
+ }
+ s := blk.GNU().Sparse()
+ spd := make(sparseDatas, 0, s.MaxEntries())
+ for {
+ for i := 0; i < s.MaxEntries(); i++ {
+ // This termination condition is identical to GNU and BSD tar.
+ if s.Entry(i).Offset()[0] == 0x00 {
+ break // Don't return, need to process extended headers (even if empty)
+ }
+ offset := p.parseNumeric(s.Entry(i).Offset())
+ length := p.parseNumeric(s.Entry(i).Length())
+ if p.err != nil {
+ return nil, p.err
+ }
+ spd = append(spd, sparseEntry{Offset: offset, Length: length})
+ }
+
+ if s.IsExtended()[0] > 0 {
+ // There are more entries. Read an extension header and parse its entries.
+ if _, err := mustReadFull(tr.r, blk[:]); err != nil {
+ return nil, err
+ }
+ if tr.RawAccounting {
+ tr.rawBytes.Write(blk[:])
+ }
+ s = blk.Sparse()
+ continue
+ }
+ return spd, nil // Done
+ }
+}
+
+// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
+// version 1.0. The format of the sparse map consists of a series of
+// newline-terminated numeric fields. The first field is the number of entries
+// and is always present. Following this are the entries, consisting of two
+// fields (offset, length). This function must stop reading at the end
+// boundary of the block containing the last newline.
+//
+// Note that the GNU manual says that numeric values should be encoded in octal
+// format. However, the GNU tar utility itself outputs these values in decimal.
+// As such, this library treats values as being encoded in decimal.
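+//
+// For example, a two-entry map might be encoded as "2\n0\n512\n10240\n512\n",
+// with the remainder of its 512-byte block zero-filled.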
+func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
+ var (
+ cntNewline int64
+ buf bytes.Buffer
+ blk block
+ )
+
+ // feedTokens copies data in blocks from r into buf until there are
+ // at least n newlines in buf. It will not read more blocks than needed.
+ feedTokens := func(n int64) error {
+ for cntNewline < n {
+ if _, err := mustReadFull(r, blk[:]); err != nil {
+ return err
+ }
+ buf.Write(blk[:])
+ for _, c := range blk {
+ if c == '\n' {
+ cntNewline++
+ }
+ }
+ }
+ return nil
+ }
+
+ // nextToken gets the next token delimited by a newline. This assumes that
+ // at least one newline exists in the buffer.
+ nextToken := func() string {
+ cntNewline--
+ tok, _ := buf.ReadString('\n')
+ return strings.TrimRight(tok, "\n")
+ }
+
+ // Parse for the number of entries.
+ // Use integer overflow resistant math to check this.
+ if err := feedTokens(1); err != nil {
+ return nil, err
+ }
+ numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
+ if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
+ return nil, ErrHeader
+ }
+
+ // Parse for all member entries.
+ // numEntries is trusted after this since a potential attacker must have
+ // committed resources proportional to what this library used.
+ if err := feedTokens(2 * numEntries); err != nil {
+ return nil, err
+ }
+ spd := make(sparseDatas, 0, numEntries)
+ for i := int64(0); i < numEntries; i++ {
+ offset, err1 := strconv.ParseInt(nextToken(), 10, 64)
+ length, err2 := strconv.ParseInt(nextToken(), 10, 64)
+ if err1 != nil || err2 != nil {
+ return nil, ErrHeader
+ }
+ spd = append(spd, sparseEntry{Offset: offset, Length: length})
+ }
+ return spd, nil
+}
+
+// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
+// version 0.1. The sparse map is stored in the PAX headers.
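+//
+// For example, a two-entry map might appear as the records
+// GNU.sparse.numblocks="2" and GNU.sparse.map="0,512,10240,512".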
+func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
+ // Get number of entries.
+ // Use integer overflow resistant math to check this.
+ numEntriesStr := paxHdrs[paxGNUSparseNumBlocks]
+ numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
+ if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
+ return nil, ErrHeader
+ }
+
+ // There should be two numbers in sparseMap for each entry.
+ sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",")
+ if len(sparseMap) == 1 && sparseMap[0] == "" {
+ sparseMap = sparseMap[:0]
+ }
+ if int64(len(sparseMap)) != 2*numEntries {
+ return nil, ErrHeader
+ }
+
+ // Loop through the entries in the sparse map.
+ // numEntries is trusted now.
+ spd := make(sparseDatas, 0, numEntries)
+ for len(sparseMap) >= 2 {
+ offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64)
+ length, err2 := strconv.ParseInt(sparseMap[1], 10, 64)
+ if err1 != nil || err2 != nil {
+ return nil, ErrHeader
+ }
+ spd = append(spd, sparseEntry{Offset: offset, Length: length})
+ sparseMap = sparseMap[2:]
+ }
+ return spd, nil
+}
+
+// Read reads from the current file in the tar archive.
+// It returns (0, io.EOF) when it reaches the end of that file,
+// until Next is called to advance to the next file.
+//
+// If the current file is sparse, then the regions marked as a hole
+// are read back as NUL-bytes.
+//
+// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
+// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what
+// the Header.Size claims.
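+//
+// A typical consumer (a sketch, not from this file) drains each entry in a
+// Next loop:
+//
+//	for {
+//		hdr, err := tr.Next()
+//		if err == io.EOF {
+//			break // End of archive
+//		}
+//		// handle err, inspect hdr, then io.Copy(dst, tr)
+//	}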
+func (tr *Reader) Read(b []byte) (int, error) {
+ if tr.err != nil {
+ return 0, tr.err
+ }
+ n, err := tr.curr.Read(b)
+ if err != nil && err != io.EOF {
+ tr.err = err
+ }
+ return n, err
+}
+
+// writeTo writes the content of the current file to w.
+// The bytes written match the number of remaining bytes in the current file.
+//
+// If the current file is sparse and w is an io.WriteSeeker,
+// then writeTo uses Seek to skip past holes defined in Header.SparseHoles,
+// assuming that skipped regions are filled with NULs.
+// This always writes the last byte to ensure w is the right size.
+//
+// TODO(dsnet): Re-export this when adding sparse file support.
+// See https://golang.org/issue/22735
+func (tr *Reader) writeTo(w io.Writer) (int64, error) {
+ if tr.err != nil {
+ return 0, tr.err
+ }
+ n, err := tr.curr.WriteTo(w)
+ if err != nil {
+ tr.err = err
+ }
+ return n, err
+}
+
+// regFileReader is a fileReader for reading data from a regular file entry.
+type regFileReader struct {
+ r io.Reader // Underlying Reader
+ nb int64 // Number of remaining bytes to read
+}
+
+func (fr *regFileReader) Read(b []byte) (n int, err error) {
+ if int64(len(b)) > fr.nb {
+ b = b[:fr.nb]
+ }
+ if len(b) > 0 {
+ n, err = fr.r.Read(b)
+ fr.nb -= int64(n)
+ }
+ switch {
+ case err == io.EOF && fr.nb > 0:
+ return n, io.ErrUnexpectedEOF
+ case err == nil && fr.nb == 0:
+ return n, io.EOF
+ default:
+ return n, err
+ }
+}
+
+func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) {
+ return io.Copy(w, struct{ io.Reader }{fr})
+}
+
+func (fr regFileReader) LogicalRemaining() int64 {
+ return fr.nb
+}
+
+func (fr regFileReader) PhysicalRemaining() int64 {
+ return fr.nb
+}
+
+// sparseFileReader is a fileReader for reading data from a sparse file entry.
+type sparseFileReader struct {
+ fr fileReader // Underlying fileReader
+ sp sparseHoles // Normalized list of sparse holes
+ pos int64 // Current position in sparse file
+}
+
+func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
+ finished := int64(len(b)) >= sr.LogicalRemaining()
+ if finished {
+ b = b[:sr.LogicalRemaining()]
+ }
+
+ b0 := b
+ endPos := sr.pos + int64(len(b))
+ for endPos > sr.pos && err == nil {
+ var nf int // Bytes read in fragment
+ holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
+ if sr.pos < holeStart { // In a data fragment
+ bf := b[:min(int64(len(b)), holeStart-sr.pos)]
+ nf, err = tryReadFull(sr.fr, bf)
+ } else { // In a hole fragment
+ bf := b[:min(int64(len(b)), holeEnd-sr.pos)]
+ nf, err = tryReadFull(zeroReader{}, bf)
+ }
+ b = b[nf:]
+ sr.pos += int64(nf)
+ if sr.pos >= holeEnd && len(sr.sp) > 1 {
+ sr.sp = sr.sp[1:] // Ensure last fragment always remains
+ }
+ }
+
+ n = len(b0) - len(b)
+ switch {
+ case err == io.EOF:
+ return n, errMissData // Less data in dense file than sparse file
+ case err != nil:
+ return n, err
+ case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
+ return n, errUnrefData // More data in dense file than sparse file
+ case finished:
+ return n, io.EOF
+ default:
+ return n, nil
+ }
+}
+
+func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
+ ws, ok := w.(io.WriteSeeker)
+ if ok {
+ if _, err := ws.Seek(0, io.SeekCurrent); err != nil {
+ ok = false // Not all io.Seeker can really seek
+ }
+ }
+ if !ok {
+ return io.Copy(w, struct{ io.Reader }{sr})
+ }
+
+ var writeLastByte bool
+ pos0 := sr.pos
+ for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil {
+ var nf int64 // Size of fragment
+ holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
+ if sr.pos < holeStart { // In a data fragment
+ nf = holeStart - sr.pos
+ nf, err = io.CopyN(ws, sr.fr, nf)
+ } else { // In a hole fragment
+ nf = holeEnd - sr.pos
+ if sr.PhysicalRemaining() == 0 {
+ writeLastByte = true
+ nf--
+ }
+ _, err = ws.Seek(nf, io.SeekCurrent)
+ }
+ sr.pos += nf
+ if sr.pos >= holeEnd && len(sr.sp) > 1 {
+ sr.sp = sr.sp[1:] // Ensure last fragment always remains
+ }
+ }
+
+ // If the last fragment is a hole, then seek to 1-byte before EOF, and
+ // write a single byte to ensure the file is the right size.
+ if writeLastByte && err == nil {
+ _, err = ws.Write([]byte{0})
+ sr.pos++
+ }
+
+ n = sr.pos - pos0
+ switch {
+ case err == io.EOF:
+ return n, errMissData // Less data in dense file than sparse file
+ case err != nil:
+ return n, err
+ case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
+ return n, errUnrefData // More data in dense file than sparse file
+ default:
+ return n, nil
+ }
+}
+
+func (sr sparseFileReader) LogicalRemaining() int64 {
+ return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
+}
+func (sr sparseFileReader) PhysicalRemaining() int64 {
+ return sr.fr.PhysicalRemaining()
+}
+
+type zeroReader struct{}
+
+func (zeroReader) Read(b []byte) (int, error) {
+ for i := range b {
+ b[i] = 0
+ }
+ return len(b), nil
+}
+
+// mustReadFull is like io.ReadFull except it returns
+// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read.
+func mustReadFull(r io.Reader, b []byte) (int, error) {
+ n, err := tryReadFull(r, b)
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return n, err
+}
+
+// tryReadFull is like io.ReadFull except it returns
+// io.EOF when it is hit before len(b) bytes are read.
+func tryReadFull(r io.Reader, b []byte) (n int, err error) {
+ for len(b) > n && err == nil {
+ var nn int
+ nn, err = r.Read(b[n:])
+ n += nn
+ }
+ if len(b) == n && err == io.EOF {
+ err = nil
+ }
+ return n, err
+}
+
+// discard skips n bytes in r, reporting an error if unable to do so.
+func discard(tr *Reader, n int64) error {
+ var seekSkipped, copySkipped int64
+ var err error
+ r := tr.r
+ if tr.RawAccounting {
+
+ copySkipped, err = io.CopyN(tr.rawBytes, tr.r, n)
+ goto out
+ }
+
+ // If possible, Seek to the last byte before the end of the data section.
+ // Do this because Seek is often lazy about reporting errors; this will mask
+ // the fact that the stream may be truncated. We can rely on the
+ // io.CopyN done shortly afterwards to trigger any IO errors.
+ if sr, ok := r.(io.Seeker); ok && n > 1 {
+ // Not all io.Seeker can actually Seek. For example, os.Stdin implements
+ // io.Seeker, but calling Seek always returns an error and performs
+ // no action. Thus, we try an innocent seek to the current position
+ // to see if Seek is really supported.
+ pos1, err := sr.Seek(0, io.SeekCurrent)
+ if pos1 >= 0 && err == nil {
+ // Seek seems supported, so perform the real Seek.
+ pos2, err := sr.Seek(n-1, io.SeekCurrent)
+ if pos2 < 0 || err != nil {
+ return err
+ }
+ seekSkipped = pos2 - pos1
+ }
+ }
+
+ copySkipped, err = io.CopyN(io.Discard, r, n-seekSkipped)
+out:
+ if err == io.EOF && seekSkipped+copySkipped < n {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go
new file mode 100644
index 000000000000..cf9cc79c5915
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux dragonfly openbsd solaris
+
+package tar
+
+import (
+ "syscall"
+ "time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Atim.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Ctim.Unix())
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go
new file mode 100644
index 000000000000..6f17dbe30725
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd netbsd
+
+package tar
+
+import (
+ "syscall"
+ "time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Atimespec.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Ctimespec.Unix())
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go
new file mode 100644
index 000000000000..868105f338ef
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go
@@ -0,0 +1,96 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin dragonfly freebsd openbsd netbsd solaris
+
+package tar
+
+import (
+ "os"
+ "os/user"
+ "runtime"
+ "strconv"
+ "sync"
+ "syscall"
+)
+
+func init() {
+ sysStat = statUnix
+}
+
+// userMap and groupMap cache UID and GID lookups for performance reasons.
+// The downside is that renaming uname or gname by the OS never takes effect.
+var userMap, groupMap sync.Map // map[int]string
+
+func statUnix(fi os.FileInfo, h *Header) error {
+ sys, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil
+ }
+ h.Uid = int(sys.Uid)
+ h.Gid = int(sys.Gid)
+
+ // Best effort at populating Uname and Gname.
+ // The os/user functions may fail for any number of reasons
+ // (not implemented on that platform, cgo not enabled, etc).
+ if u, ok := userMap.Load(h.Uid); ok {
+ h.Uname = u.(string)
+ } else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
+ h.Uname = u.Username
+ userMap.Store(h.Uid, h.Uname)
+ }
+ if g, ok := groupMap.Load(h.Gid); ok {
+ h.Gname = g.(string)
+ } else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
+ h.Gname = g.Name
+ groupMap.Store(h.Gid, h.Gname)
+ }
+
+ h.AccessTime = statAtime(sys)
+ h.ChangeTime = statCtime(sys)
+
+ // Best effort at populating Devmajor and Devminor.
+ if h.Typeflag == TypeChar || h.Typeflag == TypeBlock {
+ dev := uint64(sys.Rdev) // May be int32 or uint32
+ switch runtime.GOOS {
+ case "linux":
+ // Copied from golang.org/x/sys/unix/dev_linux.go.
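+ // For example, dev 0x0801 decodes to major 8, minor 1 (commonly /dev/sda1).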
+ major := uint32((dev & 0x00000000000fff00) >> 8)
+ major |= uint32((dev & 0xfffff00000000000) >> 32)
+ minor := uint32((dev & 0x00000000000000ff) >> 0)
+ minor |= uint32((dev & 0x00000ffffff00000) >> 12)
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
+ case "darwin":
+ // Copied from golang.org/x/sys/unix/dev_darwin.go.
+ major := uint32((dev >> 24) & 0xff)
+ minor := uint32(dev & 0xffffff)
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
+ case "dragonfly":
+ // Copied from golang.org/x/sys/unix/dev_dragonfly.go.
+ major := uint32((dev >> 8) & 0xff)
+ minor := uint32(dev & 0xffff00ff)
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
+ case "freebsd":
+ // Copied from golang.org/x/sys/unix/dev_freebsd.go.
+ major := uint32((dev >> 8) & 0xff)
+ minor := uint32(dev & 0xffff00ff)
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
+ case "netbsd":
+ // Copied from golang.org/x/sys/unix/dev_netbsd.go.
+ major := uint32((dev & 0x000fff00) >> 8)
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xfff00000) >> 12)
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
+ case "openbsd":
+ // Copied from golang.org/x/sys/unix/dev_openbsd.go.
+ major := uint32((dev & 0x0000ff00) >> 8)
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xffff0000) >> 8)
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
+ default:
+ // TODO: Implement solaris (see https://golang.org/issue/8106)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/strconv.go b/vendor/github.com/vbatts/tar-split/archive/tar/strconv.go
new file mode 100644
index 000000000000..d144485a4924
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/strconv.go
@@ -0,0 +1,326 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// hasNUL reports whether the NUL character exists within s.
+func hasNUL(s string) bool {
+ return strings.IndexByte(s, 0) >= 0
+}
+
+// isASCII reports whether the input is an ASCII C-style string.
+func isASCII(s string) bool {
+ for _, c := range s {
+ if c >= 0x80 || c == 0x00 {
+ return false
+ }
+ }
+ return true
+}
+
+// toASCII converts the input to an ASCII C-style string.
+// This is a best-effort conversion, so invalid characters are dropped.
+func toASCII(s string) string {
+ if isASCII(s) {
+ return s
+ }
+ b := make([]byte, 0, len(s))
+ for _, c := range s {
+ if c < 0x80 && c != 0x00 {
+ b = append(b, byte(c))
+ }
+ }
+ return string(b)
+}
+
+type parser struct {
+ err error // Last error seen
+}
+
+type formatter struct {
+ err error // Last error seen
+}
+
+// parseString parses bytes as a NUL-terminated C-style string.
+// If a NUL byte is not found then the whole slice is returned as a string.
+func (*parser) parseString(b []byte) string {
+ if i := bytes.IndexByte(b, 0); i >= 0 {
+ return string(b[:i])
+ }
+ return string(b)
+}
+
+// formatString copies s into b, NUL-terminating if possible.
+func (f *formatter) formatString(b []byte, s string) {
+ if len(s) > len(b) {
+ f.err = ErrFieldTooLong
+ }
+ copy(b, s)
+ if len(s) < len(b) {
+ b[len(s)] = 0
+ }
+
+ // Some buggy readers treat regular files with a trailing slash
+ // in the V7 path field as a directory even though the full path
+ // recorded elsewhere (e.g., via PAX record) contains no trailing slash.
+ if len(s) > len(b) && b[len(b)-1] == '/' {
+ n := len(strings.TrimRight(s[:len(b)], "/"))
+ b[n] = 0 // Replace trailing slash with NUL terminator
+ }
+}
+
+// fitsInBase256 reports whether x can be encoded into n bytes using base-256
+// encoding. Unlike octal encoding, base-256 encoding does not require that the
+// string ends with a NUL character. Thus, all n bytes are available for output.
+//
+// If operating in binary mode, this assumes strict GNU binary mode; which means
+// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
+// equivalent to the sign bit in two's complement form.
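+//
+// For example, an 8-byte field can hold base-256 values in [-1<<56, 1<<56).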
+func fitsInBase256(n int, x int64) bool {
+ binBits := uint(n-1) * 8
+ return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
+}
+
+// parseNumeric parses the input as being encoded in either base-256 or octal.
+// This function may return negative numbers.
+// If parsing fails or an integer overflow occurs, err will be set.
+func (p *parser) parseNumeric(b []byte) int64 {
+ // Check for base-256 (binary) format first.
+ // If the first bit is set, then all following bits constitute a two's
+ // complement encoded number in big-endian byte order.
+ if len(b) > 0 && b[0]&0x80 != 0 {
+ // Handling negative numbers relies on the following identity:
+ // -a-1 == ^a
+ //
+ // If the number is negative, we use an inversion mask to invert the
+ // data bytes and treat the value as an unsigned number.
+ var inv byte // 0x00 if positive or zero, 0xff if negative
+ if b[0]&0x40 != 0 {
+ inv = 0xff
+ }
+
+ var x uint64
+ for i, c := range b {
+ c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
+ if i == 0 {
+ c &= 0x7f // Ignore sign bit in first byte
+ }
+ if (x >> 56) > 0 {
+ p.err = ErrHeader // Integer overflow
+ return 0
+ }
+ x = x<<8 | uint64(c)
+ }
+ if (x >> 63) > 0 {
+ p.err = ErrHeader // Integer overflow
+ return 0
+ }
+ if inv == 0xff {
+ return ^int64(x)
+ }
+ return int64(x)
+ }
+
+ // Normal case is base-8 (octal) format.
+ return p.parseOctal(b)
+}
+
+// formatNumeric encodes x into b using base-8 (octal) encoding if possible.
+// Otherwise it will attempt to use base-256 (binary) encoding.
+func (f *formatter) formatNumeric(b []byte, x int64) {
+ if fitsInOctal(len(b), x) {
+ f.formatOctal(b, x)
+ return
+ }
+
+ if fitsInBase256(len(b), x) {
+ for i := len(b) - 1; i >= 0; i-- {
+ b[i] = byte(x)
+ x >>= 8
+ }
+ b[0] |= 0x80 // Highest bit indicates binary format
+ return
+ }
+
+ f.formatOctal(b, 0) // Last resort, just write zero
+ f.err = ErrFieldTooLong
+}
+
+func (p *parser) parseOctal(b []byte) int64 {
+ // Because unused fields are filled with NULs, we need
+ // to skip leading NULs. Fields may also be padded with
+ // spaces or NULs.
+ // So we remove leading and trailing NULs and spaces to
+ // be sure.
+ b = bytes.Trim(b, " \x00")
+
+ if len(b) == 0 {
+ return 0
+ }
+ x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
+ if perr != nil {
+ p.err = ErrHeader
+ }
+ return int64(x)
+}
+
+func (f *formatter) formatOctal(b []byte, x int64) {
+ if !fitsInOctal(len(b), x) {
+ x = 0 // Last resort, just write zero
+ f.err = ErrFieldTooLong
+ }
+
+ s := strconv.FormatInt(x, 8)
+ // Add leading zeros, but leave room for a NUL.
+ if n := len(b) - len(s) - 1; n > 0 {
+ s = strings.Repeat("0", n) + s
+ }
+ f.formatString(b, s)
+}
+
+// fitsInOctal reports whether the integer x fits in a field n-bytes long
+// using octal encoding with the appropriate NUL terminator.
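+//
+// For example, a 12-byte field holds octal values up to 1<<33-1 (just under 8 GiB).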
+func fitsInOctal(n int, x int64) bool {
+ octBits := uint(n-1) * 3
+ return x >= 0 && (n >= 22 || x < 1<<octBits)
+}
+
+// parsePAXTime takes a string of the form %d.%d as described in the PAX
+// specification. Note that this implementation allows for negative timestamps,
+// which is allowed for by the PAX specification, but not always portable.
+func parsePAXTime(s string) (time.Time, error) {
+ const maxNanoSecondDigits = 9
+
+ // Split string into seconds and sub-seconds parts.
+ ss, sn := s, ""
+ if pos := strings.IndexByte(s, '.'); pos >= 0 {
+ ss, sn = s[:pos], s[pos+1:]
+ }
+
+ // Parse the seconds.
+ secs, err := strconv.ParseInt(ss, 10, 64)
+ if err != nil {
+ return time.Time{}, ErrHeader
+ }
+ if len(sn) == 0 {
+ return time.Unix(secs, 0), nil // No sub-second values
+ }
+
+ // Parse the nanoseconds.
+ if strings.Trim(sn, "0123456789") != "" {
+ return time.Time{}, ErrHeader
+ }
+ if len(sn) < maxNanoSecondDigits {
+ sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
+ } else {
+ sn = sn[:maxNanoSecondDigits] // Right truncate
+ }
+ nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
+ if len(ss) > 0 && ss[0] == '-' {
+ return time.Unix(secs, -1*nsecs), nil // Negative correction
+ }
+ return time.Unix(secs, nsecs), nil
+}
+
+// formatPAXTime converts ts into a time of the form %d.%d as described in the
+// PAX specification. This function is capable of negative timestamps.
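+//
+// For example, formatPAXTime(time.Unix(1350244992, 23960108)) returns
+// "1350244992.023960108".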
+func formatPAXTime(ts time.Time) (s string) {
+ secs, nsecs := ts.Unix(), ts.Nanosecond()
+ if nsecs == 0 {
+ return strconv.FormatInt(secs, 10)
+ }
+
+ // If seconds is negative, then perform correction.
+ sign := ""
+ if secs < 0 {
+ sign = "-" // Remember sign
+ secs = -(secs + 1) // Add a second to secs
+ nsecs = -(nsecs - 1E9) // Take that second away from nsecs
+ }
+ return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
+}
+
+// parsePAXRecord parses the input PAX record string into a key-value pair.
+// If parsing is successful, it will slice off the currently read record and
+// return the remainder as r.
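+//
+// For example, parsePAXRecord("30 mtime=1350244992.023960108\n") yields
+// k="mtime" and v="1350244992.023960108".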
+func parsePAXRecord(s string) (k, v, r string, err error) {
+ // The size field ends at the first space.
+ sp := strings.IndexByte(s, ' ')
+ if sp == -1 {
+ return "", "", s, ErrHeader
+ }
+
+ // Parse the first token as a decimal integer.
+ n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
+ if perr != nil || n < 5 || int64(len(s)) < n {
+ return "", "", s, ErrHeader
+ }
+
+ // Extract everything between the space and the final newline.
+ rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
+ if nl != "\n" {
+ return "", "", s, ErrHeader
+ }
+
+ // The first equals separates the key from the value.
+ eq := strings.IndexByte(rec, '=')
+ if eq == -1 {
+ return "", "", s, ErrHeader
+ }
+ k, v = rec[:eq], rec[eq+1:]
+
+ if !validPAXRecord(k, v) {
+ return "", "", s, ErrHeader
+ }
+ return k, v, rem, nil
+}
+
+// formatPAXRecord formats a single PAX record, prefixing it with the
+// appropriate length.
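+//
+// For example, formatPAXRecord("mtime", "1350244992.023960108") returns
+// "30 mtime=1350244992.023960108\n", where 30 counts the entire record.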
+func formatPAXRecord(k, v string) (string, error) {
+ if !validPAXRecord(k, v) {
+ return "", ErrHeader
+ }
+
+ const padding = 3 // Extra padding for ' ', '=', and '\n'
+ size := len(k) + len(v) + padding
+ size += len(strconv.Itoa(size))
+ record := strconv.Itoa(size) + " " + k + "=" + v + "\n"
+
+ // Final adjustment if adding size field increased the record size.
+ if len(record) != size {
+ size = len(record)
+ record = strconv.Itoa(size) + " " + k + "=" + v + "\n"
+ }
+ return record, nil
+}
+
+// validPAXRecord reports whether the key-value pair is valid where each
+// record is formatted as:
+// "%d %s=%s\n" % (size, key, value)
+//
+// Keys and values should be UTF-8, but the number of bad writers out there
+// forces us to be more liberal.
+// Thus, we only reject all keys with NUL, and only reject NULs in values
+// for the PAX version of the USTAR string fields.
+// The key must not contain an '=' character.
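+//
+// For example, ("path", "foo/bar") is valid, while ("a=b", "c") and
+// ("", "x") are not.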
+func validPAXRecord(k, v string) bool {
+ if k == "" || strings.IndexByte(k, '=') >= 0 {
+ return false
+ }
+ switch k {
+ case paxPath, paxLinkpath, paxUname, paxGname:
+ return !hasNUL(v)
+ default:
+ return !hasNUL(k)
+ }
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/writer.go b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
new file mode 100644
index 000000000000..e80498d03e39
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
@@ -0,0 +1,653 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "sort"
+ "strings"
+ "time"
+)
+
+// Writer provides sequential writing of a tar archive.
+// Writer.WriteHeader begins a new file with the provided Header,
+// and then Writer can be treated as an io.Writer to supply that file's data.
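+//
+// A minimal usage sketch (assuming w is an io.Writer and data is a []byte):
+//
+//	tw := NewWriter(w)
+//	hdr := &Header{Name: "foo.txt", Mode: 0600, Size: int64(len(data))}
+//	if err := tw.WriteHeader(hdr); err != nil {
+//		return err
+//	}
+//	if _, err := tw.Write(data); err != nil {
+//		return err
+//	}
+//	if err := tw.Close(); err != nil {
+//		return err
+//	}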
+type Writer struct {
+ w io.Writer
+ pad int64 // Amount of padding to write after current file entry
+ curr fileWriter // Writer for current file entry
+ hdr Header // Shallow copy of Header that is safe for mutations
+ blk block // Buffer to use as temporary local storage
+
+ // err is a persistent error.
+ // Only the exported methods of Writer are responsible for
+ // ensuring that this error remains sticky.
+ err error
+}
+
+// NewWriter creates a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{w: w, curr: &regFileWriter{w, 0}}
+}
+
+type fileWriter interface {
+ io.Writer
+ fileState
+
+ ReadFrom(io.Reader) (int64, error)
+}
+
+// Flush finishes writing the current file's block padding.
+// The current file must be fully written before Flush can be called.
+//
+// This is unnecessary as the next call to WriteHeader or Close
+// will implicitly flush out the file's padding.
+func (tw *Writer) Flush() error {
+ if tw.err != nil {
+ return tw.err
+ }
+ if nb := tw.curr.LogicalRemaining(); nb > 0 {
+ return fmt.Errorf("archive/tar: missed writing %d bytes", nb)
+ }
+ if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil {
+ return tw.err
+ }
+ tw.pad = 0
+ return nil
+}
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// The Header.Size determines how many bytes can be written for the next file.
+// If the current file is not fully written, then this returns an error.
+// This implicitly flushes any padding necessary before writing the header.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+ if err := tw.Flush(); err != nil {
+ return err
+ }
+ tw.hdr = *hdr // Shallow copy of Header
+
+ // Avoid usage of the legacy TypeRegA flag, and automatically promote
+ // it to use TypeReg or TypeDir.
+ if tw.hdr.Typeflag == TypeRegA {
+ if strings.HasSuffix(tw.hdr.Name, "/") {
+ tw.hdr.Typeflag = TypeDir
+ } else {
+ tw.hdr.Typeflag = TypeReg
+ }
+ }
+
+ // Round ModTime and ignore AccessTime and ChangeTime unless
+ // the format is explicitly chosen.
+ // This ensures nominal usage of WriteHeader (without specifying the format)
+ // does not always result in the PAX format being chosen, which
+ // causes a 1KiB increase to every header.
+ if tw.hdr.Format == FormatUnknown {
+ tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second)
+ tw.hdr.AccessTime = time.Time{}
+ tw.hdr.ChangeTime = time.Time{}
+ }
+
+ allowedFormats, paxHdrs, err := tw.hdr.allowedFormats()
+ switch {
+ case allowedFormats.has(FormatUSTAR):
+ tw.err = tw.writeUSTARHeader(&tw.hdr)
+ return tw.err
+ case allowedFormats.has(FormatPAX):
+ tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs)
+ return tw.err
+ case allowedFormats.has(FormatGNU):
+ tw.err = tw.writeGNUHeader(&tw.hdr)
+ return tw.err
+ default:
+ return err // Non-fatal error
+ }
+}
+
+func (tw *Writer) writeUSTARHeader(hdr *Header) error {
+ // Check if we can use USTAR prefix/suffix splitting.
+ var namePrefix string
+ if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok {
+ namePrefix, hdr.Name = prefix, suffix
+ }
+
+ // Pack the main header.
+ var f formatter
+ blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
+ f.formatString(blk.USTAR().Prefix(), namePrefix)
+ blk.SetFormat(FormatUSTAR)
+ if f.err != nil {
+ return f.err // Should never happen since header is validated
+ }
+ return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag)
+}
+
+func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
+ realName, realSize := hdr.Name, hdr.Size
+
+ // TODO(dsnet): Re-enable this when adding sparse support.
+ // See https://golang.org/issue/22735
+ /*
+ // Handle sparse files.
+ var spd sparseDatas
+ var spb []byte
+ if len(hdr.SparseHoles) > 0 {
+ sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
+ sph = alignSparseEntries(sph, hdr.Size)
+ spd = invertSparseEntries(sph, hdr.Size)
+
+ // Format the sparse map.
+ hdr.Size = 0 // Replace with encoded size
+ spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n')
+ for _, s := range spd {
+ hdr.Size += s.Length
+ spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n')
+ spb = append(strconv.AppendInt(spb, s.Length, 10), '\n')
+ }
+ pad := blockPadding(int64(len(spb)))
+ spb = append(spb, zeroBlock[:pad]...)
+ hdr.Size += int64(len(spb)) // Accounts for encoded sparse map
+
+ // Add and modify appropriate PAX records.
+ dir, file := path.Split(realName)
+ hdr.Name = path.Join(dir, "GNUSparseFile.0", file)
+ paxHdrs[paxGNUSparseMajor] = "1"
+ paxHdrs[paxGNUSparseMinor] = "0"
+ paxHdrs[paxGNUSparseName] = realName
+ paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10)
+ paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10)
+ delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName
+ }
+ */
+ _ = realSize
+
+ // Write PAX records to the output.
+ isGlobal := hdr.Typeflag == TypeXGlobalHeader
+ if len(paxHdrs) > 0 || isGlobal {
+ // Sort keys for deterministic ordering.
+ var keys []string
+ for k := range paxHdrs {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ // Write each record to a buffer.
+ var buf strings.Builder
+ for _, k := range keys {
+ rec, err := formatPAXRecord(k, paxHdrs[k])
+ if err != nil {
+ return err
+ }
+ buf.WriteString(rec)
+ }
+
+ // Write the extended header file.
+ var name string
+ var flag byte
+ if isGlobal {
+ name = realName
+ if name == "" {
+ name = "GlobalHead.0.0"
+ }
+ flag = TypeXGlobalHeader
+ } else {
+ dir, file := path.Split(realName)
+ name = path.Join(dir, "PaxHeaders.0", file)
+ flag = TypeXHeader
+ }
+ data := buf.String()
+ if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
+ return err // Global headers return here
+ }
+ }
+
+ // Pack the main header.
+ var f formatter // Ignore errors since they are expected
+ fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) }
+ blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal)
+ blk.SetFormat(FormatPAX)
+ if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
+ return err
+ }
+
+ // TODO(dsnet): Re-enable this when adding sparse support.
+ // See https://golang.org/issue/22735
+ /*
+ // Write the sparse map and setup the sparse writer if necessary.
+ if len(spd) > 0 {
+ // Use tw.curr since the sparse map is accounted for in hdr.Size.
+ if _, err := tw.curr.Write(spb); err != nil {
+ return err
+ }
+ tw.curr = &sparseFileWriter{tw.curr, spd, 0}
+ }
+ */
+ return nil
+}
+
+func (tw *Writer) writeGNUHeader(hdr *Header) error {
+ // Use long-link files if Name or Linkname exceeds the field size.
+ const longName = "././@LongLink"
+ if len(hdr.Name) > nameSize {
+ data := hdr.Name + "\x00"
+ if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil {
+ return err
+ }
+ }
+ if len(hdr.Linkname) > nameSize {
+ data := hdr.Linkname + "\x00"
+ if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil {
+ return err
+ }
+ }
+
+ // Pack the main header.
+ var f formatter // Ignore errors since they are expected
+ var spd sparseDatas
+ var spb []byte
+ blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric)
+ if !hdr.AccessTime.IsZero() {
+ f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix())
+ }
+ if !hdr.ChangeTime.IsZero() {
+ f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix())
+ }
+ // TODO(dsnet): Re-enable this when adding sparse support.
+ // See https://golang.org/issue/22735
+ /*
+ if hdr.Typeflag == TypeGNUSparse {
+ sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
+ sph = alignSparseEntries(sph, hdr.Size)
+ spd = invertSparseEntries(sph, hdr.Size)
+
+ // Format the sparse map.
+ formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas {
+ for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ {
+ f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset)
+ f.formatNumeric(sa.Entry(i).Length(), sp[0].Length)
+ sp = sp[1:]
+ }
+ if len(sp) > 0 {
+ sa.IsExtended()[0] = 1
+ }
+ return sp
+ }
+ sp2 := formatSPD(spd, blk.GNU().Sparse())
+ for len(sp2) > 0 {
+ var spHdr block
+ sp2 = formatSPD(sp2, spHdr.Sparse())
+ spb = append(spb, spHdr[:]...)
+ }
+
+ // Update size fields in the header block.
+ realSize := hdr.Size
+ hdr.Size = 0 // Encoded size; does not account for encoded sparse map
+ for _, s := range spd {
+ hdr.Size += s.Length
+ }
+ copy(blk.V7().Size(), zeroBlock[:]) // Reset field
+ f.formatNumeric(blk.V7().Size(), hdr.Size)
+ f.formatNumeric(blk.GNU().RealSize(), realSize)
+ }
+ */
+ blk.SetFormat(FormatGNU)
+ if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
+ return err
+ }
+
+ // Write the extended sparse map and setup the sparse writer if necessary.
+ if len(spd) > 0 {
+ // Use tw.w since the sparse map is not accounted for in hdr.Size.
+ if _, err := tw.w.Write(spb); err != nil {
+ return err
+ }
+ tw.curr = &sparseFileWriter{tw.curr, spd, 0}
+ }
+ return nil
+}
+
+type (
+ stringFormatter func([]byte, string)
+ numberFormatter func([]byte, int64)
+)
+
+// templateV7Plus fills out the V7 fields of a block using values from hdr.
+// It also fills out fields (uname, gname, devmajor, devminor) that are
+// shared in the USTAR, PAX, and GNU formats using the provided formatters.
+//
+// The block returned is only valid until the next call to
+// templateV7Plus or writeRawFile.
+func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block {
+ tw.blk.Reset()
+
+ modTime := hdr.ModTime
+ if modTime.IsZero() {
+ modTime = time.Unix(0, 0)
+ }
+
+ v7 := tw.blk.V7()
+ v7.TypeFlag()[0] = hdr.Typeflag
+ fmtStr(v7.Name(), hdr.Name)
+ fmtStr(v7.LinkName(), hdr.Linkname)
+ fmtNum(v7.Mode(), hdr.Mode)
+ fmtNum(v7.UID(), int64(hdr.Uid))
+ fmtNum(v7.GID(), int64(hdr.Gid))
+ fmtNum(v7.Size(), hdr.Size)
+ fmtNum(v7.ModTime(), modTime.Unix())
+
+ ustar := tw.blk.USTAR()
+ fmtStr(ustar.UserName(), hdr.Uname)
+ fmtStr(ustar.GroupName(), hdr.Gname)
+ fmtNum(ustar.DevMajor(), hdr.Devmajor)
+ fmtNum(ustar.DevMinor(), hdr.Devminor)
+
+ return &tw.blk
+}
+
+// writeRawFile writes a minimal file with the given name and flag type.
+// It uses format to encode the header format and will write data as the body.
+// It uses default values for all of the other fields (as BSD and GNU tar do).
+func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error {
+ tw.blk.Reset()
+
+ // Best effort for the filename.
+ name = toASCII(name)
+ if len(name) > nameSize {
+ name = name[:nameSize]
+ }
+ name = strings.TrimRight(name, "/")
+
+ var f formatter
+ v7 := tw.blk.V7()
+ v7.TypeFlag()[0] = flag
+ f.formatString(v7.Name(), name)
+ f.formatOctal(v7.Mode(), 0)
+ f.formatOctal(v7.UID(), 0)
+ f.formatOctal(v7.GID(), 0)
+ f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB
+ f.formatOctal(v7.ModTime(), 0)
+ tw.blk.SetFormat(format)
+ if f.err != nil {
+ return f.err // Only occurs if size condition is violated
+ }
+
+ // Write the header and data.
+ if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil {
+ return err
+ }
+ _, err := io.WriteString(tw, data)
+ return err
+}
+
+// writeRawHeader writes the value of blk, regardless of its value.
+// It sets up the Writer such that it can accept a file of the given size.
+// If the flag is a special header-only flag, then the size is treated as zero.
+func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
+ if err := tw.Flush(); err != nil {
+ return err
+ }
+ if _, err := tw.w.Write(blk[:]); err != nil {
+ return err
+ }
+ if isHeaderOnlyType(flag) {
+ size = 0
+ }
+ tw.curr = &regFileWriter{tw.w, size}
+ tw.pad = blockPadding(size)
+ return nil
+}
+
+// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
+// If the path is not splittable, then it will return ("", "", false).
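+// For example, a name too long for the 100-byte V7 field may split at a '/'
+// into a prefix of at most 155 bytes and a suffix of at most 100 bytes,
+// e.g. "some/long/dir" + "file.txt".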
+func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
+ length := len(name)
+ if length <= nameSize || !isASCII(name) {
+ return "", "", false
+ } else if length > prefixSize+1 {
+ length = prefixSize + 1
+ } else if name[length-1] == '/' {
+ length--
+ }
+
+ i := strings.LastIndex(name[:length], "/")
+ nlen := len(name) - i - 1 // nlen is length of suffix
+ plen := i // plen is length of prefix
+ if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize {
+ return "", "", false
+ }
+ return name[:i], name[i+1:], true
+}
+
+// Write writes to the current file in the tar archive.
+// Write returns the error ErrWriteTooLong if more than
+// Header.Size bytes are written after WriteHeader.
+//
+// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
+// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
+// of what the Header.Size claims.
+func (tw *Writer) Write(b []byte) (int, error) {
+ if tw.err != nil {
+ return 0, tw.err
+ }
+ n, err := tw.curr.Write(b)
+ if err != nil && err != ErrWriteTooLong {
+ tw.err = err
+ }
+ return n, err
+}
+
+// readFrom populates the content of the current file by reading from r.
+// The bytes read must match the number of remaining bytes in the current file.
+//
+// If the current file is sparse and r is an io.ReadSeeker,
+// then readFrom uses Seek to skip past holes defined in Header.SparseHoles,
+// assuming that skipped regions are all NULs.
+// This always reads the last byte to ensure r is the right size.
+//
+// TODO(dsnet): Re-export this when adding sparse file support.
+// See https://golang.org/issue/22735
+func (tw *Writer) readFrom(r io.Reader) (int64, error) {
+ if tw.err != nil {
+ return 0, tw.err
+ }
+ n, err := tw.curr.ReadFrom(r)
+ if err != nil && err != ErrWriteTooLong {
+ tw.err = err
+ }
+ return n, err
+}
+
+// Close closes the tar archive by flushing the padding, and writing the footer.
+// If the current file (from a prior call to WriteHeader) is not fully written,
+// then this returns an error.
+func (tw *Writer) Close() error {
+ if tw.err == ErrWriteAfterClose {
+ return nil
+ }
+ if tw.err != nil {
+ return tw.err
+ }
+
+ // Trailer: two zero blocks.
+ err := tw.Flush()
+ for i := 0; i < 2 && err == nil; i++ {
+ _, err = tw.w.Write(zeroBlock[:])
+ }
+
+ // Ensure all future actions are invalid.
+ tw.err = ErrWriteAfterClose
+ return err // Report IO errors
+}
+
+// regFileWriter is a fileWriter for writing data to a regular file entry.
+type regFileWriter struct {
+ w io.Writer // Underlying Writer
+ nb int64 // Number of remaining bytes to write
+}
+
+func (fw *regFileWriter) Write(b []byte) (n int, err error) {
+ overwrite := int64(len(b)) > fw.nb
+ if overwrite {
+ b = b[:fw.nb]
+ }
+ if len(b) > 0 {
+ n, err = fw.w.Write(b)
+ fw.nb -= int64(n)
+ }
+ switch {
+ case err != nil:
+ return n, err
+ case overwrite:
+ return n, ErrWriteTooLong
+ default:
+ return n, nil
+ }
+}
+
+func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) {
+ return io.Copy(struct{ io.Writer }{fw}, r)
+}
+
+func (fw regFileWriter) LogicalRemaining() int64 {
+ return fw.nb
+}
+func (fw regFileWriter) PhysicalRemaining() int64 {
+ return fw.nb
+}
+
+// sparseFileWriter is a fileWriter for writing data to a sparse file entry.
+type sparseFileWriter struct {
+ fw fileWriter // Underlying fileWriter
+ sp sparseDatas // Normalized list of data fragments
+ pos int64 // Current position in sparse file
+}
+
+func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
+ overwrite := int64(len(b)) > sw.LogicalRemaining()
+ if overwrite {
+ b = b[:sw.LogicalRemaining()]
+ }
+
+ b0 := b
+ endPos := sw.pos + int64(len(b))
+ for endPos > sw.pos && err == nil {
+ var nf int // Bytes written in fragment
+ dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
+ if sw.pos < dataStart { // In a hole fragment
+ bf := b[:min(int64(len(b)), dataStart-sw.pos)]
+ nf, err = zeroWriter{}.Write(bf)
+ } else { // In a data fragment
+ bf := b[:min(int64(len(b)), dataEnd-sw.pos)]
+ nf, err = sw.fw.Write(bf)
+ }
+ b = b[nf:]
+ sw.pos += int64(nf)
+ if sw.pos >= dataEnd && len(sw.sp) > 1 {
+ sw.sp = sw.sp[1:] // Ensure last fragment always remains
+ }
+ }
+
+ n = len(b0) - len(b)
+ switch {
+ case err == ErrWriteTooLong:
+ return n, errMissData // Not possible; implies bug in validation logic
+ case err != nil:
+ return n, err
+ case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
+ return n, errUnrefData // Not possible; implies bug in validation logic
+ case overwrite:
+ return n, ErrWriteTooLong
+ default:
+ return n, nil
+ }
+}
+
+func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
+ rs, ok := r.(io.ReadSeeker)
+ if ok {
+ if _, err := rs.Seek(0, io.SeekCurrent); err != nil {
+ ok = false // Not all io.Seeker can really seek
+ }
+ }
+ if !ok {
+ return io.Copy(struct{ io.Writer }{sw}, r)
+ }
+
+ var readLastByte bool
+ pos0 := sw.pos
+ for sw.LogicalRemaining() > 0 && !readLastByte && err == nil {
+ var nf int64 // Size of fragment
+ dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
+ if sw.pos < dataStart { // In a hole fragment
+ nf = dataStart - sw.pos
+ if sw.PhysicalRemaining() == 0 {
+ readLastByte = true
+ nf--
+ }
+ _, err = rs.Seek(nf, io.SeekCurrent)
+ } else { // In a data fragment
+ nf = dataEnd - sw.pos
+ nf, err = io.CopyN(sw.fw, rs, nf)
+ }
+ sw.pos += nf
+ if sw.pos >= dataEnd && len(sw.sp) > 1 {
+ sw.sp = sw.sp[1:] // Ensure last fragment always remains
+ }
+ }
+
+ // If the last fragment is a hole, then seek to 1-byte before EOF, and
+ // read a single byte to ensure the file is the right size.
+ if readLastByte && err == nil {
+ _, err = mustReadFull(rs, []byte{0})
+ sw.pos++
+ }
+
+ n = sw.pos - pos0
+ switch {
+ case err == io.EOF:
+ return n, io.ErrUnexpectedEOF
+ case err == ErrWriteTooLong:
+ return n, errMissData // Not possible; implies bug in validation logic
+ case err != nil:
+ return n, err
+ case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
+ return n, errUnrefData // Not possible; implies bug in validation logic
+ default:
+ return n, ensureEOF(rs)
+ }
+}
+
+func (sw sparseFileWriter) LogicalRemaining() int64 {
+ return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
+}
+func (sw sparseFileWriter) PhysicalRemaining() int64 {
+ return sw.fw.PhysicalRemaining()
+}
+
+// zeroWriter may only be written with NULs, otherwise it returns errWriteHole.
+type zeroWriter struct{}
+
+func (zeroWriter) Write(b []byte) (int, error) {
+ for i, c := range b {
+ if c != 0 {
+ return i, errWriteHole
+ }
+ }
+ return len(b), nil
+}
+
+// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so.
+func ensureEOF(r io.Reader) error {
+ n, err := tryReadFull(r, []byte{0})
+ switch {
+ case n > 0:
+ return ErrWriteTooLong
+ case err == io.EOF:
+ return nil
+ default:
+ return err
+ }
+}
diff --git a/vendor/github.com/vishvananda/netns/.golangci.yml b/vendor/github.com/vishvananda/netns/.golangci.yml
new file mode 100644
index 000000000000..600bef78e2b7
--- /dev/null
+++ b/vendor/github.com/vishvananda/netns/.golangci.yml
@@ -0,0 +1,2 @@
+run:
+ timeout: 5m
diff --git a/vendor/github.com/vishvananda/netns/README.md b/vendor/github.com/vishvananda/netns/README.md
index 1fdb2d3e4ae1..bdfedbe81fa5 100644
--- a/vendor/github.com/vishvananda/netns/README.md
+++ b/vendor/github.com/vishvananda/netns/README.md
@@ -23,6 +23,7 @@ import (
"fmt"
"net"
"runtime"
+
"github.com/vishvananda/netns"
)
@@ -48,14 +49,3 @@ func main() {
}
```
-
-## NOTE
-
-The library can be safely used only with Go >= 1.10 due to [golang/go#20676](/~https://github.com/golang/go/issues/20676).
-
-After locking a goroutine to its current OS thread with `runtime.LockOSThread()`
-and changing its network namespace, any new subsequent goroutine won't be
-scheduled on that thread while it's locked. Therefore, the new goroutine
-will run in a different namespace leading to unexpected results.
-
-See [here](https://www.weave.works/blog/linux-namespaces-golang-followup) for more details.
diff --git a/vendor/github.com/vishvananda/netns/doc.go b/vendor/github.com/vishvananda/netns/doc.go
new file mode 100644
index 000000000000..cd4093a4d7e9
--- /dev/null
+++ b/vendor/github.com/vishvananda/netns/doc.go
@@ -0,0 +1,9 @@
+// Package netns allows ultra-simple network namespace handling. NsHandles
+// can be retrieved and set. Note that the current namespace is thread
+// local so actions that set and reset namespaces should use LockOSThread
+// to make sure the namespace doesn't change due to a goroutine switch.
+// It is best to close NsHandles when you are done with them. This can be
+// accomplished via a `defer ns.Close()` on the handle. Changing namespaces
+// requires elevated privileges, so in most cases this code needs to be run
+// as root.
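+//
+// A typical save-and-restore sketch (requires root):
+//
+//	runtime.LockOSThread()
+//	defer runtime.UnlockOSThread()
+//	origns, _ := netns.Get()
+//	defer origns.Close()
+//	newns, _ := netns.New()
+//	defer newns.Close()
+//	// ... work in the new namespace ...
+//	netns.Set(origns)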
+package netns
diff --git a/vendor/github.com/vishvananda/netns/netns.go b/vendor/github.com/vishvananda/netns/netns.go
deleted file mode 100644
index 116befd548c4..000000000000
--- a/vendor/github.com/vishvananda/netns/netns.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Package netns allows ultra-simple network namespace handling. NsHandles
-// can be retrieved and set. Note that the current namespace is thread
-// local so actions that set and reset namespaces should use LockOSThread
-// to make sure the namespace doesn't change due to a goroutine switch.
-// It is best to close NsHandles when you are done with them. This can be
-// accomplished via a `defer ns.Close()` on the handle. Changing namespaces
-// requires elevated privileges, so in most cases this code needs to be run
-// as root.
-package netns
-
-import (
- "fmt"
-
- "golang.org/x/sys/unix"
-)
-
-// NsHandle is a handle to a network namespace. It can be cast directly
-// to an int and used as a file descriptor.
-type NsHandle int
-
-// Equal determines if two network handles refer to the same network
-// namespace. This is done by comparing the device and inode that the
-// file descriptors point to.
-func (ns NsHandle) Equal(other NsHandle) bool {
- if ns == other {
- return true
- }
- var s1, s2 unix.Stat_t
- if err := unix.Fstat(int(ns), &s1); err != nil {
- return false
- }
- if err := unix.Fstat(int(other), &s2); err != nil {
- return false
- }
- return (s1.Dev == s2.Dev) && (s1.Ino == s2.Ino)
-}
-
-// String shows the file descriptor number and its dev and inode.
-func (ns NsHandle) String() string {
- if ns == -1 {
- return "NS(None)"
- }
- var s unix.Stat_t
- if err := unix.Fstat(int(ns), &s); err != nil {
- return fmt.Sprintf("NS(%d: unknown)", ns)
- }
- return fmt.Sprintf("NS(%d: %d, %d)", ns, s.Dev, s.Ino)
-}
-
-// UniqueId returns a string which uniquely identifies the namespace
-// associated with the network handle.
-func (ns NsHandle) UniqueId() string {
- if ns == -1 {
- return "NS(none)"
- }
- var s unix.Stat_t
- if err := unix.Fstat(int(ns), &s); err != nil {
- return "NS(unknown)"
- }
- return fmt.Sprintf("NS(%d:%d)", s.Dev, s.Ino)
-}
-
-// IsOpen returns true if Close() has not been called.
-func (ns NsHandle) IsOpen() bool {
- return ns != -1
-}
-
-// Close closes the NsHandle and resets its file descriptor to -1.
-// It is not safe to use an NsHandle after Close() is called.
-func (ns *NsHandle) Close() error {
- if err := unix.Close(int(*ns)); err != nil {
- return err
- }
- (*ns) = -1
- return nil
-}
-
-// None gets an empty (closed) NsHandle.
-func None() NsHandle {
- return NsHandle(-1)
-}
diff --git a/vendor/github.com/vishvananda/netns/netns_linux.go b/vendor/github.com/vishvananda/netns/netns_linux.go
index 36e64906b6ce..2ed7c7e2fa40 100644
--- a/vendor/github.com/vishvananda/netns/netns_linux.go
+++ b/vendor/github.com/vishvananda/netns/netns_linux.go
@@ -1,33 +1,31 @@
-// +build linux,go1.10
-
package netns
import (
"fmt"
- "io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
- "syscall"
"golang.org/x/sys/unix"
)
-// Deprecated: use syscall pkg instead (go >= 1.5 needed).
+// Deprecated: use golang.org/x/sys/unix pkg instead.
const (
- CLONE_NEWUTS = 0x04000000 /* New utsname group? */
- CLONE_NEWIPC = 0x08000000 /* New ipcs */
- CLONE_NEWUSER = 0x10000000 /* New user namespace */
- CLONE_NEWPID = 0x20000000 /* New pid namespace */
- CLONE_NEWNET = 0x40000000 /* New network namespace */
- CLONE_IO = 0x80000000 /* Get io context */
- bindMountPath = "/run/netns" /* Bind mount path for named netns */
+ CLONE_NEWUTS = unix.CLONE_NEWUTS /* New utsname group? */
+ CLONE_NEWIPC = unix.CLONE_NEWIPC /* New ipcs */
+ CLONE_NEWUSER = unix.CLONE_NEWUSER /* New user namespace */
+ CLONE_NEWPID = unix.CLONE_NEWPID /* New pid namespace */
+ CLONE_NEWNET = unix.CLONE_NEWNET /* New network namespace */
+ CLONE_IO = unix.CLONE_IO /* Get io context */
)
-// Setns sets namespace using syscall. Note that this should be a method
-// in syscall but it has not been added.
+const bindMountPath = "/run/netns" /* Bind mount path for named netns */
+
+// Setns sets namespace using golang.org/x/sys/unix.Setns.
+//
+// Deprecated: Use golang.org/x/sys/unix.Setns instead.
func Setns(ns NsHandle, nstype int) (err error) {
return unix.Setns(int(ns), nstype)
}
@@ -35,19 +33,20 @@ func Setns(ns NsHandle, nstype int) (err error) {
// Set sets the current network namespace to the namespace represented
// by NsHandle.
func Set(ns NsHandle) (err error) {
- return Setns(ns, CLONE_NEWNET)
+ return unix.Setns(int(ns), unix.CLONE_NEWNET)
}
// New creates a new network namespace, sets it as current and returns
// a handle to it.
func New() (ns NsHandle, err error) {
- if err := unix.Unshare(CLONE_NEWNET); err != nil {
+ if err := unix.Unshare(unix.CLONE_NEWNET); err != nil {
return -1, err
}
return Get()
}
-// NewNamed creates a new named network namespace and returns a handle to it
+// NewNamed creates a new named network namespace, sets it as current,
+// and returns a handle to it.
func NewNamed(name string) (NsHandle, error) {
if _, err := os.Stat(bindMountPath); os.IsNotExist(err) {
err = os.MkdirAll(bindMountPath, 0755)
@@ -65,13 +64,15 @@ func NewNamed(name string) (NsHandle, error) {
f, err := os.OpenFile(namedPath, os.O_CREATE|os.O_EXCL, 0444)
if err != nil {
+ newNs.Close()
return None(), err
}
f.Close()
- nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), syscall.Gettid())
- err = syscall.Mount(nsPath, namedPath, "bind", syscall.MS_BIND, "")
+ nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid())
+ err = unix.Mount(nsPath, namedPath, "bind", unix.MS_BIND, "")
if err != nil {
+ newNs.Close()
return None(), err
}
@@ -82,7 +83,7 @@ func NewNamed(name string) (NsHandle, error) {
func DeleteNamed(name string) error {
namedPath := path.Join(bindMountPath, name)
- err := syscall.Unmount(namedPath, syscall.MNT_DETACH)
+ err := unix.Unmount(namedPath, unix.MNT_DETACH)
if err != nil {
return err
}
@@ -108,7 +109,7 @@ func GetFromPath(path string) (NsHandle, error) {
// GetFromName gets a handle to a named network namespace such as one
// created by `ip netns add`.
func GetFromName(name string) (NsHandle, error) {
- return GetFromPath(fmt.Sprintf("/var/run/netns/%s", name))
+ return GetFromPath(filepath.Join(bindMountPath, name))
}
// GetFromPid gets a handle to the network namespace of a given pid.
@@ -133,33 +134,38 @@ func GetFromDocker(id string) (NsHandle, error) {
}
// borrowed from docker/utils/utils.go
-func findCgroupMountpoint(cgroupType string) (string, error) {
- output, err := ioutil.ReadFile("/proc/mounts")
+func findCgroupMountpoint(cgroupType string) (int, string, error) {
+ output, err := os.ReadFile("/proc/mounts")
if err != nil {
- return "", err
+ return -1, "", err
}
// /proc/mounts has 6 fields per line, one mount per line, e.g.
// cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
for _, line := range strings.Split(string(output), "\n") {
parts := strings.Split(line, " ")
- if len(parts) == 6 && parts[2] == "cgroup" {
- for _, opt := range strings.Split(parts[3], ",") {
- if opt == cgroupType {
- return parts[1], nil
+ if len(parts) == 6 {
+ switch parts[2] {
+ case "cgroup2":
+ return 2, parts[1], nil
+ case "cgroup":
+ for _, opt := range strings.Split(parts[3], ",") {
+ if opt == cgroupType {
+ return 1, parts[1], nil
+ }
}
}
}
}
- return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
+ return -1, "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
}
// Returns the relative path to the cgroup docker is running in.
// borrowed from docker/utils/utils.go
// modified to get the docker pid instead of using /proc/self
-func getThisCgroup(cgroupType string) (string, error) {
- dockerpid, err := ioutil.ReadFile("/var/run/docker.pid")
+func getDockerCgroup(cgroupVer int, cgroupType string) (string, error) {
+ dockerpid, err := os.ReadFile("/var/run/docker.pid")
if err != nil {
return "", err
}
@@ -171,14 +177,15 @@ func getThisCgroup(cgroupType string) (string, error) {
if err != nil {
return "", err
}
- output, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid))
+ output, err := os.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid))
if err != nil {
return "", err
}
for _, line := range strings.Split(string(output), "\n") {
parts := strings.Split(line, ":")
// any type used by docker should work
- if parts[1] == cgroupType {
+ if (cgroupVer == 1 && parts[1] == cgroupType) ||
+ (cgroupVer == 2 && parts[1] == "") {
return parts[2], nil
}
}
@@ -190,46 +197,56 @@ func getThisCgroup(cgroupType string) (string, error) {
// modified to only return the first pid
// modified to glob with id
// modified to search for newer docker containers
+// modified to look for cgroups v2
func getPidForContainer(id string) (int, error) {
pid := 0
// memory is chosen randomly, any cgroup used by docker works
cgroupType := "memory"
- cgroupRoot, err := findCgroupMountpoint(cgroupType)
+ cgroupVer, cgroupRoot, err := findCgroupMountpoint(cgroupType)
if err != nil {
return pid, err
}
- cgroupThis, err := getThisCgroup(cgroupType)
+ cgroupDocker, err := getDockerCgroup(cgroupVer, cgroupType)
if err != nil {
return pid, err
}
id += "*"
+ var pidFile string
+ if cgroupVer == 1 {
+ pidFile = "tasks"
+ } else if cgroupVer == 2 {
+ pidFile = "cgroup.procs"
+ } else {
+ return -1, fmt.Errorf("Invalid cgroup version '%d'", cgroupVer)
+ }
+
attempts := []string{
- filepath.Join(cgroupRoot, cgroupThis, id, "tasks"),
+ filepath.Join(cgroupRoot, cgroupDocker, id, pidFile),
// With more recent lxc versions use, cgroup will be in lxc/
- filepath.Join(cgroupRoot, cgroupThis, "lxc", id, "tasks"),
+ filepath.Join(cgroupRoot, cgroupDocker, "lxc", id, pidFile),
// With more recent docker, cgroup will be in docker/
- filepath.Join(cgroupRoot, cgroupThis, "docker", id, "tasks"),
+ filepath.Join(cgroupRoot, cgroupDocker, "docker", id, pidFile),
// Even more recent docker versions under systemd use docker-.scope/
- filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", "tasks"),
+ filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", pidFile),
// Even more recent docker versions under cgroup/systemd/docker//
- filepath.Join(cgroupRoot, "..", "systemd", "docker", id, "tasks"),
+ filepath.Join(cgroupRoot, "..", "systemd", "docker", id, pidFile),
// Kubernetes with docker and CNI is even more different. Works for BestEffort and Burstable QoS
- filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, "tasks"),
+ filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, pidFile),
// Same as above but for Guaranteed QoS
- filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "pod*", id, "tasks"),
+ filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "pod*", id, pidFile),
// Another flavor of containers location in recent kubernetes 1.11+. Works for BestEffort and Burstable QoS
- filepath.Join(cgroupRoot, cgroupThis, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", "tasks"),
+ filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile),
// Same as above but for Guaranteed QoS
- filepath.Join(cgroupRoot, cgroupThis, "kubepods.slice", "*", "docker-"+id+".scope", "tasks"),
+ filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*", "docker-"+id+".scope", pidFile),
// When runs inside of a container with recent kubernetes 1.11+. Works for BestEffort and Burstable QoS
- filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", "tasks"),
+ filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile),
// Same as above but for Guaranteed QoS
- filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", "tasks"),
+ filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", pidFile),
}
var filename string
@@ -247,7 +264,7 @@ func getPidForContainer(id string) (int, error) {
return pid, fmt.Errorf("Unable to find container: %v", id[:len(id)-1])
}
- output, err := ioutil.ReadFile(filename)
+ output, err := os.ReadFile(filename)
if err != nil {
return pid, err
}
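
For readers tracking the cgroup v2 change in `getPidForContainer` above, here is a minimal standalone sketch of the lookup it performs: glob for the container's pid file (`tasks` on cgroup v1, `cgroup.procs` on v2) and read the first pid listed. The paths, container id, and helper name are illustrative, not part of the vendored API.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// firstPid mirrors the patched lookup: glob one of the candidate
// cgroup paths, then read the first pid from the matched pid file.
func firstPid(cgroupRoot, dockerCgroup, id, pidFile string) (int, error) {
	pattern := filepath.Join(cgroupRoot, dockerCgroup, "docker", id+"*", pidFile)
	matches, err := filepath.Glob(pattern)
	if err != nil || len(matches) == 0 {
		return 0, fmt.Errorf("unable to find container: %v", id)
	}
	output, err := os.ReadFile(matches[0])
	if err != nil {
		return 0, err
	}
	// The pid file lists one pid per line; the first one is enough here.
	first := strings.SplitN(strings.TrimSpace(string(output)), "\n", 2)[0]
	return strconv.Atoi(first)
}

func main() {
	pid, err := firstPid("/sys/fs/cgroup", "", "deadbeef", "cgroup.procs")
	fmt.Println(pid, err)
}
```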
diff --git a/vendor/github.com/vishvananda/netns/netns_others.go b/vendor/github.com/vishvananda/netns/netns_others.go
new file mode 100644
index 000000000000..0489837741bc
--- /dev/null
+++ b/vendor/github.com/vishvananda/netns/netns_others.go
@@ -0,0 +1,60 @@
+//go:build !linux
+// +build !linux
+
+package netns
+
+import (
+ "errors"
+)
+
+var (
+ ErrNotImplemented = errors.New("not implemented")
+)
+
+// Setns sets namespace using golang.org/x/sys/unix.Setns on Linux. It
+// is not implemented on other platforms.
+//
+// Deprecated: Use golang.org/x/sys/unix.Setns instead.
+func Setns(ns NsHandle, nstype int) (err error) {
+ return ErrNotImplemented
+}
+
+func Set(ns NsHandle) (err error) {
+ return ErrNotImplemented
+}
+
+func New() (ns NsHandle, err error) {
+ return -1, ErrNotImplemented
+}
+
+func NewNamed(name string) (NsHandle, error) {
+ return -1, ErrNotImplemented
+}
+
+func DeleteNamed(name string) error {
+ return ErrNotImplemented
+}
+
+func Get() (NsHandle, error) {
+ return -1, ErrNotImplemented
+}
+
+func GetFromPath(path string) (NsHandle, error) {
+ return -1, ErrNotImplemented
+}
+
+func GetFromName(name string) (NsHandle, error) {
+ return -1, ErrNotImplemented
+}
+
+func GetFromPid(pid int) (NsHandle, error) {
+ return -1, ErrNotImplemented
+}
+
+func GetFromThread(pid, tid int) (NsHandle, error) {
+ return -1, ErrNotImplemented
+}
+
+func GetFromDocker(id string) (NsHandle, error) {
+ return -1, ErrNotImplemented
+}
diff --git a/vendor/github.com/vishvananda/netns/netns_unspecified.go b/vendor/github.com/vishvananda/netns/netns_unspecified.go
deleted file mode 100644
index d06af62b68ad..000000000000
--- a/vendor/github.com/vishvananda/netns/netns_unspecified.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// +build !linux
-
-package netns
-
-import (
- "errors"
-)
-
-var (
- ErrNotImplemented = errors.New("not implemented")
-)
-
-func Set(ns NsHandle) (err error) {
- return ErrNotImplemented
-}
-
-func New() (ns NsHandle, err error) {
- return -1, ErrNotImplemented
-}
-
-func Get() (NsHandle, error) {
- return -1, ErrNotImplemented
-}
-
-func GetFromPath(path string) (NsHandle, error) {
- return -1, ErrNotImplemented
-}
-
-func GetFromName(name string) (NsHandle, error) {
- return -1, ErrNotImplemented
-}
-
-func GetFromPid(pid int) (NsHandle, error) {
- return -1, ErrNotImplemented
-}
-
-func GetFromThread(pid, tid int) (NsHandle, error) {
- return -1, ErrNotImplemented
-}
-
-func GetFromDocker(id string) (NsHandle, error) {
- return -1, ErrNotImplemented
-}
diff --git a/vendor/github.com/vishvananda/netns/nshandle_linux.go b/vendor/github.com/vishvananda/netns/nshandle_linux.go
new file mode 100644
index 000000000000..1baffb66acbf
--- /dev/null
+++ b/vendor/github.com/vishvananda/netns/nshandle_linux.go
@@ -0,0 +1,73 @@
+package netns
+
+import (
+ "fmt"
+
+ "golang.org/x/sys/unix"
+)
+
+// NsHandle is a handle to a network namespace. It can be cast directly
+// to an int and used as a file descriptor.
+type NsHandle int
+
+// Equal determines if two network handles refer to the same network
+// namespace. This is done by comparing the device and inode that the
+// file descriptors point to.
+func (ns NsHandle) Equal(other NsHandle) bool {
+ if ns == other {
+ return true
+ }
+ var s1, s2 unix.Stat_t
+ if err := unix.Fstat(int(ns), &s1); err != nil {
+ return false
+ }
+ if err := unix.Fstat(int(other), &s2); err != nil {
+ return false
+ }
+ return (s1.Dev == s2.Dev) && (s1.Ino == s2.Ino)
+}
+
+// String shows the file descriptor number and its dev and inode.
+func (ns NsHandle) String() string {
+ if ns == -1 {
+ return "NS(none)"
+ }
+ var s unix.Stat_t
+ if err := unix.Fstat(int(ns), &s); err != nil {
+ return fmt.Sprintf("NS(%d: unknown)", ns)
+ }
+ return fmt.Sprintf("NS(%d: %d, %d)", ns, s.Dev, s.Ino)
+}
+
+// UniqueId returns a string which uniquely identifies the namespace
+// associated with the network handle.
+func (ns NsHandle) UniqueId() string {
+ if ns == -1 {
+ return "NS(none)"
+ }
+ var s unix.Stat_t
+ if err := unix.Fstat(int(ns), &s); err != nil {
+ return "NS(unknown)"
+ }
+ return fmt.Sprintf("NS(%d:%d)", s.Dev, s.Ino)
+}
+
+// IsOpen returns true if Close() has not been called.
+func (ns NsHandle) IsOpen() bool {
+ return ns != -1
+}
+
+// Close closes the NsHandle and resets its file descriptor to -1.
+// It is not safe to use an NsHandle after Close() is called.
+func (ns *NsHandle) Close() error {
+ if err := unix.Close(int(*ns)); err != nil {
+ return err
+ }
+ *ns = -1
+ return nil
+}
+
+// None gets an empty (closed) NsHandle.
+func None() NsHandle {
+ return NsHandle(-1)
+}
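
The `NsHandle` methods above compare namespaces by device and inode rather than by file descriptor number. A short Linux-only usage sketch, with error handling trimmed; purely illustrative:

```go
//go:build linux

package main

import (
	"fmt"

	"github.com/vishvananda/netns"
)

func main() {
	orig, _ := netns.Get() // handle to the current thread's network namespace
	defer orig.Close()

	// Two separate handles to the same namespace have different fd
	// numbers, but Equal compares dev+inode, so they compare equal.
	again, _ := netns.Get()
	defer again.Close()

	fmt.Println(orig.Equal(again)) // true
	fmt.Println(orig.UniqueId())   // e.g. "NS(4:4026531992)"
}
```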
diff --git a/vendor/github.com/vishvananda/netns/nshandle_others.go b/vendor/github.com/vishvananda/netns/nshandle_others.go
new file mode 100644
index 000000000000..af727bc091c5
--- /dev/null
+++ b/vendor/github.com/vishvananda/netns/nshandle_others.go
@@ -0,0 +1,45 @@
+//go:build !linux
+// +build !linux
+
+package netns
+
+// NsHandle is a handle to a network namespace. It can only be used on Linux,
+// but provides stub methods on other platforms.
+type NsHandle int
+
+// Equal determines if two network handles refer to the same network
+// namespace. It is only implemented on Linux.
+func (ns NsHandle) Equal(_ NsHandle) bool {
+ return false
+}
+
+// String shows the file descriptor number and its dev and inode.
+// It is only implemented on Linux, and returns "NS(none)" on other
+// platforms.
+func (ns NsHandle) String() string {
+ return "NS(none)"
+}
+
+// UniqueId returns a string which uniquely identifies the namespace
+// associated with the network handle. It is only implemented on Linux,
+// and returns "NS(none)" on other platforms.
+func (ns NsHandle) UniqueId() string {
+ return "NS(none)"
+}
+
+// IsOpen returns true if Close() has not been called. It is only implemented
+// on Linux and always returns false on other platforms.
+func (ns NsHandle) IsOpen() bool {
+ return false
+}
+
+// Close closes the NsHandle and resets its file descriptor to -1.
+// It is only implemented on Linux.
+func (ns *NsHandle) Close() error {
+ return nil
+}
+
+// None gets an empty (closed) NsHandle.
+func None() NsHandle {
+ return NsHandle(-1)
+}
diff --git a/vendor/github.com/x448/float16/.travis.yml b/vendor/github.com/x448/float16/.travis.yml
new file mode 100644
index 000000000000..8902bdaaffc2
--- /dev/null
+++ b/vendor/github.com/x448/float16/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+ - 1.11.x
+
+env:
+ - GO111MODULE=on
+
+script:
+ - go test -short -coverprofile=coverage.txt -covermode=count ./...
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/x448/float16/LICENSE b/vendor/github.com/x448/float16/LICENSE
new file mode 100644
index 000000000000..bf6e357854a0
--- /dev/null
+++ b/vendor/github.com/x448/float16/LICENSE
@@ -0,0 +1,22 @@
+MIT License
+
+Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/x448/float16/README.md b/vendor/github.com/x448/float16/README.md
new file mode 100644
index 000000000000..b524b8135d58
--- /dev/null
+++ b/vendor/github.com/x448/float16/README.md
@@ -0,0 +1,133 @@
+# Float16 (Binary16) in Go/Golang
+[![Build Status](https://travis-ci.org/x448/float16.svg?branch=master)](https://travis-ci.org/x448/float16)
+[![codecov](https://codecov.io/gh/x448/float16/branch/master/graph/badge.svg?v=4)](https://codecov.io/gh/x448/float16)
+[![Go Report Card](https://goreportcard.com/badge/github.com/x448/float16)](https://goreportcard.com/report/github.com/x448/float16)
+[![Release](https://img.shields.io/github/release/x448/float16.svg?style=flat-square)](/~https://github.com/x448/float16/releases)
+[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/x448/float16/master/LICENSE)
+
+The `float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16.
+
+IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result.
+
+All possible 4+ billion floating-point conversions with this library are verified to be correct.
+
+Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library.
+
+## Features
+Current features include:
+
+* float16 to float32 conversions are lossless.
+* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven".
+* conversions using pure Go take about 2.65 ns/op on a desktop amd64.
+* unit tests provide 100% code coverage and check all possible 4+ billion conversions.
+* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc.
+* all functions in this library use zero allocs except String().
+
+## Status
+This library is used by [fxamacker/cbor](/~https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published.
+
+Current status:
+
+* core API is done and breaking API changes are unlikely.
+* 100% of unit tests pass:
+ * short mode (`go test -short`) tests around 65765 conversions in 0.005s.
+ * normal mode (`go test`) tests all possible 4+ billion conversions in about 95s.
+* 100% code coverage with both short mode and normal mode.
+* tested on amd64 but it should work on all little-endian platforms supported by Go.
+
+Roadmap:
+
+* add functions for fast batch conversions leveraging SIMD when supported by hardware.
+* speed up unit test when verifying all possible 4+ billion conversions.
+* test on additional platforms.
+
+## Float16 to Float32 Conversion
+Conversions from float16 to float32 are lossless. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct.
+
+Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions.
+
+## Float32 to Float16 Conversion
+Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct.
+
+Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32().
+
+Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage.
+
+## Usage
+Install with `go get github.com/x448/float16`.
+```
+// Convert float32 to float16
+pi := float32(math.Pi)
+pi16 := float16.Fromfloat32(pi)
+
+// Convert float16 to float32
+pi32 := pi16.Float32()
+
+// PrecisionFromfloat32() is inlined, so the check costs less than a regular function call.
+// This example only converts if there's no data loss and input is not a subnormal.
+if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact {
+ pi16 := float16.Fromfloat32(pi)
+}
+```
+
+## Float16 Type and API
+Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods.
+```
+package float16 // import "github.com/x448/float16"
+
+// Exported types and consts
+type Float16 uint16
+const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
+
+// Exported functions
+Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding
+ with identical results to AMD and Intel F16C hardware. NaN inputs
+ are converted with quiet bit always set on, to be like F16C.
+
+FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit.
+ // The "ps" suffix means "preserve signaling".
+ // Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN.
+
+Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.)
+NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number
+Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign
+
+PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow
+ // (inline and < 1 ns/op)
+// Exported methods
+(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion
+(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f
+(f Float16) IsNaN() bool // true if f is not-a-number (NaN)
+(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN)
+(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf)
+(f Float16) IsFinite() bool // true if f is not infinite or NaN
+(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN.
+(f Float16) Signbit() bool // true if f is negative or negative zero
+(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface
+```
+See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info.
+
+## Benchmarks
+Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value.
+
+```
+All functions have zero allocations except float16.String().
+
+FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16
+ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32
+Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16
+
+PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc.
+```
+
+## System Requirements
+* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions.
+* Tested on amd64 but it should also work on all little-endian platforms supported by Go.
+
+## Special Thanks
+Special thanks to Kathryn Long (starkat99) for creating [half-rs](/~https://github.com/starkat99/half-rs), a very nice rust implementation of float16.
+
+## License
+Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
+
+Licensed under [MIT License](LICENSE)
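
The README's `PrecisionFromfloat32` filter generalizes to an encoding decision like the CBOR use case mentioned under Status. A hedged sketch; the helper name and fallback policy are illustrative, not part of the package:

```go
package main

import (
	"fmt"

	"github.com/x448/float16"
)

// smallestEncoding picks binary16 only when the value provably
// round-trips; PrecisionUnknown subnormals are verified explicitly.
func smallestEncoding(f float32) string {
	switch float16.PrecisionFromfloat32(f) {
	case float16.PrecisionExact:
		return "float16"
	case float16.PrecisionUnknown:
		if float16.Fromfloat32(f).Float32() == f {
			return "float16"
		}
		return "float32"
	default: // inexact, underflow, or overflow
		return "float32"
	}
}

func main() {
	fmt.Println(smallestEncoding(0.5)) // float16 (exact)
	fmt.Println(smallestEncoding(0.1)) // float32 (inexact in binary16)
}
```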
diff --git a/vendor/github.com/x448/float16/float16.go b/vendor/github.com/x448/float16/float16.go
new file mode 100644
index 000000000000..1a0e6dad0046
--- /dev/null
+++ b/vendor/github.com/x448/float16/float16.go
@@ -0,0 +1,302 @@
+// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
+//
+// Special thanks to Kathryn Long for her Rust implementation
+// of float16 at github.com/starkat99/half-rs (MIT license)
+
+package float16
+
+import (
+ "math"
+ "strconv"
+)
+
+// Float16 represents IEEE 754 half-precision floating-point numbers (binary16).
+type Float16 uint16
+
+// Precision indicates whether the conversion to Float16 is
+// exact, subnormal without dropped bits, inexact, underflow, or overflow.
+type Precision int
+
+const (
+
+ // PrecisionExact is for non-subnormals that don't drop bits during conversion.
+ // All of these can round-trip. Should always convert to float16.
+ PrecisionExact Precision = iota
+
+ // PrecisionUnknown is for subnormals that don't drop bits during conversion but
+ // not all of these can round-trip so precision is unknown without more effort.
+ // Only 2046 of these can round-trip and the rest cannot round-trip.
+ PrecisionUnknown
+
+ // PrecisionInexact is for dropped significand bits and cannot round-trip.
+ // Some of these are subnormals. Cannot round-trip float32->float16->float32.
+ PrecisionInexact
+
+ // PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32.
+ PrecisionUnderflow
+
+ // PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32.
+ PrecisionOverflow
+)
+
+// PrecisionFromfloat32 returns Precision without performing
+// the conversion. Conversions from both Infinity and NaN
+// values will always report PrecisionExact even if NaN payload
+// or NaN-Quiet-Bit is lost. This function is kept simple to
+// allow inlining and run < 0.5 ns/op, to serve as a fast filter.
+func PrecisionFromfloat32(f32 float32) Precision {
+ u32 := math.Float32bits(f32)
+
+ if u32 == 0 || u32 == 0x80000000 {
+ // +- zero will always be exact conversion
+ return PrecisionExact
+ }
+
+ const COEFMASK uint32 = 0x7fffff // 23 least significant bits
+ const EXPSHIFT uint32 = 23
+ const EXPBIAS uint32 = 127
+ const EXPMASK uint32 = uint32(0xff) << EXPSHIFT
+ const DROPMASK uint32 = COEFMASK >> 10
+
+ exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS)
+ coef := u32 & COEFMASK
+
+ if exp == 128 {
+ // +- infinity or NaN
+ // apps may want to do extra checks for NaN separately
+ return PrecisionExact
+ }
+
+ // https://en.wikipedia.org/wiki/Half-precision_floating-point_format says,
+ // "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24"
+ if exp < -24 {
+ return PrecisionUnderflow
+ }
+ if exp > 15 {
+ return PrecisionOverflow
+ }
+ if (coef & DROPMASK) != uint32(0) {
+ // these include subnormals and non-subnormals that dropped bits
+ return PrecisionInexact
+ }
+
+ if exp < -14 {
+ // Subnormals. Caller may want to test these further.
+ // There are 2046 subnormals that can successfully round-trip f32->f16->f32
+ // and 20 of those 2046 have 32-bit input coef == 0.
+ // RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value"
+ // so some protocols and libraries will choose to handle subnormals differently
+ // when deciding to encode them to CBOR float32 vs float16.
+ return PrecisionUnknown
+ }
+
+ return PrecisionExact
+}
+
+// Frombits returns the float16 number corresponding to the IEEE 754 binary16
+// representation u16, with the sign bit of u16 and the result in the same bit
+// position. Frombits(Bits(x)) == x.
+func Frombits(u16 uint16) Float16 {
+ return Float16(u16)
+}
+
+// Fromfloat32 returns a Float16 value converted from f32. Conversion uses
+// IEEE default rounding (nearest int, with ties to even).
+func Fromfloat32(f32 float32) Float16 {
+ return Float16(f32bitsToF16bits(math.Float32bits(f32)))
+}
+
+// ErrInvalidNaNValue indicates a NaN was not received.
+const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
+
+type float16Error string
+
+func (e float16Error) Error() string { return string(e) }
+
+// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both
+// signaling and payload. Unlike Fromfloat32(), which can only return
+// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN.
+// If the result is infinity (sNaN with empty payload), then the
+// lowest bit of payload is set to make the result a NaN.
+// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN.
+// This function was kept simple to be able to inline.
+func FromNaN32ps(nan float32) (Float16, error) {
+ const SNAN = Float16(uint16(0x7c01)) // signalling NaN
+
+ u32 := math.Float32bits(nan)
+ sign := u32 & 0x80000000
+ exp := u32 & 0x7f800000
+ coef := u32 & 0x007fffff
+
+ if (exp != 0x7f800000) || (coef == 0) {
+ return SNAN, ErrInvalidNaNValue
+ }
+
+ u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13))
+
+ if (u16 & 0x03ff) == 0 {
+ // result became infinity, make it NaN by setting lowest bit in payload
+ u16 = u16 | 0x0001
+ }
+
+ return Float16(u16), nil
+}
+
+// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN).
+// Returned NaN value 0x7e01 has all exponent bits = 1 with the
+// first and last bits = 1 in the significand. This is consistent
+// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00.
+func NaN() Float16 {
+ return Float16(0x7e01)
+}
+
+// Inf returns a Float16 with an infinity value with the specified sign.
+// A sign >= 0 returns positive infinity.
+// A sign < 0 returns negative infinity.
+func Inf(sign int) Float16 {
+ if sign >= 0 {
+ return Float16(0x7c00)
+ }
+ return Float16(0x8000 | 0x7c00)
+}
+
+// Float32 returns a float32 converted from f (Float16).
+// This is a lossless conversion.
+func (f Float16) Float32() float32 {
+ u32 := f16bitsToF32bits(uint16(f))
+ return math.Float32frombits(u32)
+}
+
+// Bits returns the IEEE 754 binary16 representation of f, with the sign bit
+// of f and the result in the same bit position. Bits(Frombits(x)) == x.
+func (f Float16) Bits() uint16 {
+ return uint16(f)
+}
+
+// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value.
+func (f Float16) IsNaN() bool {
+ return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0)
+}
+
+// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16
+// “not-a-number” value.
+func (f Float16) IsQuietNaN() bool {
+ return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0)
+}
+
+// IsInf reports whether f is an infinity (inf).
+// A sign > 0 reports whether f is positive inf.
+// A sign < 0 reports whether f is negative inf.
+// A sign == 0 reports whether f is either inf.
+func (f Float16) IsInf(sign int) bool {
+ return ((f == 0x7c00) && sign >= 0) ||
+ (f == 0xfc00 && sign <= 0)
+}
+
+// IsFinite returns true if f is neither infinite nor NaN.
+func (f Float16) IsFinite() bool {
+ return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00)
+}
+
+// IsNormal returns true if f is neither zero, infinite, subnormal, nor NaN.
+func (f Float16) IsNormal() bool {
+ exp := uint16(f) & uint16(0x7c00)
+ return (exp != uint16(0x7c00)) && (exp != 0)
+}
+
+// Signbit reports whether f is negative or negative zero.
+func (f Float16) Signbit() bool {
+ return (uint16(f) & uint16(0x8000)) != 0
+}
+
+// String satisfies the fmt.Stringer interface.
+func (f Float16) String() string {
+ return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32)
+}
+
+// f16bitsToF32bits returns uint32 (float32 bits) converted from the specified uint16.
+func f16bitsToF32bits(in uint16) uint32 {
+ // All 65536 conversions with this were confirmed to be correct
+ // by Montgomery Edwards⁴⁴⁸ (github.com/x448).
+
+ sign := uint32(in&0x8000) << 16 // sign for 32-bit
+ exp := uint32(in&0x7c00) >> 10 // exponent for 16-bit
+ coef := uint32(in&0x03ff) << 13 // significand for 32-bit
+
+ if exp == 0x1f {
+ if coef == 0 {
+ // infinity
+ return sign | 0x7f800000 | coef
+ }
+ // NaN
+ return sign | 0x7fc00000 | coef
+ }
+
+ if exp == 0 {
+ if coef == 0 {
+ // zero
+ return sign
+ }
+
+ // normalize subnormal numbers
+ exp++
+ for coef&0x7f800000 == 0 {
+ coef <<= 1
+ exp--
+ }
+ coef &= 0x007fffff
+ }
+
+ return sign | ((exp + (0x7f - 0xf)) << 23) | coef
+}
+
+// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32.
+// Conversion rounds to nearest integer with ties to even.
+func f32bitsToF16bits(u32 uint32) uint16 {
+ // Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448).
+ // All 4294967296 conversions with this were confirmed to be correct by x448.
+ // Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license.
+
+ sign := u32 & 0x80000000
+ exp := u32 & 0x7f800000
+ coef := u32 & 0x007fffff
+
+ if exp == 0x7f800000 {
+ // NaN or Infinity
+ nanBit := uint32(0)
+ if coef != 0 {
+ nanBit = uint32(0x0200)
+ }
+ return uint16((sign >> 16) | uint32(0x7c00) | nanBit | (coef >> 13))
+ }
+
+ halfSign := sign >> 16
+
+ unbiasedExp := int32(exp>>23) - 127
+ halfExp := unbiasedExp + 15
+
+ if halfExp >= 0x1f {
+ return uint16(halfSign | uint32(0x7c00))
+ }
+
+ if halfExp <= 0 {
+ if 14-halfExp > 24 {
+ return uint16(halfSign)
+ }
+ coef := coef | uint32(0x00800000)
+ halfCoef := coef >> uint32(14-halfExp)
+ roundBit := uint32(1) << uint32(13-halfExp)
+ if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 {
+ halfCoef++
+ }
+ return uint16(halfSign | halfCoef)
+ }
+
+ uHalfExp := uint32(halfExp) << 10
+ halfCoef := coef >> 13
+ roundBit := uint32(0x00001000)
+ if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 {
+ return uint16((halfSign | uHalfExp | halfCoef) + 1)
+ }
+ return uint16(halfSign | uHalfExp | halfCoef)
+}
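
The tie-breaking in `f32bitsToF16bits` implements round-to-nearest, ties-to-even: `coef&roundBit` detects a dropped high bit, and `coef&(3*roundBit-1)` checks the sticky bits plus the lowest kept bit, so exact halfway cases round toward an even significand. A small check against two hand-picked halfway values, assuming the package API shown above:

```go
package main

import (
	"fmt"

	"github.com/x448/float16"
)

func main() {
	// 1 + 2^-11 sits exactly halfway between the binary16 neighbors
	// 1.0 (even significand) and 1 + 2^-10 (odd): ties-to-even rounds down.
	a := float32(1) + 1.0/2048
	fmt.Println(float16.Fromfloat32(a).Float32()) // 1

	// 1 + 3*2^-11 is halfway between 1 + 2^-10 (odd) and 1 + 2^-9 (even):
	// ties-to-even rounds up to 1.001953125.
	b := float32(1) + 3.0/2048
	fmt.Println(float16.Fromfloat32(b).Float32()) // 1.0019531
}
```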
diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version
index f124bfa15544..013173af5e9b 100644
--- a/vendor/go.etcd.io/bbolt/.go-version
+++ b/vendor/go.etcd.io/bbolt/.go-version
@@ -1 +1 @@
-1.21.9
+1.22.6
diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile
index 18154c638823..21407797416e 100644
--- a/vendor/go.etcd.io/bbolt/Makefile
+++ b/vendor/go.etcd.io/bbolt/Makefile
@@ -41,6 +41,15 @@ coverage:
TEST_FREELIST_TYPE=array go test -v -timeout 30m \
-coverprofile cover-freelist-array.out -covermode atomic
+BOLT_CMD=bbolt
+
+build:
+ go build -o bin/${BOLT_CMD} ./cmd/${BOLT_CMD}
+
+.PHONY: clean
+clean: # Clean binaries
+ rm -f ./bin/${BOLT_CMD}
+
.PHONY: gofail-enable
gofail-enable: install-gofail
gofail enable .
@@ -61,3 +70,7 @@ test-failpoint:
@echo "[failpoint] array freelist test"
TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint
+.PHONY: test-robustness # Running robustness tests requires root permission
+test-robustness:
+ go test -v ${TESTFLAGS} ./tests/dmflakey -test.root
+ go test -v ${TESTFLAGS} ./tests/robustness -test.root
diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go
index 4175bdf3dde9..822798e41a5b 100644
--- a/vendor/go.etcd.io/bbolt/db.go
+++ b/vendor/go.etcd.io/bbolt/db.go
@@ -524,7 +524,7 @@ func (db *DB) munmap() error {
// gofail: var unmapError string
// return errors.New(unmapError)
if err := munmap(db); err != nil {
- return fmt.Errorf("unmap error: " + err.Error())
+ return fmt.Errorf("unmap error: %v", err.Error())
}
return nil
@@ -571,7 +571,7 @@ func (db *DB) munlock(fileSize int) error {
// gofail: var munlockError string
// return errors.New(munlockError)
if err := munlock(db, fileSize); err != nil {
- return fmt.Errorf("munlock error: " + err.Error())
+ return fmt.Errorf("munlock error: %v", err.Error())
}
return nil
}
@@ -580,7 +580,7 @@ func (db *DB) mlock(fileSize int) error {
// gofail: var mlockError string
// return errors.New(mlockError)
if err := mlock(db, fileSize); err != nil {
- return fmt.Errorf("mlock error: " + err.Error())
+ return fmt.Errorf("mlock error: %v", err.Error())
}
return nil
}
@@ -1159,6 +1159,8 @@ func (db *DB) grow(sz int) error {
// /~https://github.com/boltdb/bolt/issues/284
if !db.NoGrowSync && !db.readOnly {
if runtime.GOOS != "windows" {
+ // gofail: var resizeFileError string
+ // return errors.New(resizeFileError)
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("file resize error: %s", err)
}
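
A side note on the `fmt.Errorf` changes above: concatenating an error's text into the format string misparses any `%` the message contains (and trips `go vet`'s non-constant format string check), while passing it as an argument does not. A tiny stdlib-only illustration:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("disk at 100% capacity")

	// Concatenation puts the message in the format string, so the
	// '%' is parsed as a verb with no operand and mangles the output.
	fmt.Println(fmt.Errorf("unmap error: " + err.Error()))
	// unmap error: disk at 100%!c(MISSING)apacity

	// Passing it as an argument, as the patch does, is safe.
	fmt.Println(fmt.Errorf("unmap error: %v", err))
	// unmap error: disk at 100% capacity
}
```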
diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go
index 61d43f81b46d..dffc7bc749b5 100644
--- a/vendor/go.etcd.io/bbolt/freelist.go
+++ b/vendor/go.etcd.io/bbolt/freelist.go
@@ -252,6 +252,14 @@ func (f *freelist) rollback(txid txid) {
}
// Remove pages from pending list and mark as free if allocated by txid.
delete(f.pending, txid)
+
+ // Remove pgids which are allocated by this txid
+ for pgid, tid := range f.allocs {
+ if tid == txid {
+ delete(f.allocs, pgid)
+ }
+ }
+
f.mergeSpans(m)
}
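
The rollback fix above deletes map entries while ranging over the same map. That is well-defined in Go: entries removed during iteration are simply not produced later. A standalone sketch with illustrative ids:

```go
package main

import "fmt"

func main() {
	type txid = uint64
	allocs := map[uint64]txid{10: 7, 11: 7, 12: 8} // pgid -> allocating txid

	// Mirror of the rollback loop: drop every page allocated by txid 7.
	// Deleting during range is safe per the Go spec.
	for pgid, tid := range allocs {
		if tid == 7 {
			delete(allocs, pgid)
		}
	}
	fmt.Println(allocs) // map[12:8]
}
```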
diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go
index 2fac8c0a7820..766395de3be7 100644
--- a/vendor/go.etcd.io/bbolt/tx.go
+++ b/vendor/go.etcd.io/bbolt/tx.go
@@ -1,6 +1,7 @@
package bbolt
import (
+ "errors"
"fmt"
"io"
"os"
@@ -185,6 +186,10 @@ func (tx *Tx) Commit() error {
// If the high water mark has moved up then attempt to grow the database.
if tx.meta.pgid > opgid {
+ _ = errors.New("")
+ // gofail: var lackOfDiskSpace string
+ // tx.rollback()
+ // return errors.New(lackOfDiskSpace)
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
tx.rollback()
return err
@@ -470,6 +475,7 @@ func (tx *Tx) write() error {
// Ignore file sync if flag is set on DB.
if !tx.db.NoSync || IgnoreNoSync {
+ // gofail: var beforeSyncDataPages struct{}
if err := fdatasync(tx.db); err != nil {
return err
}
@@ -507,6 +513,7 @@ func (tx *Tx) writeMeta() error {
return err
}
if !tx.db.NoSync || IgnoreNoSync {
+ // gofail: var beforeSyncMetaPage struct{}
if err := fdatasync(tx.db); err != nil {
return err
}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
index d9b91a24b17b..ab091cf6ade3 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
@@ -1,53 +1,59 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
import (
+ "google.golang.org/grpc/stats"
+
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
"go.opentelemetry.io/otel/propagation"
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace"
)
const (
- // instrumentationName is the name of this instrumentation package.
- instrumentationName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ // ScopeName is the instrumentation scope name.
+ ScopeName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
// GRPCStatusCodeKey is convention for numeric status code of a gRPC request.
GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
)
-// Filter is a predicate used to determine whether a given request in
-// interceptor info should be traced. A Filter must return true if
+// InterceptorFilter is a predicate used to determine whether a given request in
+// interceptor info should be instrumented. An InterceptorFilter must return true if
// the request should be traced.
-type Filter func(*InterceptorInfo) bool
+//
+// Deprecated: Use stats handlers instead.
+type InterceptorFilter func(*InterceptorInfo) bool
+
+// Filter is a predicate used to determine whether a given request
+// should be instrumented, based on the attached RPC tag info.
+// A Filter must return true if the request should be instrumented.
+type Filter func(*stats.RPCTagInfo) bool
// config is a group of options for this instrumentation.
type config struct {
- Filter Filter
- Propagators propagation.TextMapPropagator
- TracerProvider trace.TracerProvider
- MeterProvider metric.MeterProvider
- SpanStartOptions []trace.SpanStartOption
+ Filter Filter
+ InterceptorFilter InterceptorFilter
+ Propagators propagation.TextMapPropagator
+ TracerProvider trace.TracerProvider
+ MeterProvider metric.MeterProvider
+ SpanStartOptions []trace.SpanStartOption
ReceivedEvent bool
SentEvent bool
- meter metric.Meter
- rpcServerDuration metric.Int64Histogram
+ tracer trace.Tracer
+ meter metric.Meter
+
+ rpcDuration metric.Float64Histogram
+ rpcRequestSize metric.Int64Histogram
+ rpcResponseSize metric.Int64Histogram
+ rpcRequestsPerRPC metric.Int64Histogram
+ rpcResponsesPerRPC metric.Int64Histogram
}
// Option applies an option value for a config.
@@ -56,7 +62,7 @@ type Option interface {
}
// newConfig returns a config configured with all the passed Options.
-func newConfig(opts []Option) *config {
+func newConfig(opts []Option, role string) *config {
c := &config{
Propagators: otel.GetTextMapPropagator(),
TracerProvider: otel.GetTracerProvider(),
@@ -66,17 +72,66 @@ func newConfig(opts []Option) *config {
o.apply(c)
}
+ c.tracer = c.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(SemVersion()),
+ )
+
c.meter = c.MeterProvider.Meter(
- instrumentationName,
+ ScopeName,
metric.WithInstrumentationVersion(Version()),
metric.WithSchemaURL(semconv.SchemaURL),
)
+
var err error
- c.rpcServerDuration, err = c.meter.Int64Histogram("rpc.server.duration",
+ c.rpcDuration, err = c.meter.Float64Histogram("rpc."+role+".duration",
metric.WithDescription("Measures the duration of inbound RPC."),
metric.WithUnit("ms"))
if err != nil {
otel.Handle(err)
+ if c.rpcDuration == nil {
+ c.rpcDuration = noop.Float64Histogram{}
+ }
+ }
+
+ c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size",
+ metric.WithDescription("Measures size of RPC request messages (uncompressed)."),
+ metric.WithUnit("By"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcRequestSize == nil {
+ c.rpcRequestSize = noop.Int64Histogram{}
+ }
+ }
+
+ c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size",
+ metric.WithDescription("Measures size of RPC response messages (uncompressed)."),
+ metric.WithUnit("By"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcResponseSize == nil {
+ c.rpcResponseSize = noop.Int64Histogram{}
+ }
+ }
+
+ c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc",
+ metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
+ metric.WithUnit("{count}"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcRequestsPerRPC == nil {
+ c.rpcRequestsPerRPC = noop.Int64Histogram{}
+ }
+ }
+
+ c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc",
+ metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
+ metric.WithUnit("{count}"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcResponsesPerRPC == nil {
+ c.rpcResponsesPerRPC = noop.Int64Histogram{}
+ }
}
return c
@@ -105,15 +160,32 @@ func (o tracerProviderOption) apply(c *config) {
}
// WithInterceptorFilter returns an Option to use the request filter.
-func WithInterceptorFilter(f Filter) Option {
+//
+// Deprecated: Use stats handlers instead.
+func WithInterceptorFilter(f InterceptorFilter) Option {
return interceptorFilterOption{f: f}
}
type interceptorFilterOption struct {
- f Filter
+ f InterceptorFilter
}
func (o interceptorFilterOption) apply(c *config) {
+ if o.f != nil {
+ c.InterceptorFilter = o.f
+ }
+}
+
+// WithFilter returns an Option to use the request filter.
+func WithFilter(f Filter) Option {
+ return filterOption{f: f}
+}
+
+type filterOption struct {
+ f Filter
+}
+
+func (o filterOption) apply(c *config) {
if o.f != nil {
c.Filter = o.f
}
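
The new `Filter` runs inside the stats handlers, replacing the deprecated interceptor-level filtering. A hedged sketch of excluding one method; the health-check method name is just an example:

```go
package main

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

func main() {
	srv := grpc.NewServer(
		grpc.StatsHandler(otelgrpc.NewServerHandler(
			otelgrpc.WithFilter(func(info *stats.RPCTagInfo) bool {
				// Instrument everything except health checks.
				return info.FullMethodName != "/grpc.health.v1.Health/Check"
			}),
		)),
	)
	defer srv.Stop()
}
```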
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
index a993e0fc9214..b8b836b00fb1 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
@@ -1,45 +1,11 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
-Package otelgrpc is the instrumentation library for [google.golang.org/grpc]
+Package otelgrpc is the instrumentation library for [google.golang.org/grpc].
-For now you can instrument your program which use [google.golang.org/grpc] in two ways:
+Use [NewClientHandler] with [grpc.WithStatsHandler] to instrument a gRPC client.
- - by [grpc.UnaryClientInterceptor], [grpc.UnaryServerInterceptor], [grpc.StreamClientInterceptor], [grpc.StreamServerInterceptor]
- - by [stats.Handler]
-
-Notice: Do not use both interceptors and [stats.Handler] at the same time! If so, you will get duplicated spans and the parent/child relationships between spans will also be broken.
-
-We strongly still recommand you to use [stats.Handler], mainly for two reasons:
-
-Functional advantages: [stats.Handler] has more information for user to build more flexible and granular metric, for example
-
- - multiple different types of represent "data length": In [stats.InPayload], there exists "Length", "CompressedLength", "WireLength" to denote the size of uncompressed, compressed payload data, with or without framing data. But in interceptors, we can only got uncompressed data, and this feature is also removed due to performance problem.
-
- - more accurate timestamp: [stats.InPayload]'s "RecvTime" and [stats.OutPayload]'s "SentTime" records more accurate timestamp that server got and sent the message, the timestamp recorded by interceptors depends on the location of this interceptors in the total interceptor chain.
-
- - some other use cases: for example, catch failure of decoding message.
-
-Performance advantages: If too many interceptors are registered in a service, the interceptor chain can become too long, which increases the latency and processing time of the entire RPC call.
-
-[stats.Handler]: https://pkg.go.dev/google.golang.org/grpc/stats#Handler
-[grpc.UnaryClientInterceptor]: https://pkg.go.dev/google.golang.org/grpc#UnaryClientInterceptor
-[grpc.UnaryServerInterceptor]: https://pkg.go.dev/google.golang.org/grpc#UnaryServerInterceptor
-[grpc.StreamClientInterceptor]: https://pkg.go.dev/google.golang.org/grpc#StreamClientInterceptor
-[grpc.StreamServerInterceptor]: https://pkg.go.dev/google.golang.org/grpc#StreamServerInterceptor
-[stats.OutPayload]: https://pkg.go.dev/google.golang.org/grpc/stats#OutPayload
-[stats.InPayload]: https://pkg.go.dev/google.golang.org/grpc/stats#InPayload
+Use [NewServerHandler] with [grpc.StatsHandler] to instrument a gRPC server.
*/
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
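
Matching the rewritten doc comment, a minimal sketch of the stats-handler wiring on both sides; the target address and insecure credentials are placeholders for brevity:

```go
package main

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Server side: one stats handler instruments every RPC.
	srv := grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler()))
	defer srv.Stop()

	// Client side, via grpc.NewClient as the updated comments suggest.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```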
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
index 561154061fed..7d5ed058082a 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
@@ -18,6 +7,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g
// /~https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md
import (
"context"
+ "errors"
"io"
"net"
"strconv"
@@ -59,11 +49,13 @@ var (
)
// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable
-// for use in a grpc.Dial call.
+// for use in a grpc.NewClient call.
+//
+// Deprecated: Use [NewClientHandler] instead.
func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
- cfg := newConfig(opts)
+ cfg := newConfig(opts, "client")
tracer := cfg.TracerProvider.Tracer(
- instrumentationName,
+ ScopeName,
trace.WithInstrumentationVersion(Version()),
)
@@ -79,15 +71,16 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
Method: method,
Type: UnaryClient,
}
- if cfg.Filter != nil && !cfg.Filter(i) {
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
return invoker(ctx, method, req, reply, cc, callOpts...)
}
- name, attr := spanInfo(method, cc.Target())
+ name, attr, _ := telemetryAttributes(method, cc.Target())
startOpts := append([]trace.SpanStartOption{
trace.WithSpanKind(trace.SpanKindClient),
- trace.WithAttributes(attr...)},
+ trace.WithAttributes(attr...),
+ },
cfg.SpanStartOptions...,
)
@@ -122,27 +115,13 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
}
}
-type streamEventType int
-
-type streamEvent struct {
- Type streamEventType
- Err error
-}
-
-const (
- receiveEndEvent streamEventType = iota
- errorEvent
-)
-
// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and
// SendMsg method call.
type clientStream struct {
grpc.ClientStream
+ desc *grpc.StreamDesc
- desc *grpc.StreamDesc
- events chan streamEvent
- eventsDone chan struct{}
- finished chan error
+ span trace.Span
receivedEvent bool
sentEvent bool
@@ -157,11 +136,11 @@ func (w *clientStream) RecvMsg(m interface{}) error {
err := w.ClientStream.RecvMsg(m)
if err == nil && !w.desc.ServerStreams {
- w.sendStreamEvent(receiveEndEvent, nil)
- } else if err == io.EOF {
- w.sendStreamEvent(receiveEndEvent, nil)
+ w.endSpan(nil)
+ } else if errors.Is(err, io.EOF) {
+ w.endSpan(nil)
} else if err != nil {
- w.sendStreamEvent(errorEvent, err)
+ w.endSpan(err)
} else {
w.receivedMessageID++
@@ -183,7 +162,7 @@ func (w *clientStream) SendMsg(m interface{}) error {
}
if err != nil {
- w.sendStreamEvent(errorEvent, err)
+ w.endSpan(err)
}
return err
@@ -191,9 +170,8 @@ func (w *clientStream) SendMsg(m interface{}) error {
func (w *clientStream) Header() (metadata.MD, error) {
md, err := w.ClientStream.Header()
-
if err != nil {
- w.sendStreamEvent(errorEvent, err)
+ w.endSpan(err)
}
return md, err
@@ -201,64 +179,43 @@ func (w *clientStream) Header() (metadata.MD, error) {
func (w *clientStream) CloseSend() error {
err := w.ClientStream.CloseSend()
-
if err != nil {
- w.sendStreamEvent(errorEvent, err)
+ w.endSpan(err)
}
return err
}
-func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, cfg *config) *clientStream {
- events := make(chan streamEvent)
- eventsDone := make(chan struct{})
- finished := make(chan error)
-
- go func() {
- defer close(eventsDone)
-
- for {
- select {
- case event := <-events:
- switch event.Type {
- case receiveEndEvent:
- finished <- nil
- return
- case errorEvent:
- finished <- event.Err
- return
- }
- case <-ctx.Done():
- finished <- ctx.Err()
- return
- }
- }
- }()
-
+func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream {
return &clientStream{
ClientStream: s,
+ span: span,
desc: desc,
- events: events,
- eventsDone: eventsDone,
- finished: finished,
receivedEvent: cfg.ReceivedEvent,
sentEvent: cfg.SentEvent,
}
}
-func (w *clientStream) sendStreamEvent(eventType streamEventType, err error) {
- select {
- case <-w.eventsDone:
- case w.events <- streamEvent{Type: eventType, Err: err}:
+func (w *clientStream) endSpan(err error) {
+ if err != nil {
+ s, _ := status.FromError(err)
+ w.span.SetStatus(codes.Error, s.Message())
+ w.span.SetAttributes(statusCodeAttr(s.Code()))
+ } else {
+ w.span.SetAttributes(statusCodeAttr(grpc_codes.OK))
}
+
+ w.span.End()
}
// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
-// for use in a grpc.Dial call.
+// for use in a grpc.NewClient call.
+//
+// Deprecated: Use [NewClientHandler] instead.
func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
- cfg := newConfig(opts)
+ cfg := newConfig(opts, "client")
tracer := cfg.TracerProvider.Tracer(
- instrumentationName,
+ ScopeName,
trace.WithInstrumentationVersion(Version()),
)
@@ -274,15 +231,16 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
Method: method,
Type: StreamClient,
}
- if cfg.Filter != nil && !cfg.Filter(i) {
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
return streamer(ctx, desc, cc, method, callOpts...)
}
- name, attr := spanInfo(method, cc.Target())
+ name, attr, _ := telemetryAttributes(method, cc.Target())
startOpts := append([]trace.SpanStartOption{
trace.WithSpanKind(trace.SpanKindClient),
- trace.WithAttributes(attr...)},
+ trace.WithAttributes(attr...),
+ },
cfg.SpanStartOptions...,
)
@@ -302,32 +260,19 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
span.End()
return s, err
}
- stream := wrapClientStream(ctx, s, desc, cfg)
-
- go func() {
- err := <-stream.finished
-
- if err != nil {
- s, _ := status.FromError(err)
- span.SetStatus(codes.Error, s.Message())
- span.SetAttributes(statusCodeAttr(s.Code()))
- } else {
- span.SetAttributes(statusCodeAttr(grpc_codes.OK))
- }
-
- span.End()
- }()
-
+ stream := wrapClientStream(s, desc, span, cfg)
return stream, nil
}
}
// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable
// for use in a grpc.NewServer call.
+//
+// Deprecated: Use [NewServerHandler] instead.
func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
- cfg := newConfig(opts)
+ cfg := newConfig(opts, "server")
tracer := cfg.TracerProvider.Tracer(
- instrumentationName,
+ ScopeName,
trace.WithInstrumentationVersion(Version()),
)
@@ -341,16 +286,17 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
UnaryServerInfo: info,
Type: UnaryServer,
}
- if cfg.Filter != nil && !cfg.Filter(i) {
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
return handler(ctx, req)
}
ctx = extract(ctx, cfg.Propagators)
- name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx))
+ name, attr, metricAttrs := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
startOpts := append([]trace.SpanStartOption{
trace.WithSpanKind(trace.SpanKindServer),
- trace.WithAttributes(attr...)},
+ trace.WithAttributes(attr...),
+ },
cfg.SpanStartOptions...,
)
@@ -365,30 +311,30 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
messageReceived.Event(ctx, 1, req)
}
- var statusCode grpc_codes.Code
- defer func(t time.Time) {
- elapsedTime := time.Since(t) / time.Millisecond
- attr = append(attr, semconv.RPCGRPCStatusCodeKey.Int64(int64(statusCode)))
- o := metric.WithAttributes(attr...)
- cfg.rpcServerDuration.Record(ctx, int64(elapsedTime), o)
- }(time.Now())
+ before := time.Now()
resp, err := handler(ctx, req)
+
+ s, _ := status.FromError(err)
if err != nil {
- s, _ := status.FromError(err)
statusCode, msg := serverStatus(s)
span.SetStatus(statusCode, msg)
- span.SetAttributes(statusCodeAttr(s.Code()))
if cfg.SentEvent {
messageSent.Event(ctx, 1, s.Proto())
}
} else {
- statusCode = grpc_codes.OK
- span.SetAttributes(statusCodeAttr(grpc_codes.OK))
if cfg.SentEvent {
messageSent.Event(ctx, 1, resp)
}
}
+ grpcStatusCodeAttr := statusCodeAttr(s.Code())
+ span.SetAttributes(grpcStatusCodeAttr)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(before)) / float64(time.Millisecond)
+
+ metricAttrs = append(metricAttrs, grpcStatusCodeAttr)
+ cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributeSet(attribute.NewSet(metricAttrs...)))
return resp, err
}
@@ -446,10 +392,12 @@ func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *s
// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable
// for use in a grpc.NewServer call.
+//
+// Deprecated: Use [NewServerHandler] instead.
func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
- cfg := newConfig(opts)
+ cfg := newConfig(opts, "server")
tracer := cfg.TracerProvider.Tracer(
- instrumentationName,
+ ScopeName,
trace.WithInstrumentationVersion(Version()),
)
@@ -464,16 +412,17 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
StreamServerInfo: info,
Type: StreamServer,
}
- if cfg.Filter != nil && !cfg.Filter(i) {
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
return handler(srv, wrapServerStream(ctx, ss, cfg))
}
ctx = extract(ctx, cfg.Propagators)
- name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx))
+ name, attr, _ := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
startOpts := append([]trace.SpanStartOption{
trace.WithSpanKind(trace.SpanKindServer),
- trace.WithAttributes(attr...)},
+ trace.WithAttributes(attr...),
+ },
cfg.SpanStartOptions...,
)
@@ -498,17 +447,18 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
}
}
-// spanInfo returns a span name and all appropriate attributes from the gRPC
-// method and peer address.
-func spanInfo(fullMethod, peerAddress string) (string, []attribute.KeyValue) {
- name, mAttrs := internal.ParseFullMethod(fullMethod)
+// telemetryAttributes returns a span name and span and metric attributes from
+// the gRPC method and peer address.
+func telemetryAttributes(fullMethod, peerAddress string) (string, []attribute.KeyValue, []attribute.KeyValue) {
+ name, methodAttrs := internal.ParseFullMethod(fullMethod)
peerAttrs := peerAttr(peerAddress)
- attrs := make([]attribute.KeyValue, 0, 1+len(mAttrs)+len(peerAttrs))
+ attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(peerAttrs))
attrs = append(attrs, RPCSystemGRPC)
- attrs = append(attrs, mAttrs...)
+ attrs = append(attrs, methodAttrs...)
+ metricAttrs := attrs[:1+len(methodAttrs)]
attrs = append(attrs, peerAttrs...)
- return name, attrs
+ return name, attrs, metricAttrs
}
// peerAttr returns attributes about the peer address.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
index f6116946bfd8..b62f7cd7c468 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
index cf32a9e978c3..bef07b7a3ca7 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
index d91c6df2370e..3aa37915df22 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
@@ -56,7 +45,7 @@ func (s *metadataSupplier) Keys() []string {
// requests.
// Deprecated: Unnecessary public func.
func Inject(ctx context.Context, md *metadata.MD, opts ...Option) {
- c := newConfig(opts)
+ c := newConfig(opts, "")
c.Propagators.Inject(ctx, &metadataSupplier{
metadata: md,
})
@@ -78,7 +67,7 @@ func inject(ctx context.Context, propagators propagation.TextMapPropagator) cont
// This function is meant to be used on incoming requests.
// Deprecated: Unnecessary public func.
func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) {
- c := newConfig(opts)
+ c := newConfig(opts, "")
ctx = c.Propagators.Extract(ctx, &metadataSupplier{
metadata: md,
})
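
A sketch of round-tripping a span context through gRPC metadata with the (deprecated but still exported) helpers above:

package propagate

import (
	"context"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/otel/trace"
	"google.golang.org/grpc/metadata"
)

// RoundTrip injects the current trace context into metadata, then extracts
// it again the way a receiving process would.
func RoundTrip(ctx context.Context) context.Context {
	md := metadata.MD{}
	otelgrpc.Inject(ctx, &md) // writes trace headers into md
	_, spanCtx := otelgrpc.Extract(ctx, &md)
	return trace.ContextWithRemoteSpanContext(ctx, spanCtx)
}
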
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
index b65fab308f3c..409c621b74c8 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
index c64a53443bc5..201867a86944 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
@@ -1,29 +1,22 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
import (
"context"
"sync/atomic"
+ "time"
grpc_codes "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
+ "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/metric"
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace"
)
@@ -33,24 +26,30 @@ type gRPCContextKey struct{}
type gRPCContext struct {
messagesReceived int64
messagesSent int64
+ metricAttrs []attribute.KeyValue
+ record bool
}
-// NewServerHandler creates a stats.Handler for gRPC server.
+type serverHandler struct {
+ *config
+}
+
+// NewServerHandler creates a stats.Handler for a gRPC server.
func NewServerHandler(opts ...Option) stats.Handler {
h := &serverHandler{
- config: newConfig(opts),
+ config: newConfig(opts, "server"),
}
- h.tracer = h.config.TracerProvider.Tracer(
- instrumentationName,
- trace.WithInstrumentationVersion(SemVersion()),
- )
return h
}
-type serverHandler struct {
- *config
- tracer trace.Tracer
+// TagConn can attach some information to the given context.
+func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
+ return ctx
+}
+
+// HandleConn processes the Conn stats.
+func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
}
// TagRPC can attach some information to the given context.
@@ -66,46 +65,35 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont
trace.WithAttributes(attrs...),
)
- gctx := gRPCContext{}
+ gctx := gRPCContext{
+ metricAttrs: attrs,
+ record: true,
+ }
+ if h.config.Filter != nil {
+ gctx.record = h.config.Filter(info)
+ }
return context.WithValue(ctx, gRPCContextKey{}, &gctx)
}
// HandleRPC processes the RPC stats.
func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
- handleRPC(ctx, rs)
-}
-
-// TagConn can attach some information to the given context.
-func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
- span := trace.SpanFromContext(ctx)
- attrs := peerAttr(peerFromCtx(ctx))
- span.SetAttributes(attrs...)
- return ctx
+ isServer := true
+ h.handleRPC(ctx, rs, isServer)
}
-// HandleConn processes the Conn stats.
-func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
+type clientHandler struct {
+ *config
}
-// NewClientHandler creates a stats.Handler for gRPC client.
+// NewClientHandler creates a stats.Handler for a gRPC client.
func NewClientHandler(opts ...Option) stats.Handler {
h := &clientHandler{
- config: newConfig(opts),
+ config: newConfig(opts, "client"),
}
- h.tracer = h.config.TracerProvider.Tracer(
- instrumentationName,
- trace.WithInstrumentationVersion(SemVersion()),
- )
-
return h
}
-type clientHandler struct {
- *config
- tracer trace.Tracer
-}
-
// TagRPC can attach some information to the given context.
func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
name, attrs := internal.ParseFullMethod(info.FullMethodName)
@@ -117,21 +105,25 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont
trace.WithAttributes(attrs...),
)
- gctx := gRPCContext{}
+ gctx := gRPCContext{
+ metricAttrs: attrs,
+ record: true,
+ }
+ if h.config.Filter != nil {
+ gctx.record = h.config.Filter(info)
+ }
return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators)
}
// HandleRPC processes the RPC stats.
func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
- handleRPC(ctx, rs)
+ isServer := false
+ h.handleRPC(ctx, rs, isServer)
}
// TagConn can attach some information to the given context.
-func (h *clientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
- span := trace.SpanFromContext(ctx)
- attrs := peerAttr(cti.RemoteAddr.String())
- span.SetAttributes(attrs...)
+func (h *clientHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
return ctx
}
@@ -140,47 +132,90 @@ func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) {
// no-op
}
-func handleRPC(ctx context.Context, rs stats.RPCStats) {
+func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool) { // nolint: revive // isServer is not a control flag.
span := trace.SpanFromContext(ctx)
- gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext)
+ var metricAttrs []attribute.KeyValue
var messageId int64
+ gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext)
+ if gctx != nil {
+ if !gctx.record {
+ return
+ }
+ metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1)
+ metricAttrs = append(metricAttrs, gctx.metricAttrs...)
+ }
+
switch rs := rs.(type) {
case *stats.Begin:
case *stats.InPayload:
if gctx != nil {
messageId = atomic.AddInt64(&gctx.messagesReceived, 1)
+ c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...)))
+ }
+
+ if c.ReceivedEvent {
+ span.AddEvent("message",
+ trace.WithAttributes(
+ semconv.MessageTypeReceived,
+ semconv.MessageIDKey.Int64(messageId),
+ semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
+ semconv.MessageUncompressedSizeKey.Int(rs.Length),
+ ),
+ )
}
- span.AddEvent("message",
- trace.WithAttributes(
- semconv.MessageTypeReceived,
- semconv.MessageIDKey.Int64(messageId),
- semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
- semconv.MessageUncompressedSizeKey.Int(rs.Length),
- ),
- )
case *stats.OutPayload:
if gctx != nil {
messageId = atomic.AddInt64(&gctx.messagesSent, 1)
+ c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...)))
}
- span.AddEvent("message",
- trace.WithAttributes(
- semconv.MessageTypeSent,
- semconv.MessageIDKey.Int64(messageId),
- semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
- semconv.MessageUncompressedSizeKey.Int(rs.Length),
- ),
- )
+ if c.SentEvent {
+ span.AddEvent("message",
+ trace.WithAttributes(
+ semconv.MessageTypeSent,
+ semconv.MessageIDKey.Int64(messageId),
+ semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
+ semconv.MessageUncompressedSizeKey.Int(rs.Length),
+ ),
+ )
+ }
+ case *stats.OutTrailer:
+ case *stats.OutHeader:
+ if p, ok := peer.FromContext(ctx); ok {
+ span.SetAttributes(peerAttr(p.Addr.String())...)
+ }
case *stats.End:
+ var rpcStatusAttr attribute.KeyValue
+
if rs.Error != nil {
s, _ := status.FromError(rs.Error)
- span.SetStatus(codes.Error, s.Message())
- span.SetAttributes(statusCodeAttr(s.Code()))
+ if isServer {
+ statusCode, msg := serverStatus(s)
+ span.SetStatus(statusCode, msg)
+ } else {
+ span.SetStatus(codes.Error, s.Message())
+ }
+ rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code()))
} else {
- span.SetAttributes(statusCodeAttr(grpc_codes.OK))
+ rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK))
}
+ span.SetAttributes(rpcStatusAttr)
span.End()
+
+ metricAttrs = append(metricAttrs, rpcStatusAttr)
+ // Allocate vararg slice once.
+ recordOpts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(metricAttrs...))}
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ // Measure right before calling Record() to capture as much elapsed time as possible.
+ elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond)
+
+ c.rpcDuration.Record(ctx, elapsedTime, recordOpts...)
+ if gctx != nil {
+ c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), recordOpts...)
+ c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), recordOpts...)
+ }
default:
return
}
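
With this rewrite, a single stats.Handler per side now produces both spans and the RPC duration/size/messages-per-RPC metrics. A registration sketch; otelgrpc.WithFilter is an assumption about the option that populates config.Filter (and hence the gctx.record flag seen in TagRPC):

package grpcwire

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// Wire registers the instrumented handlers on a server and a client.
func Wire(addr string) (*grpc.Server, *grpc.ClientConn, error) {
	srv := grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler(
		// Assumed option name; it drives the gctx.record flag in TagRPC.
		otelgrpc.WithFilter(func(info *stats.RPCTagInfo) bool {
			return info.FullMethodName != "/grpc.health.v1.Health/Check"
		}),
	)))
	conn, err := grpc.Dial(addr,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
	)
	return srv, conn, err
}
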
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
index 7a8ecebf0440..a15d06cb0c45 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
@@ -1,22 +1,11 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
// Version is the current release version of the gRPC instrumentation.
func Version() string {
- return "0.45.0"
+ return "0.53.0"
// This string is updated by the pre_release.sh script during release
}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
new file mode 100644
index 000000000000..261eeb9e9f8b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
new file mode 100644
index 000000000000..6aae83bfd208
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+// DefaultClient is the default Client and is used by Get, Head, Post and PostForm.
+// Be careful of initialization order: for example, if you change the global
+// propagator, the DefaultClient might still be using the old one.
+var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
+
+// Get is a convenient replacement for http.Get that adds a span around the request.
+func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
+ req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil)
+ if err != nil {
+ return nil, err
+ }
+ return DefaultClient.Do(req)
+}
+
+// Head is a convenient replacement for http.Head that adds a span around the request.
+func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
+ req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil)
+ if err != nil {
+ return nil, err
+ }
+ return DefaultClient.Do(req)
+}
+
+// Post is a convenient replacement for http.Post that adds a span around the request.
+func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) {
+ req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", contentType)
+ return DefaultClient.Do(req)
+}
+
+// PostForm is a convenient replacement for http.PostForm that adds a span around the request.
+func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) {
+ return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
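
A short usage sketch for the wrappers above (the URL is illustrative); each call goes through DefaultClient's instrumented Transport and so carries a client span:

package fetch

import (
	"context"
	"fmt"
	"io"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// Fetch issues a traced GET and reports the body size.
func Fetch(ctx context.Context) error {
	resp, err := otelhttp.Get(ctx, "https://example.com/")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	fmt.Printf("read %d bytes\n", len(body))
	return nil
}
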
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
new file mode 100644
index 000000000000..214acaf581ef
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
@@ -0,0 +1,41 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "net/http"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Attribute keys that can be added to a span.
+const (
+ ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read
+ ReadErrorKey = attribute.Key("http.read_error") // if an error occurred while reading a request, the string of the error (io.EOF is not recorded)
+ WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written
+ WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
+)
+
+// Server HTTP metrics.
+const (
+ serverRequestSize = "http.server.request.size" // Incoming request bytes total
+ serverResponseSize = "http.server.response.size" // Incoming response bytes total
+ serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
+)
+
+// Client HTTP metrics.
+const (
+ clientRequestSize = "http.client.request.size" // Outgoing request bytes total
+ clientResponseSize = "http.client.response.size" // Outgoing response bytes total
+ clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds
+)
+
+// Filter is a predicate used to determine whether a given http.Request should
+// be traced. A Filter must return true if the request should be traced.
+type Filter func(*http.Request) bool
+
+func newTracer(tp trace.TracerProvider) trace.Tracer {
+ return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version()))
+}
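
A minimal Filter matching the predicate type above; the /healthz path is illustrative, and WithFilter (defined later in config.go) is what installs it:

package tracingcfg

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// SkipHealth reports whether a request should be traced; returning false
// makes the middleware pass the request straight through untraced.
var SkipHealth otelhttp.Filter = func(r *http.Request) bool {
	return r.URL.Path != "/healthz"
}

// HealthFilterOption installs the filter on a handler or transport.
var HealthFilterOption = otelhttp.WithFilter(SkipHealth)
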
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
new file mode 100644
index 000000000000..f0a9bb9efeb5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
@@ -0,0 +1,196 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "net/http"
+ "net/http/httptrace"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// ScopeName is the instrumentation scope name.
+const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+// config represents the configuration options available for the http.Handler
+// and http.Transport types.
+type config struct {
+ ServerName string
+ Tracer trace.Tracer
+ Meter metric.Meter
+ Propagators propagation.TextMapPropagator
+ SpanStartOptions []trace.SpanStartOption
+ PublicEndpoint bool
+ PublicEndpointFn func(*http.Request) bool
+ ReadEvent bool
+ WriteEvent bool
+ Filters []Filter
+ SpanNameFormatter func(string, *http.Request) string
+ ClientTrace func(context.Context) *httptrace.ClientTrace
+
+ TracerProvider trace.TracerProvider
+ MeterProvider metric.MeterProvider
+}
+
+// Option interface used for setting optional config properties.
+type Option interface {
+ apply(*config)
+}
+
+type optionFunc func(*config)
+
+func (o optionFunc) apply(c *config) {
+ o(c)
+}
+
+// newConfig creates a new config struct and applies opts to it.
+func newConfig(opts ...Option) *config {
+ c := &config{
+ Propagators: otel.GetTextMapPropagator(),
+ MeterProvider: otel.GetMeterProvider(),
+ }
+ for _, opt := range opts {
+ opt.apply(c)
+ }
+
+ // Tracer is only initialized if manually specified. Otherwise, it can be passed with the tracing context.
+ if c.TracerProvider != nil {
+ c.Tracer = newTracer(c.TracerProvider)
+ }
+
+ c.Meter = c.MeterProvider.Meter(
+ ScopeName,
+ metric.WithInstrumentationVersion(Version()),
+ )
+
+ return c
+}
+
+// WithTracerProvider specifies a tracer provider to use for creating a tracer.
+// If none is specified, the global provider is used.
+func WithTracerProvider(provider trace.TracerProvider) Option {
+ return optionFunc(func(cfg *config) {
+ if provider != nil {
+ cfg.TracerProvider = provider
+ }
+ })
+}
+
+// WithMeterProvider specifies a meter provider to use for creating a meter.
+// If none is specified, the global provider is used.
+func WithMeterProvider(provider metric.MeterProvider) Option {
+ return optionFunc(func(cfg *config) {
+ if provider != nil {
+ cfg.MeterProvider = provider
+ }
+ })
+}
+
+// WithPublicEndpoint configures the Handler to link the span with an incoming
+// span context. If this option is not provided, then the association is a child
+// association instead of a link.
+func WithPublicEndpoint() Option {
+ return optionFunc(func(c *config) {
+ c.PublicEndpoint = true
+ })
+}
+
+// WithPublicEndpointFn runs with every request, and allows conditionally
+// configuring the Handler to link the span with an incoming span context. If
+// this option is not provided or returns false, then the association is a
+// child association instead of a link.
+// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn.
+func WithPublicEndpointFn(fn func(*http.Request) bool) Option {
+ return optionFunc(func(c *config) {
+ c.PublicEndpointFn = fn
+ })
+}
+
+// WithPropagators configures specific propagators. If this
+// option isn't specified, then the global TextMapPropagator is used.
+func WithPropagators(ps propagation.TextMapPropagator) Option {
+ return optionFunc(func(c *config) {
+ if ps != nil {
+ c.Propagators = ps
+ }
+ })
+}
+
+// WithSpanOptions configures an additional set of
+// trace.SpanOptions, which are applied to each new span.
+func WithSpanOptions(opts ...trace.SpanStartOption) Option {
+ return optionFunc(func(c *config) {
+ c.SpanStartOptions = append(c.SpanStartOptions, opts...)
+ })
+}
+
+// WithFilter adds a filter to the list of filters used by the handler.
+// If any filter rejects a request, the request will not be traced. All
+// filters must allow a request to be traced for a Span to be created.
+// If no filters are provided, all requests are traced.
+// Filters will be invoked for each processed request; it is advised to make them
+// simple and fast.
+func WithFilter(f Filter) Option {
+ return optionFunc(func(c *config) {
+ c.Filters = append(c.Filters, f)
+ })
+}
+
+type event int
+
+// Different types of events that can be recorded, see WithMessageEvents.
+const (
+ ReadEvents event = iota
+ WriteEvents
+)
+
+// WithMessageEvents configures the Handler to record the specified events
+// (span.AddEvent) on spans. By default only summary attributes are added at the
+// end of the request.
+//
+// Valid events are:
+// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read
+// using the ReadBytesKey
+// - WriteEvents: Record the number of bytes written after every http.ResponseWriter.Write
+// using the WroteBytesKey
+func WithMessageEvents(events ...event) Option {
+ return optionFunc(func(c *config) {
+ for _, e := range events {
+ switch e {
+ case ReadEvents:
+ c.ReadEvent = true
+ case WriteEvents:
+ c.WriteEvent = true
+ }
+ }
+ })
+}
+
+// WithSpanNameFormatter takes a function that will be called on every
+// request and the returned string will become the Span Name.
+func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option {
+ return optionFunc(func(c *config) {
+ c.SpanNameFormatter = f
+ })
+}
+
+// WithClientTrace takes a function that returns client trace instance that will be
+// applied to the requests sent through the otelhttp Transport.
+func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option {
+ return optionFunc(func(c *config) {
+ c.ClientTrace = f
+ })
+}
+
+// WithServerName returns an Option that sets the name of the (virtual) server
+// handling requests.
+func WithServerName(server string) Option {
+ return optionFunc(func(c *config) {
+ c.ServerName = server
+ })
+}
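
A sketch composing several of the options defined above; the server name and span-name scheme are illustrative choices, not defaults:

package httpserver

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel"
)

// NewHandler wraps mux with tracing and metrics using the options above.
func NewHandler(mux *http.ServeMux) http.Handler {
	return otelhttp.NewHandler(mux, "http.server",
		otelhttp.WithTracerProvider(otel.GetTracerProvider()),
		otelhttp.WithMessageEvents(otelhttp.ReadEvents, otelhttp.WriteEvents),
		otelhttp.WithSpanNameFormatter(func(op string, r *http.Request) string {
			return op + " " + r.Method
		}),
		otelhttp.WithServerName("frontend"),
	)
}
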
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
new file mode 100644
index 000000000000..56b24b982aef
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
@@ -0,0 +1,7 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package otelhttp provides an http.Handler and functions that are intended
+// to be used to add tracing by wrapping existing handlers (with Handler) and
+// routes (with WithRouteTag).
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
new file mode 100644
index 000000000000..d01bdccf40dc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/felixge/httpsnoop"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// middleware is an http middleware which wraps the next handler in a span.
+type middleware struct {
+ operation string
+ server string
+
+ tracer trace.Tracer
+ meter metric.Meter
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ readEvent bool
+ writeEvent bool
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ publicEndpoint bool
+ publicEndpointFn func(*http.Request) bool
+
+ traceSemconv semconv.HTTPServer
+ requestBytesCounter metric.Int64Counter
+ responseBytesCounter metric.Int64Counter
+ serverLatencyMeasure metric.Float64Histogram
+}
+
+func defaultHandlerFormatter(operation string, _ *http.Request) string {
+ return operation
+}
+
+// NewHandler wraps the passed handler in a span named after the operation and
+// enriches it with metrics.
+func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler {
+ return NewMiddleware(operation, opts...)(handler)
+}
+
+// NewMiddleware returns a tracing and metrics instrumentation middleware.
+// The handler returned by the middleware wraps a handler
+// in a span named after the operation and enriches it with metrics.
+func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler {
+ h := middleware{
+ operation: operation,
+
+ traceSemconv: semconv.NewHTTPServer(),
+ }
+
+ defaultOpts := []Option{
+ WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)),
+ WithSpanNameFormatter(defaultHandlerFormatter),
+ }
+
+ c := newConfig(append(defaultOpts, opts...)...)
+ h.configure(c)
+ h.createMeasures()
+
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ h.serveHTTP(w, r, next)
+ })
+ }
+}
+
+func (h *middleware) configure(c *config) {
+ h.tracer = c.Tracer
+ h.meter = c.Meter
+ h.propagators = c.Propagators
+ h.spanStartOptions = c.SpanStartOptions
+ h.readEvent = c.ReadEvent
+ h.writeEvent = c.WriteEvent
+ h.filters = c.Filters
+ h.spanNameFormatter = c.SpanNameFormatter
+ h.publicEndpoint = c.PublicEndpoint
+ h.publicEndpointFn = c.PublicEndpointFn
+ h.server = c.ServerName
+}
+
+func handleErr(err error) {
+ if err != nil {
+ otel.Handle(err)
+ }
+}
+
+func (h *middleware) createMeasures() {
+ var err error
+ h.requestBytesCounter, err = h.meter.Int64Counter(
+ serverRequestSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP request messages."),
+ )
+ handleErr(err)
+
+ h.responseBytesCounter, err = h.meter.Int64Counter(
+ serverResponseSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP response messages."),
+ )
+ handleErr(err)
+
+ h.serverLatencyMeasure, err = h.meter.Float64Histogram(
+ serverDuration,
+ metric.WithUnit("ms"),
+ metric.WithDescription("Measures the duration of inbound HTTP requests."),
+ )
+ handleErr(err)
+}
+
+// serveHTTP sets up tracing and calls the given next http.Handler with the span
+// context injected into the request context.
+func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) {
+ requestStartTime := time.Now()
+ for _, f := range h.filters {
+ if !f(r) {
+ // Simply pass through to the handler if a filter rejects the request
+ next.ServeHTTP(w, r)
+ return
+ }
+ }
+
+ ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
+ opts := []trace.SpanStartOption{
+ trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...),
+ }
+
+ opts = append(opts, h.spanStartOptions...)
+ if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) {
+ opts = append(opts, trace.WithNewRoot())
+ // Linking incoming span context if any for public endpoint.
+ if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() {
+ opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s}))
+ }
+ }
+
+ tracer := h.tracer
+
+ if tracer == nil {
+ if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+ tracer = newTracer(span.TracerProvider())
+ } else {
+ tracer = newTracer(otel.GetTracerProvider())
+ }
+ }
+
+ ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
+ defer span.End()
+
+ readRecordFunc := func(int64) {}
+ if h.readEvent {
+ readRecordFunc = func(n int64) {
+ span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n)))
+ }
+ }
+
+ var bw bodyWrapper
+ // If the request body is nil or NoBody, don't wrap it: replacing the
+ // body would change its identity in unforeseeable ways, since callers
+ // may assert that the ReadCloser is a specific type, nil, or NoBody.
+ if r.Body != nil && r.Body != http.NoBody {
+ bw.ReadCloser = r.Body
+ bw.record = readRecordFunc
+ r.Body = &bw
+ }
+
+ writeRecordFunc := func(int64) {}
+ if h.writeEvent {
+ writeRecordFunc = func(n int64) {
+ span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n)))
+ }
+ }
+
+ rww := &respWriterWrapper{
+ ResponseWriter: w,
+ record: writeRecordFunc,
+ ctx: ctx,
+ props: h.propagators,
+ statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
+ }
+
+ // Wrap w to use our ResponseWriter methods while also exposing
+ // other interfaces that w may implement (http.CloseNotifier,
+ // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom).
+
+ w = httpsnoop.Wrap(w, httpsnoop.Hooks{
+ Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc {
+ return rww.Header
+ },
+ Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
+ return rww.Write
+ },
+ WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+ return rww.WriteHeader
+ },
+ Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc {
+ return rww.Flush
+ },
+ })
+
+ labeler, found := LabelerFromContext(ctx)
+ if !found {
+ ctx = ContextWithLabeler(ctx, labeler)
+ }
+
+ next.ServeHTTP(w, r.WithContext(ctx))
+
+ span.SetStatus(semconv.ServerStatus(rww.statusCode))
+ span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
+ StatusCode: rww.statusCode,
+ ReadBytes: bw.read.Load(),
+ ReadError: bw.err,
+ WriteBytes: rww.written,
+ WriteError: rww.err,
+ })...)
+
+ // Add metrics
+ attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...)
+ if rww.statusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
+ }
+ o := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ addOpts := []metric.AddOption{o} // Allocate vararg slice once.
+ h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...)
+ h.responseBytesCounter.Add(ctx, rww.written, addOpts...)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+ h.serverLatencyMeasure.Record(ctx, elapsedTime, o)
+}
+
+// WithRouteTag annotates spans and metrics with the provided route name
+// using the HTTP route attribute.
+func WithRouteTag(route string, h http.Handler) http.Handler {
+ attr := semconv.NewHTTPServer().Route(route)
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ span := trace.SpanFromContext(r.Context())
+ span.SetAttributes(attr)
+
+ labeler, _ := LabelerFromContext(r.Context())
+ labeler.Add(attr)
+
+ h.ServeHTTP(w, r)
+ })
+}
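
A sketch of the middleware and route tagging above in use; the route pattern is illustrative:

package routes

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// Handler builds a mux whose spans and metrics carry an http.route tag.
func Handler() http.Handler {
	mux := http.NewServeMux()
	mux.Handle("/users/", otelhttp.WithRouteTag("/users/:id",
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNoContent)
		})))
	return otelhttp.NewMiddleware("http.server")(mux)
}
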
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
new file mode 100644
index 000000000000..3ec0ad00c81f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+)
+
+type ResponseTelemetry struct {
+ StatusCode int
+ ReadBytes int64
+ ReadError error
+ WriteBytes int64
+ WriteError error
+}
+
+type HTTPServer struct {
+ duplicate bool
+}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
+ if s.duplicate {
+ return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...)
+ }
+ return oldHTTPServer{}.RequestTraceAttrs(server, req)
+}
+
+// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
+//
+// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
+func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
+ if s.duplicate {
+ return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...)
+ }
+ return oldHTTPServer{}.ResponseTraceAttrs(resp)
+}
+
+// Route returns the attribute for the route.
+func (s HTTPServer) Route(route string) attribute.KeyValue {
+ return oldHTTPServer{}.Route(route)
+}
+
+func NewHTTPServer() HTTPServer {
+ env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE"))
+ return HTTPServer{duplicate: env == "http/dup"}
+}
+
+// ServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func ServerStatus(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 500 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
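
The status mapping above, spelled out: 1xx-4xx leave the span status Unset, 5xx is Error, and anything outside 100-599 is Error with a message. Since the package is internal, this standalone sketch re-states that contract rather than importing it:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

// serverStatus mirrors the internal ServerStatus helper above.
func serverStatus(code int) (codes.Code, string) {
	switch {
	case code < 100 || code >= 600:
		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
	case code >= 500:
		return codes.Error, ""
	default:
		return codes.Unset, ""
	}
}

func main() {
	for _, c := range []int{200, 404, 503, 700} {
		sc, msg := serverStatus(c)
		fmt.Println(c, sc, msg) // 200/404 -> Unset, 503 -> Error, 700 -> Error + message
	}
}
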
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
new file mode 100644
index 000000000000..e7f293761bd0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+ port = -1
+
+ if strings.HasPrefix(hostport, "[") {
+ addrEnd := strings.LastIndex(hostport, "]")
+ if addrEnd < 0 {
+ // Invalid hostport.
+ return
+ }
+ if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+ host = hostport[1:addrEnd]
+ return
+ }
+ } else {
+ if i := strings.LastIndex(hostport, ":"); i < 0 {
+ host = hostport
+ return
+ }
+ }
+
+ host, pStr, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return
+ }
+
+ p, err := strconv.ParseUint(pStr, 10, 16)
+ if err != nil {
+ return
+ }
+ return host, int(p)
+}
+
+func requiredHTTPPort(https bool, port int) int { // nolint:revive
+ if https {
+ if port > 0 && port != 443 {
+ return port
+ }
+ } else {
+ if port > 0 && port != 80 {
+ return port
+ }
+ }
+ return -1
+}
+
+func serverClientIP(xForwardedFor string) string {
+ if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
+ xForwardedFor = xForwardedFor[:idx]
+ }
+ return xForwardedFor
+}
+
+func netProtocol(proto string) (name string, version string) {
+ name, version, _ = strings.Cut(proto, "/")
+ name = strings.ToLower(name)
+ return name, version
+}
+
+var methodLookup = map[string]attribute.KeyValue{
+ http.MethodConnect: semconvNew.HTTPRequestMethodConnect,
+ http.MethodDelete: semconvNew.HTTPRequestMethodDelete,
+ http.MethodGet: semconvNew.HTTPRequestMethodGet,
+ http.MethodHead: semconvNew.HTTPRequestMethodHead,
+ http.MethodOptions: semconvNew.HTTPRequestMethodOptions,
+ http.MethodPatch: semconvNew.HTTPRequestMethodPatch,
+ http.MethodPost: semconvNew.HTTPRequestMethodPost,
+ http.MethodPut: semconvNew.HTTPRequestMethodPut,
+ http.MethodTrace: semconvNew.HTTPRequestMethodTrace,
+}
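
Worked examples of the parsing contract above (-1 means no usable port). The helper is internal, so this sketch re-derives the same results with the standard library rather than importing it:

package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// splitHostPort mirrors the documented behavior of the internal helper.
func splitHostPort(hostport string) (string, int) {
	if !strings.Contains(hostport, ":") || strings.HasSuffix(hostport, "]") {
		return strings.Trim(hostport, "[]"), -1 // bare host, possibly bracketed
	}
	h, p, err := net.SplitHostPort(hostport)
	if err != nil {
		return "", -1
	}
	port, err := strconv.ParseUint(p, 10, 16)
	if err != nil {
		return h, -1
	}
	return h, int(port)
}

func main() {
	for _, in := range []string{"example.com", "example.com:8080", "[::1]", "[::1]:8080", ":8080"} {
		h, p := splitHostPort(in)
		fmt.Printf("%-16s -> %q, %d\n", in, h, p)
	}
}
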
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
new file mode 100644
index 000000000000..c3e838aaa542
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
@@ -0,0 +1,74 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "errors"
+ "io"
+ "net/http"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+type oldHTTPServer struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
+ return semconvutil.HTTPServerRequest(server, req)
+}
+
+// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
+//
+// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
+func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
+ attributes := []attribute.KeyValue{}
+
+ if resp.ReadBytes > 0 {
+ attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes)))
+ }
+ if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) {
+ // This is not in the semantic conventions, but is historically provided
+ attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error()))
+ }
+ if resp.WriteBytes > 0 {
+ attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes)))
+ }
+ if resp.StatusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode))
+ }
+ if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) {
+ // This is not in the semantic conventions, but is historically provided
+ attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error()))
+ }
+
+ return attributes
+}
+
+// Route returns the attribute for the route.
+func (o oldHTTPServer) Route(route string) attribute.KeyValue {
+ return semconv.HTTPRoute(route)
+}
+
+// HTTPStatusCode returns the attribute for the HTTP status code.
+// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added.
+func HTTPStatusCode(status int) attribute.KeyValue {
+ return semconv.HTTPStatusCode(status)
+}
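
For a concrete picture, here are the v1.20.0 attributes ResponseTraceAttrs would emit for a 200 response that read 128 request bytes and wrote 512 response bytes, built with the same public semconv constructors the code above uses:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.HTTPRequestContentLength(128),  // from resp.ReadBytes
		semconv.HTTPResponseContentLength(512), // from resp.WriteBytes
		semconv.HTTPStatusCode(200),            // from resp.StatusCode
	}
	for _, a := range attrs {
		fmt.Printf("%s=%s\n", a.Key, a.Value.Emit())
	}
}
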
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go
new file mode 100644
index 000000000000..0c5d4c4608a2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go
@@ -0,0 +1,197 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "net/http"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+type newHTTPServer struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
+ count := 3 // ServerAddress, Method, Scheme
+
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server)
+ if p < 0 {
+ _, p = splitHostPort(req.Host)
+ }
+ }
+
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ count++
+ }
+
+ method, methodOriginal := n.method(req.Method)
+ if methodOriginal != (attribute.KeyValue{}) {
+ count++
+ }
+
+ scheme := n.scheme(req.TLS != nil)
+
+ if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" {
+ // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
+ // file-path that would be interpreted with a sock family.
+ count++
+ if peerPort > 0 {
+ count++
+ }
+ }
+
+ useragent := req.UserAgent()
+ if useragent != "" {
+ count++
+ }
+
+ clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+ if clientIP != "" {
+ count++
+ }
+
+ if req.URL != nil && req.URL.Path != "" {
+ count++
+ }
+
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" && protoName != "http" {
+ count++
+ }
+ if protoVersion != "" {
+ count++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, count)
+ attrs = append(attrs,
+ semconvNew.ServerAddress(host),
+ method,
+ scheme,
+ )
+
+ if hostPort > 0 {
+ attrs = append(attrs, semconvNew.ServerPort(hostPort))
+ }
+ if methodOriginal != (attribute.KeyValue{}) {
+ attrs = append(attrs, methodOriginal)
+ }
+
+ if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" {
+ // The Go HTTP server sets RemoteAddr to "IP:port"; this will not be a
+ // file-path that would be interpreted with a sock family.
+ attrs = append(attrs, semconvNew.NetworkPeerAddress(peer))
+ if peerPort > 0 {
+ attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort))
+ }
+ }
+
+ if useragent := req.UserAgent(); useragent != "" {
+ attrs = append(attrs, semconvNew.UserAgentOriginal(useragent))
+ }
+
+ if clientIP != "" {
+ attrs = append(attrs, semconvNew.ClientAddress(clientIP))
+ }
+
+ if req.URL != nil && req.URL.Path != "" {
+ attrs = append(attrs, semconvNew.URLPath(req.URL.Path))
+ }
+
+ if protoName != "" && protoName != "http" {
+ attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
+ }
+
+ return attrs
+}
+
+func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) {
+ if method == "" {
+ return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
+ }
+ if attr, ok := methodLookup[method]; ok {
+ return attr, attribute.KeyValue{}
+ }
+
+ orig := semconvNew.HTTPRequestMethodOriginal(method)
+ if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
+ return attr, orig
+ }
+ return semconvNew.HTTPRequestMethodGet, orig
+}
+
+func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
+ if https {
+ return semconvNew.URLScheme("https")
+ }
+ return semconvNew.URLScheme("http")
+}
+
+// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
+//
+// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
+func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
+ var count int
+
+ if resp.ReadBytes > 0 {
+ count++
+ }
+ if resp.WriteBytes > 0 {
+ count++
+ }
+ if resp.StatusCode > 0 {
+ count++
+ }
+
+ attributes := make([]attribute.KeyValue, 0, count)
+
+ if resp.ReadBytes > 0 {
+ attributes = append(attributes,
+ semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)),
+ )
+ }
+ if resp.WriteBytes > 0 {
+ attributes = append(attributes,
+ semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)),
+ )
+ }
+ if resp.StatusCode > 0 {
+ attributes = append(attributes,
+ semconvNew.HTTPResponseStatusCode(resp.StatusCode),
+ )
+ }
+
+ return attributes
+}
+
+// Route returns the attribute for the route.
+func (n newHTTPServer) Route(route string) attribute.KeyValue {
+ return semconvNew.HTTPRoute(route)
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
new file mode 100644
index 000000000000..7aa5f99e8158
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+// Generate semconvutil package:
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
new file mode 100644
index 000000000000..a73bb06e90e4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
@@ -0,0 +1,575 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconvutil/httpconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+// HTTPClientResponse returns trace attributes for an HTTP response received by a
+// client from a server. It will return the following attributes if the related
+// values are defined in resp: "http.status_code",
+// "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event,
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...)
+func HTTPClientResponse(resp *http.Response) []attribute.KeyValue {
+ return hc.ClientResponse(resp)
+}
+
+// HTTPClientRequest returns trace attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.url", "http.method",
+// "net.peer.name". The following attributes are returned if the related values
+// are defined in req: "net.peer.port", "user_agent.original",
+// "http.request_content_length".
+func HTTPClientRequest(req *http.Request) []attribute.KeyValue {
+ return hc.ClientRequest(req)
+}
+
+// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.method", "net.peer.name".
+// The following attributes are returned if the
+// related values are defined in req: "net.peer.port".
+func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue {
+ return hc.ClientRequestMetrics(req)
+}
+
+// HTTPClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func HTTPClientStatus(code int) (codes.Code, string) {
+ return hc.ClientStatus(code)
+}
+
+// HTTPServerRequest returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.target", "net.host.name". The following attributes are returned if
+// the related values are defined in req: "net.host.port", "net.sock.peer.addr",
+// "net.sock.peer.port", "user_agent.original", "http.client_ip".
+func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue {
+ return hc.ServerRequest(server, req)
+}
+
+// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "net.host.name". The following attributes are returned if they related
+// values are defined in req: "net.host.port".
+func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+ return hc.ServerRequestMetrics(server, req)
+}
+
+// HTTPServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func HTTPServerStatus(code int) (codes.Code, string) {
+ return hc.ServerStatus(code)
+}
+
+// httpConv are the HTTP semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type httpConv struct {
+ NetConv *netConv
+
+ HTTPClientIPKey attribute.Key
+ HTTPMethodKey attribute.Key
+ HTTPRequestContentLengthKey attribute.Key
+ HTTPResponseContentLengthKey attribute.Key
+ HTTPRouteKey attribute.Key
+ HTTPSchemeHTTP attribute.KeyValue
+ HTTPSchemeHTTPS attribute.KeyValue
+ HTTPStatusCodeKey attribute.Key
+ HTTPTargetKey attribute.Key
+ HTTPURLKey attribute.Key
+ UserAgentOriginalKey attribute.Key
+}
+
+var hc = &httpConv{
+ NetConv: nc,
+
+ HTTPClientIPKey: semconv.HTTPClientIPKey,
+ HTTPMethodKey: semconv.HTTPMethodKey,
+ HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
+ HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
+ HTTPRouteKey: semconv.HTTPRouteKey,
+ HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
+ HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
+ HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
+ HTTPTargetKey: semconv.HTTPTargetKey,
+ HTTPURLKey: semconv.HTTPURLKey,
+ UserAgentOriginalKey: semconv.UserAgentOriginalKey,
+}
+
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. The following attributes are returned if the related values
+// are defined in resp: "http.status_code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event,
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+// append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.status_code int
+ http.response_content_length int
+ */
+ var n int
+ if resp.StatusCode > 0 {
+ n++
+ }
+ if resp.ContentLength > 0 {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ if resp.StatusCode > 0 {
+ attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
+ }
+ if resp.ContentLength > 0 {
+ attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
+ }
+ return attrs
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.method",
+// "net.peer.name". The following attributes are returned if the related values
+// are defined in req: "net.peer.port", "user_agent.original",
+// "http.request_content_length".
+func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ user_agent.original string
+ http.url string
+ net.peer.name string
+ net.peer.port int
+ http.request_content_length int
+ */
+
+ /* The following semantic conventions are not returned:
+ http.status_code This requires the response. See ClientResponse.
+ http.response_content_length This requires the response. See ClientResponse.
+ net.sock.family This requires the socket used.
+ net.sock.peer.addr This requires the socket used.
+ net.sock.peer.name This requires the socket used.
+ net.sock.peer.port This requires the socket used.
+ http.resend_count This is something outside of a single request.
+ net.protocol.name The value in the Request is ignored, and the Go client will always use "http".
+ net.protocol.version The value in the Request is ignored, and the Go client will always use 1.1 or 2.0.
+ */
+ n := 3 // URL, peer name, and method.
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ peer, p := firstHostPort(h, req.Header.Get("Host"))
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+ if port > 0 {
+ n++
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+ if req.ContentLength > 0 {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.method(req.Method))
+
+ var u string
+ if req.URL != nil {
+ // Remove any username/password info that may be in the URL.
+ userinfo := req.URL.User
+ req.URL.User = nil
+ u = req.URL.String()
+ // Restore any username/password info that was removed.
+ req.URL.User = userinfo
+ }
+ attrs = append(attrs, c.HTTPURLKey.String(u))
+
+ attrs = append(attrs, c.NetConv.PeerName(peer))
+ if port > 0 {
+ attrs = append(attrs, c.NetConv.PeerPort(port))
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+ }
+
+ if l := req.ContentLength; l > 0 {
+ attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
+ }
+
+ return attrs
+}
+
+// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.method", "net.peer.name".
+// The following attributes are returned if the related values
+// are defined in req: "net.peer.port".
+func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ net.peer.name string
+ net.peer.port int
+ */
+
+ n := 2 // method, peer name.
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ peer, p := firstHostPort(h, req.Header.Get("Host"))
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+ if port > 0 {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer))
+
+ if port > 0 {
+ attrs = append(attrs, c.NetConv.PeerPort(port))
+ }
+
+ return attrs
+}
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.target", "net.host.name". The following attributes are returned if they
+// related values are defined in req: "net.host.port", "net.sock.peer.addr",
+// "net.sock.peer.port", "user_agent.original", "http.client_ip",
+// "net.protocol.name", "net.protocol.version".
+func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ http.scheme string
+ net.host.name string
+ net.host.port int
+ net.sock.peer.addr string
+ net.sock.peer.port int
+ user_agent.original string
+ http.client_ip string
+ net.protocol.name string Note: not set if the value is "http".
+ net.protocol.version string
+ http.target string Note: doesn't include the query parameter.
+ */
+
+ /* The following semantic conventions are not returned:
+ http.status_code This requires the response.
+ http.request_content_length This requires the len() of body, which can mutate it.
+ http.response_content_length This requires the response.
+ http.route This is not available.
+ net.sock.peer.name This would require a DNS lookup.
+ net.sock.host.addr The request doesn't have access to the underlying socket.
+ net.sock.host.port The request doesn't have access to the underlying socket.
+
+ */
+ n := 4 // Method, scheme, proto, and host name.
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server)
+ if p < 0 {
+ _, p = splitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ peer, peerPort := splitHostPort(req.RemoteAddr)
+ if peer != "" {
+ n++
+ if peerPort > 0 {
+ n++
+ }
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+
+ clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+ if clientIP != "" {
+ n++
+ }
+
+ var target string
+ if req.URL != nil {
+ target = req.URL.Path
+ if target != "" {
+ n++
+ }
+ }
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" && protoName != "http" {
+ n++
+ }
+ if protoVersion != "" {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.method(req.Method))
+ attrs = append(attrs, c.scheme(req.TLS != nil))
+ attrs = append(attrs, c.NetConv.HostName(host))
+
+ if hostPort > 0 {
+ attrs = append(attrs, c.NetConv.HostPort(hostPort))
+ }
+
+ if peer != "" {
+ // The Go HTTP server sets RemoteAddr to "IP:port"; this will not be a
+ // file-path that would be interpreted with a sock family.
+ attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
+ if peerPort > 0 {
+ attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
+ }
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+ }
+
+ if clientIP != "" {
+ attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
+ }
+
+ if target != "" {
+ attrs = append(attrs, c.HTTPTargetKey.String(target))
+ }
+
+ if protoName != "" && protoName != "http" {
+ attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
+ }
+
+ return attrs
+}
+
+// ServerRequestMetrics returns metric attributes for an HTTP request received
+// by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "net.host.name". The following attributes are returned if they related
+// values are defined in req: "net.host.port".
+func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.scheme string
+ http.method string
+ net.host.name string
+ net.host.port int
+ net.protocol.name string Note: not set if the value is "http".
+ net.protocol.version string
+ */
+
+ n := 3 // Method, scheme, and host name.
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server)
+ if p < 0 {
+ _, p = splitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" {
+ n++
+ }
+ if protoVersion != "" {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.methodMetric(req.Method))
+ attrs = append(attrs, c.scheme(req.TLS != nil))
+ attrs = append(attrs, c.NetConv.HostName(host))
+
+ if hostPort > 0 {
+ attrs = append(attrs, c.NetConv.HostPort(hostPort))
+ }
+ if protoName != "" {
+ attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
+ }
+
+ return attrs
+}
+
+func (c *httpConv) method(method string) attribute.KeyValue {
+ if method == "" {
+ return c.HTTPMethodKey.String(http.MethodGet)
+ }
+ return c.HTTPMethodKey.String(method)
+}
+
+func (c *httpConv) methodMetric(method string) attribute.KeyValue {
+ method = strings.ToUpper(method)
+ switch method {
+ case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
+ default:
+ method = "_OTHER"
+ }
+ return c.HTTPMethodKey.String(method)
+}
+
+func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive
+ if https {
+ return c.HTTPSchemeHTTPS
+ }
+ return c.HTTPSchemeHTTP
+}
+
+func serverClientIP(xForwardedFor string) string {
+ if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
+ xForwardedFor = xForwardedFor[:idx]
+ }
+ return xForwardedFor
+}
+
+func requiredHTTPPort(https bool, port int) int { // nolint:revive
+ if https {
+ if port > 0 && port != 443 {
+ return port
+ }
+ } else {
+ if port > 0 && port != 80 {
+ return port
+ }
+ }
+ return -1
+}
+
+// Return the request host and port from the first non-empty source.
+func firstHostPort(source ...string) (host string, port int) {
+ for _, hostport := range source {
+ host, port = splitHostPort(hostport)
+ if host != "" || port > 0 {
+ break
+ }
+ }
+ return
+}
+
+// ClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func (c *httpConv) ClientStatus(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 400 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
+
+// ServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func (c *httpConv) ServerStatus(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 500 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
new file mode 100644
index 000000000000..a9a9226b39af
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
@@ -0,0 +1,205 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconvutil/netconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+ "net"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+// NetTransport returns a trace attribute describing the transport protocol of the
+// passed network. See net.Dial for information about acceptable network
+// values.
+func NetTransport(network string) attribute.KeyValue {
+ return nc.Transport(network)
+}
+
+// netConv are the network semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type netConv struct {
+ NetHostNameKey attribute.Key
+ NetHostPortKey attribute.Key
+ NetPeerNameKey attribute.Key
+ NetPeerPortKey attribute.Key
+ NetProtocolName attribute.Key
+ NetProtocolVersion attribute.Key
+ NetSockFamilyKey attribute.Key
+ NetSockPeerAddrKey attribute.Key
+ NetSockPeerPortKey attribute.Key
+ NetSockHostAddrKey attribute.Key
+ NetSockHostPortKey attribute.Key
+ NetTransportOther attribute.KeyValue
+ NetTransportTCP attribute.KeyValue
+ NetTransportUDP attribute.KeyValue
+ NetTransportInProc attribute.KeyValue
+}
+
+var nc = &netConv{
+ NetHostNameKey: semconv.NetHostNameKey,
+ NetHostPortKey: semconv.NetHostPortKey,
+ NetPeerNameKey: semconv.NetPeerNameKey,
+ NetPeerPortKey: semconv.NetPeerPortKey,
+ NetProtocolName: semconv.NetProtocolNameKey,
+ NetProtocolVersion: semconv.NetProtocolVersionKey,
+ NetSockFamilyKey: semconv.NetSockFamilyKey,
+ NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
+ NetSockPeerPortKey: semconv.NetSockPeerPortKey,
+ NetSockHostAddrKey: semconv.NetSockHostAddrKey,
+ NetSockHostPortKey: semconv.NetSockHostPortKey,
+ NetTransportOther: semconv.NetTransportOther,
+ NetTransportTCP: semconv.NetTransportTCP,
+ NetTransportUDP: semconv.NetTransportUDP,
+ NetTransportInProc: semconv.NetTransportInProc,
+}
+
+func (c *netConv) Transport(network string) attribute.KeyValue {
+ switch network {
+ case "tcp", "tcp4", "tcp6":
+ return c.NetTransportTCP
+ case "udp", "udp4", "udp6":
+ return c.NetTransportUDP
+ case "unix", "unixgram", "unixpacket":
+ return c.NetTransportInProc
+ default:
+ // "ip:*", "ip4:*", and "ip6:*" all are considered other.
+ return c.NetTransportOther
+ }
+}
+
+// Host returns attributes for a network host address.
+func (c *netConv) Host(address string) []attribute.KeyValue {
+ h, p := splitHostPort(address)
+ var n int
+ if h != "" {
+ n++
+ if p > 0 {
+ n++
+ }
+ }
+
+ if n == 0 {
+ return nil
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.HostName(h))
+ if p > 0 {
+ attrs = append(attrs, c.HostPort(p))
+ }
+ return attrs
+}
+
+func (c *netConv) HostName(name string) attribute.KeyValue {
+ return c.NetHostNameKey.String(name)
+}
+
+func (c *netConv) HostPort(port int) attribute.KeyValue {
+ return c.NetHostPortKey.Int(port)
+}
+
+func family(network, address string) string {
+ switch network {
+ case "unix", "unixgram", "unixpacket":
+ return "unix"
+ default:
+ if ip := net.ParseIP(address); ip != nil {
+ if ip.To4() == nil {
+ return "inet6"
+ }
+ return "inet"
+ }
+ }
+ return ""
+}
+
+// Peer returns attributes for a network peer address.
+func (c *netConv) Peer(address string) []attribute.KeyValue {
+ h, p := splitHostPort(address)
+ var n int
+ if h != "" {
+ n++
+ if p > 0 {
+ n++
+ }
+ }
+
+ if n == 0 {
+ return nil
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.PeerName(h))
+ if p > 0 {
+ attrs = append(attrs, c.PeerPort(p))
+ }
+ return attrs
+}
+
+func (c *netConv) PeerName(name string) attribute.KeyValue {
+ return c.NetPeerNameKey.String(name)
+}
+
+func (c *netConv) PeerPort(port int) attribute.KeyValue {
+ return c.NetPeerPortKey.Int(port)
+}
+
+func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
+ return c.NetSockPeerAddrKey.String(addr)
+}
+
+func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
+ return c.NetSockPeerPortKey.Int(port)
+}
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+ port = -1
+
+ if strings.HasPrefix(hostport, "[") {
+ addrEnd := strings.LastIndex(hostport, "]")
+ if addrEnd < 0 {
+ // Invalid hostport.
+ return
+ }
+ if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+ host = hostport[1:addrEnd]
+ return
+ }
+ } else {
+ if i := strings.LastIndex(hostport, ":"); i < 0 {
+ host = hostport
+ return
+ }
+ }
+
+ host, pStr, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return
+ }
+
+ p, err := strconv.ParseUint(pStr, 10, 16)
+ if err != nil {
+ return
+ }
+ return host, int(p)
+}
+
+func netProtocol(proto string) (name string, version string) {
+ name, version, _ = strings.Cut(proto, "/")
+ name = strings.ToLower(name)
+ return name, version
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
new file mode 100644
index 000000000000..ea504e396f13
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Labeler is used to allow instrumented HTTP handlers to add custom attributes to
+// the metrics recorded by the net/http instrumentation.
+type Labeler struct {
+ mu sync.Mutex
+ attributes []attribute.KeyValue
+}
+
+// Add attributes to a Labeler.
+func (l *Labeler) Add(ls ...attribute.KeyValue) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.attributes = append(l.attributes, ls...)
+}
+
+// Get returns a copy of the attributes added to the Labeler.
+func (l *Labeler) Get() []attribute.KeyValue {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ ret := make([]attribute.KeyValue, len(l.attributes))
+ copy(ret, l.attributes)
+ return ret
+}
+
+type labelerContextKeyType int
+
+const lablelerContextKey labelerContextKeyType = 0
+
+// ContextWithLabeler returns a new context with the provided Labeler instance.
+// Attributes added to the specified labeler will be injected into metrics
+// emitted by the instrumentation. Only one labeler can be injected into the
+// context. Injecting it multiple times will override the previous calls.
+func ContextWithLabeler(parent context.Context, l *Labeler) context.Context {
+ return context.WithValue(parent, lablelerContextKey, l)
+}
+
+// LabelerFromContext retrieves a Labeler instance from the provided context if
+// one is available. If no Labeler was found in the provided context a new, empty
+// Labeler is returned and the second return value is false. In this case it is
+// safe to use the Labeler but any attributes added to it will not be used.
+func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
+ l, ok := ctx.Value(lablelerContextKey).(*Labeler)
+ if !ok {
+ l = &Labeler{}
+ }
+ return l, ok
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
new file mode 100644
index 000000000000..0d3cb2e4aa4f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -0,0 +1,277 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/http/httptrace"
+ "sync/atomic"
+ "time"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/propagation"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Transport implements the http.RoundTripper interface and wraps
+// outbound HTTP(S) requests with a span and enriches them with metrics.
+type Transport struct {
+ rt http.RoundTripper
+
+ tracer trace.Tracer
+ meter metric.Meter
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ clientTrace func(context.Context) *httptrace.ClientTrace
+
+ requestBytesCounter metric.Int64Counter
+ responseBytesCounter metric.Int64Counter
+ latencyMeasure metric.Float64Histogram
+}
+
+var _ http.RoundTripper = &Transport{}
+
+// NewTransport wraps the provided http.RoundTripper with one that
+// starts a span, injects the span context into the outbound request headers,
+// and enriches it with metrics.
+//
+// If the provided http.RoundTripper is nil, http.DefaultTransport will be used
+// as the base http.RoundTripper.
+func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
+ if base == nil {
+ base = http.DefaultTransport
+ }
+
+ t := Transport{
+ rt: base,
+ }
+
+ defaultOpts := []Option{
+ WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
+ WithSpanNameFormatter(defaultTransportFormatter),
+ }
+
+ c := newConfig(append(defaultOpts, opts...)...)
+ t.applyConfig(c)
+ t.createMeasures()
+
+ return &t
+}
+
+func (t *Transport) applyConfig(c *config) {
+ t.tracer = c.Tracer
+ t.meter = c.Meter
+ t.propagators = c.Propagators
+ t.spanStartOptions = c.SpanStartOptions
+ t.filters = c.Filters
+ t.spanNameFormatter = c.SpanNameFormatter
+ t.clientTrace = c.ClientTrace
+}
+
+func (t *Transport) createMeasures() {
+ var err error
+ t.requestBytesCounter, err = t.meter.Int64Counter(
+ clientRequestSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP request messages."),
+ )
+ handleErr(err)
+
+ t.responseBytesCounter, err = t.meter.Int64Counter(
+ clientResponseSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP response messages."),
+ )
+ handleErr(err)
+
+ t.latencyMeasure, err = t.meter.Float64Histogram(
+ clientDuration,
+ metric.WithUnit("ms"),
+ metric.WithDescription("Measures the duration of outbound HTTP requests."),
+ )
+ handleErr(err)
+}
+
+func defaultTransportFormatter(_ string, r *http.Request) string {
+ return "HTTP " + r.Method
+}
+
+// RoundTrip creates a Span and propagates its context via the provided request's headers
+// before handing the request to the configured base RoundTripper. The created span will
+// end when the response body is closed or when a read from the body returns io.EOF.
+func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
+ requestStartTime := time.Now()
+ for _, f := range t.filters {
+ if !f(r) {
+ // Simply pass through to the base RoundTripper if a filter rejects the request
+ return t.rt.RoundTrip(r)
+ }
+ }
+
+ tracer := t.tracer
+
+ if tracer == nil {
+ if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+ tracer = newTracer(span.TracerProvider())
+ } else {
+ tracer = newTracer(otel.GetTracerProvider())
+ }
+ }
+
+ opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
+
+ ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
+
+ if t.clientTrace != nil {
+ ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
+ }
+
+ labeler, found := LabelerFromContext(ctx)
+ if !found {
+ ctx = ContextWithLabeler(ctx, labeler)
+ }
+
+ r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request.
+
+ // use a body wrapper to determine the request size
+ var bw bodyWrapper
+ // If the request body is nil or http.NoBody we must not wrap it: callers
+ // (and net/http itself) compare the body against those sentinel values,
+ // and replacing them would change that identity in unforeseeable ways.
+ if r.Body != nil && r.Body != http.NoBody {
+ bw.ReadCloser = r.Body
+ // No-op to prevent a nil panic; this record func is not used yet.
+ bw.record = func(int64) {}
+ r.Body = &bw
+ }
+
+ span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
+ t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
+
+ res, err := t.rt.RoundTrip(r)
+ if err != nil {
+ span.RecordError(err)
+ span.SetStatus(codes.Error, err.Error())
+ span.End()
+ return res, err
+ }
+
+ // metrics
+ metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...)
+ if res.StatusCode > 0 {
+ metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode))
+ }
+ o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...))
+ addOpts := []metric.AddOption{o} // Allocate vararg slice once.
+ t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...)
+ // For handling response bytes we leverage a callback when the client reads the http response
+ readRecordFunc := func(n int64) {
+ t.responseBytesCounter.Add(ctx, n, addOpts...)
+ }
+
+ // traces
+ span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
+ span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
+
+ res.Body = newWrappedBody(span, readRecordFunc, res.Body)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+ t.latencyMeasure.Record(ctx, elapsedTime, o)
+
+ return res, err
+}
+
+// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
+// io.ReadCloser. If the passed body implements io.Writer, the returned value
+// will implement io.ReadWriteCloser.
+func newWrappedBody(span trace.Span, record func(n int64), body io.ReadCloser) io.ReadCloser {
+ // Successful protocol switch responses will have a body that
+ // implements io.ReadWriteCloser. Ensure this interface type continues
+ // to be satisfied if that is the case.
+ if _, ok := body.(io.ReadWriteCloser); ok {
+ return &wrappedBody{span: span, record: record, body: body}
+ }
+
+ // Remove the io.ReadWriteCloser implementation and expose only
+ // io.ReadCloser.
+ return struct{ io.ReadCloser }{&wrappedBody{span: span, record: record, body: body}}
+}
+
+// wrappedBody is the response body type returned by the transport
+// instrumentation to complete a span. Errors encountered when using the
+// response body are recorded in the span tracking the response.
+//
+// The span tracking the response is ended when this body is closed.
+//
+// If the response body implements the io.Writer interface (i.e. for
+// successful protocol switches), the wrapped body also will.
+type wrappedBody struct {
+ span trace.Span
+ recorded atomic.Bool
+ record func(n int64)
+ body io.ReadCloser
+ read atomic.Int64
+}
+
+var _ io.ReadWriteCloser = &wrappedBody{}
+
+func (wb *wrappedBody) Write(p []byte) (int, error) {
+ // This will not panic given the guard in newWrappedBody.
+ n, err := wb.body.(io.Writer).Write(p)
+ if err != nil {
+ wb.span.RecordError(err)
+ wb.span.SetStatus(codes.Error, err.Error())
+ }
+ return n, err
+}
+
+func (wb *wrappedBody) Read(b []byte) (int, error) {
+ n, err := wb.body.Read(b)
+ // Record the number of bytes read
+ wb.read.Add(int64(n))
+
+ switch err {
+ case nil:
+ // nothing to do here but fall through to the return
+ case io.EOF:
+ wb.recordBytesRead()
+ wb.span.End()
+ default:
+ wb.span.RecordError(err)
+ wb.span.SetStatus(codes.Error, err.Error())
+ }
+ return n, err
+}
+
+// recordBytesRead is a function that ensures the number of bytes read is recorded once and only once.
+func (wb *wrappedBody) recordBytesRead() {
+ // note: it is more performant (and equally correct) to use atomic.Bool over sync.Once here. In the event that
+ // two goroutines are racing to call this method, the number of bytes read will no longer increase. Using
+ // CompareAndSwap allows later goroutines to return quickly and not block waiting for the race winner to finish
+ // calling wb.record(wb.read.Load()).
+ if wb.recorded.CompareAndSwap(false, true) {
+ // Record the total number of bytes read
+ wb.record(wb.read.Load())
+ }
+}
+
+func (wb *wrappedBody) Close() error {
+ wb.recordBytesRead()
+ wb.span.End()
+ if wb.body != nil {
+ return wb.body.Close()
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
new file mode 100644
index 000000000000..b0957f28cead
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+// Version is the current release version of the otelhttp instrumentation.
+func Version() string {
+ return "0.53.0"
+ // This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string {
+ return Version()
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
new file mode 100644
index 000000000000..948f8406c09d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
@@ -0,0 +1,99 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/propagation"
+)
+
+var _ io.ReadCloser = &bodyWrapper{}
+
+// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
+// of bytes read and the last error.
+type bodyWrapper struct {
+ io.ReadCloser
+ record func(n int64) // must not be nil
+
+ read atomic.Int64
+ err error
+}
+
+func (w *bodyWrapper) Read(b []byte) (int, error) {
+ n, err := w.ReadCloser.Read(b)
+ n1 := int64(n)
+ w.read.Add(n1)
+ w.err = err
+ w.record(n1)
+ return n, err
+}
+
+func (w *bodyWrapper) Close() error {
+ return w.ReadCloser.Close()
+}
+
+var _ http.ResponseWriter = &respWriterWrapper{}
+
+// respWriterWrapper wraps a http.ResponseWriter in order to track the number of
+// bytes written, the last error, and to catch the first written statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
+// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
+// that may be useful when using it in real life situations.
+type respWriterWrapper struct {
+ http.ResponseWriter
+ record func(n int64) // must not be nil
+
+ // used to inject the header
+ ctx context.Context
+
+ props propagation.TextMapPropagator
+
+ written int64
+ statusCode int
+ err error
+ wroteHeader bool
+}
+
+func (w *respWriterWrapper) Header() http.Header {
+ return w.ResponseWriter.Header()
+}
+
+func (w *respWriterWrapper) Write(p []byte) (int, error) {
+ if !w.wroteHeader {
+ w.WriteHeader(http.StatusOK)
+ }
+ n, err := w.ResponseWriter.Write(p)
+ n1 := int64(n)
+ w.record(n1)
+ w.written += n1
+ w.err = err
+ return n, err
+}
+
+// WriteHeader persists initial statusCode for span attribution.
+// All calls to WriteHeader will be propagated to the underlying ResponseWriter
+// and will persist the statusCode from the first call.
+// Consecutive calls are forwarded rather than blocked: suppressing them would
+// alter expected behavior and hide the warning logs from net/http that alert
+// developers to incorrect handler implementations.
+func (w *respWriterWrapper) WriteHeader(statusCode int) {
+ if !w.wroteHeader {
+ w.wroteHeader = true
+ w.statusCode = statusCode
+ }
+ w.ResponseWriter.WriteHeader(statusCode)
+}
+
+func (w *respWriterWrapper) Flush() {
+ if !w.wroteHeader {
+ w.WriteHeader(http.StatusOK)
+ }
+
+ if f, ok := w.ResponseWriter.(http.Flusher); ok {
+ f.Flush()
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
index ae6a3bcf12c8..6bf3abc41e70 100644
--- a/vendor/go.opentelemetry.io/otel/.codespellignore
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -3,3 +3,7 @@ fo
te
collison
consequentially
+ans
+nam
+valu
+thirdparty
diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc
index 4afbb1fb3bd7..e2cb3ea944bb 100644
--- a/vendor/go.opentelemetry.io/otel/.codespellrc
+++ b/vendor/go.opentelemetry.io/otel/.codespellrc
@@ -5,6 +5,6 @@ check-filenames =
check-hidden =
ignore-words = .codespellignore
interactive = 1
-skip = .git,go.mod,go.sum,semconv,venv,.tools
+skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools
uri-ignore-words-list = *
write =
diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules
deleted file mode 100644
index 38a1f56982ba..000000000000
--- a/vendor/go.opentelemetry.io/otel/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "opentelemetry-proto"]
- path = exporters/otlp/internal/opentelemetry-proto
- url = /~https://github.com/open-telemetry/opentelemetry-proto
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index a62511f382e2..6d9c8b64958b 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -11,6 +11,7 @@ linters:
enable:
- depguard
- errcheck
+ - errorlint
- godot
- gofumpt
- goimports
@@ -21,8 +22,11 @@ linters:
- misspell
- revive
- staticcheck
+ - tenv
- typecheck
+ - unconvert
- unused
+ - unparam
issues:
# Maximum issues count per one linter.
@@ -124,6 +128,8 @@ linters-settings:
- "**/example/**/*.go"
- "**/trace/*.go"
- "**/trace/**/*.go"
+ - "**/log/*.go"
+ - "**/log/**/*.go"
deny:
- pkg: "go.opentelemetry.io/otel/internal$"
desc: Do not use cross-module internal packages.
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index 24874f856e35..c01e6998e0b3 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,274 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
+## [1.28.0/0.50.0/0.4.0] 2024-07-02
+
+### Added
+
+- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`.
+ This method is used to check if an `Instrument` instance is a zero-value. (#5431)
+- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468)
+- The `go.opentelemetry.io/otel/semconv/v1.26.0` package.
+ The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476)
+- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499)
+- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530)
+
+### Changed
+
+- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490)
+ - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes.
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490)
+- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. (#5493)
+- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Tracer` to reduce memory allocation. (#5497)
+- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520)
+- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545)
+
+### Fixed
+
+- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376)
+- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434)
+- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435)
+- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456)
+- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464)
+- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508)
+- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514)
+- Fix stale timestamps reported by the last-value aggregation. (#5517)
+- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
+- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/otel/attribute` by reducing the number of allocations. (#5549)
+
+## [1.27.0/0.49.0/0.3.0] 2024-05-21
+
+### Added
+
+- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263)
+- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276)
+- Add metrics in the `otel-collector` example. (#5283)
+- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304)
+ - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`.
+ - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument.
+- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349)
+
+### Changed
+
+- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189)
+- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189)
+- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241)
+- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272)
+- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286)
+- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305)
+- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315)
+- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374)
+
+### Fixed
+
+- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306)
+- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311)
+- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365)
+- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371)
+- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375)
+
+## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24
+
+### Added
+
+- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134)
+- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194)
+- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`.
+ This new module contains the Go implementation of the OpenTelemetry Logs SDK.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
+ This new module contains an OTLP exporter that transmits log telemetry using HTTP.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
+ This new module contains an exporter that prints log records to STDOUT.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- The `go.opentelemetry.io/otel/semconv/v1.25.0` package.
+ The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254)
+
+### Changed
+
+- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177)
+- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214)
+- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244)
+
+### Fixed
+
+- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159)
+
+## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
+
+### Added
+
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906)
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4906)
+- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032)
+- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`.
+ This method reports whether a log record will be emitted (see the guard sketch after this list). (#5071)
+- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`.
+ This value represents an unset severity level. (#5072)
+- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076)
+- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`.
+ This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes.
+ At that point, users will be required to migrate their code, and this package will be deprecated and then removed. (#5085)
+- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100)
+- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108)
+- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116)
+- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117)
+- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111)
+- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528)
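The guard sketch referenced above shows how a log bridge might use `Enabled` to avoid building records that would never be emitted. The two-argument `Enabled(ctx, Record)` signature matches the alpha API at this release and has changed in later versions, so read it as a snapshot:

```go
package logbridge

import (
	"context"

	"go.opentelemetry.io/otel/log"
)

// emitIfEnabled skips record construction when the Logger reports the record
// would not be emitted.
func emitIfEnabled(ctx context.Context, logger log.Logger, body string) {
	var rec log.Record
	rec.SetSeverity(log.SeverityInfo)
	if !logger.Enabled(ctx, rec) {
		return // avoid the cost of attributes, body formatting, etc.
	}
	rec.SetBody(log.StringValue(body))
	logger.Emit(ctx, rec)
}
```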
+
+### Changed
+
+- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151)
+ Because of that, `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError).
+ Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext), which was used before, is now deprecated.
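A hedged sketch of what this means for exporter setup: with `grpc.NewClient` underneath, the connection is established lazily, so readiness checks via dial options are no-ops. The endpoint below is a hypothetical local collector:

```go
package example

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

// newExporter builds an OTLP trace exporter. grpc.WithBlock passed through
// WithDialOption would now be ignored; the client connects lazily on first
// export, so connection failures surface there rather than at New.
func newExporter(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"), // hypothetical endpoint
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		return err
	}
	_ = exp // register with an sdktrace.TracerProvider in real use
	return nil
}
```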
+
+### Fixed
+
+- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027)
+- Prevent default `ErrorHandler` self-delegation. (#5137)
+- Update all dependencies to address [GO-2024-2687]. (#5139)
+
+### Removed
+
+- Drop support for [Go 1.20]. (#4967)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734)
+
+## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23
+
+This release is the last to support [Go 1.20].
+The next release will require at least [Go 1.21].
+
+### Added
+
+- Support [Go 1.22]. (#4890)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900)
+- The `go.opentelemetry.io/otel/log` module is added.
+ This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
+ This module is in an alpha state; it is subject to breaking changes.
+ See our [versioning policy](./VERSIONING.md) for more info. (#4961)
+- ARM64 platform to the compatibility testing suite. (#4994)
+
+### Fixed
+
+- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945)
+- Fix negative buckets in output of exponential histograms. (#4956)
+
+## [1.23.1] 2024-02-07
+
+### Fixed
+
+- Register all callbacks passed during observable instrument creation in `go.opentelemetry.io/otel/sdk/metric`, instead of registering only the last callback multiple times. (#4888)
+
+## [1.23.0] 2024-02-06
+
+This release contains the first stable, `v1`, release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages (see the sketch after this list). (#4808)
+- Experimental exemplar exporting is added to the metric SDK.
+ See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. (#4871)
+- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`.
+ This error is returned when a merge of two `Resource`s with different (non-empty) schema URL is attempted. (#4876)
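As referenced in the `WithEndpointURL` entry above, a single URL can now configure endpoint, path, and TLS at once. A minimal sketch against the HTTP trace exporter, with a hypothetical local collector URL:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

// newHTTPExporter derives host, port, URL path, and the insecure flag from
// one URL instead of separate WithEndpoint/WithURLPath/WithInsecure options.
func newHTTPExporter(ctx context.Context) error {
	exp, err := otlptracehttp.New(ctx,
		otlptracehttp.WithEndpointURL("http://localhost:4318/v1/traces"),
	)
	if err != nil {
		return err
	}
	_ = exp
	return nil
}
```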
+
+### Changed
+
+- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict.
+ Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource` is returned along with the new `ErrSchemaURLConflict` error.
+ It is up to the user to decide if they want to use the returned `Resource` or not.
+ It may have desired attributes overwritten or include stale semantic conventions. (#4876)
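A sketch of the caller-side handling this change enables; the schema URLs are hypothetical, and whether the partial result is acceptable is an application decision:

```go
package example

import (
	"errors"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

// mergeResources keeps the partial result on a schema URL conflict instead of
// discarding it.
func mergeResources() (*resource.Resource, error) {
	a := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.24.0",
		attribute.String("service.name", "svc"))
	b := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.21.0",
		attribute.String("deployment.environment", "prod"))

	merged, err := resource.Merge(a, b)
	if err != nil && !errors.Is(err, resource.ErrSchemaURLConflict) {
		return nil, err // a real merge failure, not just a schema conflict
	}
	// On ErrSchemaURLConflict the merged Resource is still usable, but it may
	// carry stale semantic conventions; keeping it is the caller's choice.
	return merged, nil
}
```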
+
+### Fixed
+
+- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449)
+- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820)
+- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4827)
+
+## [1.23.0-rc.1] 2024-01-18
+
+This is a release candidate for the v1.23.0 release.
+That release is expected to include the `v1` release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+## [1.22.0/0.45.0] 2024-01-17
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
+ The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
+- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
+ The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
+- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
+ The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
+- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
+ The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
+- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
+- Experimental cardinality limiting is added to the metric SDK.
+ See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
+- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
+
+### Changed
+
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
+- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.24.0` version of the OpenTelemetry specification. (#4754)
+- Record synchronous measurements when the passed context is canceled instead of dropping them in `go.opentelemetry.io/otel/sdk/metric`.
+ If you do not want to record a measurement when the context is canceled, you need to handle it yourself (e.g. `if ctx.Err() != nil`); see the sketch after this list. (#4671)
+- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
+- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
+- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
+- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
+- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
+- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
+- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
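The sketch referenced in the canceled-context entry above: since the SDK now records on canceled contexts, opting out is the caller's responsibility.

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel/metric"
)

// addUnlessCanceled demonstrates the caller-side guard: the metric SDK no
// longer drops measurements made with a canceled context, so skip explicitly
// if that is the desired behavior.
func addUnlessCanceled(ctx context.Context, counter metric.Int64Counter) {
	if ctx.Err() != nil {
		return
	}
	counter.Add(ctx, 1)
}
```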
+
+### Fixed
+
+- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755)
+- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
+- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
+- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
+- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804)
+- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
+
## [1.21.0/0.44.0] 2023-11-16
### Removed
@@ -24,7 +292,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [1.20.0/0.43.0] 2023-11-10
-This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
### Added
@@ -56,15 +324,15 @@ This release brings a breaking change for custom trace API implementations. Some
- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
- Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
This extends the `Tracer` interface and is a breaking change for any existing implementation.
- Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
This extends the `Span` interface and is a breaking change for any existing implementation.
- Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
@@ -700,7 +968,7 @@ The next release will require at least [Go 1.19].
- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
- Re-enabled Attribute Filters in the Metric SDK. (#3396)
-- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
- Prevent duplicate Prometheus description, unit, and type. (#3469)
@@ -1745,7 +2013,7 @@ with major version 0.
- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
- Unify endpoint API that related to OTel exporter. (#1401)
-- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435)
+- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435)
- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430)
- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432)
@@ -2735,7 +3003,16 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: /~https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD
+[Unreleased]: /~https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD
+[1.28.0/0.50.0/0.4.0]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
+[1.27.0/0.49.0/0.3.0]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
+[1.26.0/0.48.0/0.2.0-alpha]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
+[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0
+[1.24.0/0.46.0/0.0.1-alpha]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0
+[1.23.1]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1
+[1.23.0]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0
+[1.23.0-rc.1]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1
+[1.22.0/0.45.0]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0
[1.21.0/0.44.0]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
[1.20.0/0.43.0]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
[1.19.0/0.42.0/0.0.7]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
@@ -2809,6 +3086,8 @@ It contains api and sdk for trace and meter.
[0.1.1]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
[0.1.0]: /~https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
+[Go 1.22]: https://go.dev/doc/go1.22
+[Go 1.21]: https://go.dev/doc/go1.21
[Go 1.20]: https://go.dev/doc/go1.20
[Go 1.19]: https://go.dev/doc/go1.19
[Go 1.18]: https://go.dev/doc/go1.18
@@ -2816,3 +3095,5 @@ It contains api and sdk for trace and meter.
[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace
+
+[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
index 623740007d4e..202554933230 100644
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -12,6 +12,6 @@
# https://help.github.com/en/articles/about-code-owners
#
-* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
+* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
-CODEOWNERS @MrAlias @MadVikingGod @pellared
\ No newline at end of file
+CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index 850606ae6924..b86572f58ea5 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -201,6 +201,16 @@ You can install and run a "local Go Doc site" in the following way:
[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
is an example of a very well-documented package.
+### README files
+
+Each (non-internal, non-test, non-documentation) package must contain a
+`README.md` file containing at least a title and a `pkg.go.dev` badge.
+
+The README should not be a repetition of Go doc comments.
+
+You can verify the presence of all README files with the `make verify-readmes`
+command.
+
## Style Guide
One of the primary goals of this project is that it is actually used by
@@ -560,6 +570,9 @@ functionality should be added, each one will need their own super-set
interfaces and will duplicate the pattern. For this reason, the simple targeted
interface that defines the specific functionality should be preferred.
+See also:
+[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces).
+
### Testing
The tests should never leak goroutines.
@@ -591,27 +604,48 @@ this.
[^3]: /~https://github.com/open-telemetry/opentelemetry-go/issues/3548
+### Ignoring context cancellation
+
+OpenTelemetry API implementations need to ignore the cancellation of the context that is
+passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
+Recording methods should not return an error describing the cancellation state of the context
+when they complete, nor should they abort any work.
+
+This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
+the method. In that case the context cancellation can be used for the timeout with the
+restriction that this behavior is documented for the method. Otherwise, timeouts
+are expected to be handled by the user calling the API, not the implementation.
+
+Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method
+of a provider. It is assumed the context passed from a user is not used for this purpose.
+
+Outside of the direct recording of telemetry from the API (e.g. exporting telemetry,
+force flushing telemetry, shutting down a signal provider) the context cancellation
+should be honored. This means all work done on behalf of the user-provided context
+should be canceled.
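A hypothetical illustration of the guideline, not code from this repository: a recording method accepts a context for request-scoped values like baggage but never consults it for cancellation.

```go
package example

import (
	"context"
	"sync"
)

// recorder is a hypothetical API implementation following the guideline
// above.
type recorder struct {
	mu     sync.Mutex
	values []float64
}

// Record does not abort and does not return an error describing the context
// state, even if ctx is already canceled. The context is accepted only for
// request-scoped values (e.g. baggage).
func (r *recorder) Record(ctx context.Context, v float64) {
	_ = ctx // intentionally not checking ctx.Err()
	r.mu.Lock()
	defer r.mu.Unlock()
	r.values = append(r.values, v)
}
```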
+
## Approvers and Maintainers
### Approvers
-- [Evan Torrie](/~https://github.com/evantorrie), Verizon Media
-- [Sam Xie](/~https://github.com/XSAM), Cisco/AppDynamics
-- [David Ashpole](/~https://github.com/dashpole), Google
- [Chester Cheung](/~https://github.com/hanyuancheung), Tencent
-- [Damien Mathieu](/~https://github.com/dmathieu), Elastic
-- [Anthony Mirabella](/~https://github.com/Aneurysm9), AWS
### Maintainers
- [Aaron Clawson](/~https://github.com/MadVikingGod), LightStep
+- [Damien Mathieu](/~https://github.com/dmathieu), Elastic
+- [David Ashpole](/~https://github.com/dashpole), Google
- [Robert Pająk](/~https://github.com/pellared), Splunk
+- [Sam Xie](/~https://github.com/XSAM), Cisco/AppDynamics
- [Tyler Yahn](/~https://github.com/MrAlias), Splunk
### Emeritus
+- [Liz Fong-Jones](/~https://github.com/lizthegrey), Honeycomb
- [Gustavo Silva Paiva](/~https://github.com/paivagustavo), LightStep
- [Josh MacDonald](/~https://github.com/jmacd), LightStep
+- [Anthony Mirabella](/~https://github.com/Aneurysm9), AWS
+- [Evan Torrie](/~https://github.com/evantorrie), Yahoo
### Become an Approver or a Maintainer
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index 35fc189961b6..f33619f76a2b 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -1,16 +1,5 @@
# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-License-Identifier: Apache-2.0
TOOLS_MOD_DIR := ./internal/tools
@@ -25,8 +14,8 @@ TIMEOUT = 60
.DEFAULT_GOAL := precommit
.PHONY: precommit ci
-precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default
-ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
+precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default
+ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage
# Tools
@@ -34,7 +23,7 @@ TOOLS = $(CURDIR)/.tools
$(TOOLS):
@mkdir -p $@
-$(TOOLS)/%: | $(TOOLS)
+$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS)
cd $(TOOLS_MOD_DIR) && \
$(GO) build -o $@ $(PACKAGE)
@@ -50,9 +39,6 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
SEMCONVKIT = $(TOOLS)/semconvkit
$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
-DBOTCONF = $(TOOLS)/dbotconf
-$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf
-
GOLANGCI_LINT = $(TOOLS)/golangci-lint
$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
@@ -81,7 +67,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
.PHONY: tools
-tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
+tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
# Virtualized python tools via docker
@@ -110,7 +96,7 @@ $(PYTOOLS):
@$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip"
# Install python packages into the virtual environment.
-$(PYTOOLS)/%: | $(PYTOOLS)
+$(PYTOOLS)/%: $(PYTOOLS)
@$(DOCKERPY) $(PIP) install -r requirements.txt
CODESPELL = $(PYTOOLS)/codespell
@@ -124,18 +110,18 @@ generate: go-generate vanity-import-fix
.PHONY: go-generate
go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%)
go-generate/%: DIR=$*
-go-generate/%: | $(STRINGER) $(GOTMPL)
+go-generate/%: $(STRINGER) $(GOTMPL)
@echo "$(GO) generate $(DIR)/..." \
&& cd $(DIR) \
&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./...
.PHONY: vanity-import-fix
-vanity-import-fix: | $(PORTO)
+vanity-import-fix: $(PORTO)
@$(PORTO) --include-internal -w .
# Generate go.work file for local development.
.PHONY: go-work
-go-work: | $(CROSSLINK)
+go-work: $(CROSSLINK)
$(CROSSLINK) work --root=$(shell pwd)
# Build
@@ -178,7 +164,7 @@ test/%:
COVERAGE_MODE = atomic
COVERAGE_PROFILE = coverage.out
.PHONY: test-coverage
-test-coverage: | $(GOCOVMERGE)
+test-coverage: $(GOCOVMERGE)
@set -e; \
printf "" > coverage.txt; \
for dir in $(ALL_COVERAGE_MOD_DIRS); do \
@@ -192,7 +178,7 @@ test-coverage: | $(GOCOVMERGE)
done; \
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
-# Adding a directory will include all benchmarks in that direcotry if a filter is not specified.
+# Adding a directory will include all benchmarks in that directory if a filter is not specified.
BENCHMARK_TARGETS := sdk/trace
.PHONY: benchmark
benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
@@ -209,23 +195,23 @@ golangci-lint-fix: ARGS=--fix
golangci-lint-fix: golangci-lint
golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
golangci-lint/%: DIR=$*
-golangci-lint/%: | $(GOLANGCI_LINT)
+golangci-lint/%: $(GOLANGCI_LINT)
@echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
&& cd $(DIR) \
&& $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
.PHONY: crosslink
-crosslink: | $(CROSSLINK)
+crosslink: $(CROSSLINK)
@echo "Updating intra-repository dependencies in all go modules" \
&& $(CROSSLINK) --root=$(shell pwd) --prune
.PHONY: go-mod-tidy
go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
go-mod-tidy/%: DIR=$*
-go-mod-tidy/%: | crosslink
+go-mod-tidy/%: crosslink
@echo "$(GO) mod tidy in $(DIR)" \
&& cd $(DIR) \
- && $(GO) mod tidy -compat=1.20
+ && $(GO) mod tidy -compat=1.21
.PHONY: lint-modules
lint-modules: go-mod-tidy
@@ -234,23 +220,23 @@ lint-modules: go-mod-tidy
lint: misspell lint-modules golangci-lint govulncheck
.PHONY: vanity-import-check
-vanity-import-check: | $(PORTO)
+vanity-import-check: $(PORTO)
@$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 )
.PHONY: misspell
-misspell: | $(MISSPELL)
+misspell: $(MISSPELL)
@$(MISSPELL) -w $(ALL_DOCS)
.PHONY: govulncheck
govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
govulncheck/%: DIR=$*
-govulncheck/%: | $(GOVULNCHECK)
+govulncheck/%: $(GOVULNCHECK)
@echo "govulncheck ./... in $(DIR)" \
&& cd $(DIR) \
&& $(GOVULNCHECK) ./...
.PHONY: codespell
-codespell: | $(CODESPELL)
+codespell: $(CODESPELL)
@$(DOCKERPY) $(CODESPELL)
.PHONY: license-check
@@ -263,15 +249,6 @@ license-check:
exit 1; \
fi
-DEPENDABOT_CONFIG = .github/dependabot.yml
-.PHONY: dependabot-check
-dependabot-check: | $(DBOTCONF)
- @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 )
-
-.PHONY: dependabot-generate
-dependabot-generate: | $(DBOTCONF)
- @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG)
-
.PHONY: check-clean-work-tree
check-clean-work-tree:
@if ! git diff --quiet; then \
@@ -284,13 +261,11 @@ check-clean-work-tree:
SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate
-semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
+semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT)
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
[ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)"
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: gorelease
@@ -302,17 +277,25 @@ gorelease/%:| $(GORELEASE)
&& $(GORELEASE) \
|| echo ""
+.PHONY: verify-mods
+verify-mods: $(MULTIMOD)
+ $(MULTIMOD) verify
+
.PHONY: prerelease
-prerelease: | $(MULTIMOD)
+prerelease: verify-mods
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
- $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET}
+ $(MULTIMOD) prerelease -m ${MODSET}
COMMIT ?= "HEAD"
.PHONY: add-tags
-add-tags: | $(MULTIMOD)
+add-tags: verify-mods
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
- $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
+ $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
.PHONY: lint-markdown
-lint-markdown:
- docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
+lint-markdown:
+ docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
+
+.PHONY: verify-readmes
+verify-readmes:
+ ./verify_readmes.sh
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 2c5b0cc28ab1..5a8909317312 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -11,14 +11,11 @@ It provides a set of APIs to directly measure performance and behavior of your s
## Project Status
-| Signal | Status |
-|---------|------------|
-| Traces | Stable |
-| Metrics | Stable |
-| Logs | Design [1] |
-
-- [1]: Currently the logs signal development is in a design phase ([#4696](/~https://github.com/open-telemetry/opentelemetry-go/issues/4696)).
- No Logs Pull Requests are currently being accepted.
+| Signal | Status |
+|---------|--------------------|
+| Traces | Stable |
+| Metrics | Stable |
+| Logs | Beta[^1] |
Progress and status specific to this repository is tracked in our
[project boards](/~https://github.com/open-telemetry/opentelemetry-go/projects)
@@ -28,6 +25,8 @@ and
Project versioning information and stability guarantees can be found in the
[versioning documentation](VERSIONING.md).
+[^1]: /~https://github.com/orgs/open-telemetry/projects/43
+
### Compatibility
OpenTelemetry-Go ensures compatibility with the current supported versions of
@@ -50,23 +49,25 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture |
|---------|------------|--------------|
+| Ubuntu | 1.22 | amd64 |
| Ubuntu | 1.21 | amd64 |
-| Ubuntu | 1.20 | amd64 |
+| Ubuntu | 1.22 | 386 |
| Ubuntu | 1.21 | 386 |
-| Ubuntu | 1.20 | 386 |
+| Linux | 1.22 | arm64 |
+| Linux | 1.21 | arm64 |
+| MacOS | 1.22 | amd64 |
| MacOS | 1.21 | amd64 |
-| MacOS | 1.20 | amd64 |
+| Windows | 1.22 | amd64 |
| Windows | 1.21 | amd64 |
-| Windows | 1.20 | amd64 |
+| Windows | 1.22 | 386 |
| Windows | 1.21 | 386 |
-| Windows | 1.20 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
## Getting Started
-You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/).
+You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/).
OpenTelemetry's goal is to provide a single set of APIs to capture distributed
traces and metrics from your application and send them to an observability
@@ -96,12 +97,12 @@ export pipeline to send that telemetry to an observability platform.
All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
-| Exporter | Metrics | Traces |
-|---------------------------------------|:-------:|:------:|
-| [OTLP](./exporters/otlp/) | ✓ | ✓ |
-| [Prometheus](./exporters/prometheus/) | ✓ | |
-| [stdout](./exporters/stdout/) | ✓ | ✓ |
-| [Zipkin](./exporters/zipkin/) | | ✓ |
+| Exporter | Logs | Metrics | Traces |
+|---------------------------------------|:----:|:-------:|:------:|
+| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ |
+| [Prometheus](./exporters/prometheus/) | | ✓ | |
+| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ |
+| [Zipkin](./exporters/zipkin/) | | | ✓ |
## Contributing
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 82ce3ee46a15..940f57f3d87d 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -27,6 +27,12 @@ You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
+## Verify changes for contrib repository
+
+If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository.
+
+Follow [the steps](/~https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes.
+
## Pre-Release
First, decide which module sets will be released and update their versions
@@ -123,12 +129,12 @@ Once verified be sure to [make a release for the `contrib` repository](https://g
### Website Documentation
-Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go].
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
[OpenTelemetry Semantic Conventions]: /~https://github.com/open-telemetry/semantic-conventions
-[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/
-[content/en/docs/instrumentation/go]: /~https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go
+[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
+[content/en/docs/languages/go]: /~https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
### Demo Repository
diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md
new file mode 100644
index 000000000000..5b3da8f14cab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/README.md
@@ -0,0 +1,3 @@
+# Attribute
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute)
diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go
index dafe7424dfbb..eef51ebc2a27 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/doc.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package attribute provides key and value attributes.
package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
index fe2bc5766cff..318e42fcabeb 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go
index 638c213d59ab..be9cd922d87e 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/filter.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
index 841b271fb7dd..f2ba89ce4bc2 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go
index 0656a04e43bf..d9a22c650206 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/key.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/key.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
index 1ddf3ce05805..3028f9a40f8b 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/kv.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index 9f9303d4f15d..bff9c7fdbb96 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -1,24 +1,14 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
import (
+ "cmp"
"encoding/json"
"reflect"
+ "slices"
"sort"
- "sync"
)
type (
@@ -26,23 +16,33 @@ type (
// immutable set of attributes, with an internal cache for storing
// attribute encodings.
//
- // This type supports the Equivalent method of comparison using values of
- // type Distinct.
+ // This type will remain comparable for backwards compatibility. The
+ // equivalence of Sets across versions is not guaranteed to be stable.
+ // Prior versions may find two Sets to be equal or not when compared
+ // directly (i.e. ==), but subsequent versions may not. Users should use
+ // the Equals method to ensure stable equivalence checking.
+ //
+ // Users should also use the Distinct returned from Equivalent as a map key
+ // instead of a Set directly. In addition to that type providing guarantees
+ // on stable equivalence, it may also provide performance improvements.
Set struct {
equivalent Distinct
}
- // Distinct wraps a variable-size array of KeyValue, constructed with keys
- // in sorted order. This can be used as a map key or for equality checking
- // between Sets.
+ // Distinct is a unique identifier of a Set.
+ //
+ // Distinct is designed to ensure equivalence stability: comparisons
+ // will return the same value across versions. For this reason, Distinct
+ // should always be used as a map key instead of a Set.
Distinct struct {
iface interface{}
}
- // Sortable implements sort.Interface, used for sorting KeyValue. This is
- // an exported type to support a memory optimization. A pointer to one of
- // these is needed for the call to sort.Stable(), which the caller may
- // provide in order to avoid an allocation. See NewSetWithSortable().
+ // Sortable implements sort.Interface, used for sorting KeyValue.
+ //
+ // Deprecated: This type is no longer used. It was added as a performance
+ // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no
+ // longer supported by the module).
Sortable []KeyValue
)
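A short sketch of the recommended usage from the comments above: derive a `Distinct` via `Equivalent` and key maps with it rather than with the `Set` itself.

```go
package example

import "go.opentelemetry.io/otel/attribute"

// countByAttrs keys the map by Distinct so equivalence checks stay stable
// across module versions, per the documentation above.
func countByAttrs(counts map[attribute.Distinct]int, kvs ...attribute.KeyValue) {
	set := attribute.NewSet(kvs...)
	counts[set.Equivalent()]++
}
```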
@@ -56,12 +56,6 @@ var (
iface: [0]KeyValue{},
},
}
-
- // sortables is a pool of Sortables used to create Sets with a user does
- // not provide one.
- sortables = sync.Pool{
- New: func() interface{} { return new(Sortable) },
- }
)
// EmptySet returns a reference to a Set with no elements.
@@ -187,13 +181,7 @@ func empty() Set {
// Except for empty sets, this method adds an additional allocation compared
// with calls that include a Sortable.
func NewSet(kvs ...KeyValue) Set {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty()
- }
- srt := sortables.Get().(*Sortable)
- s, _ := NewSetWithSortableFiltered(kvs, srt, nil)
- sortables.Put(srt)
+ s, _ := NewSetWithFiltered(kvs, nil)
return s
}
@@ -201,12 +189,10 @@ func NewSet(kvs ...KeyValue) Set {
// NewSetWithSortableFiltered for more details.
//
// This call includes a Sortable option as a memory optimization.
-func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty()
- }
- s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
+//
+// Deprecated: Use [NewSet] instead.
+func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set {
+ s, _ := NewSetWithFiltered(kvs, nil)
return s
}
@@ -220,10 +206,37 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
if len(kvs) == 0 {
return empty(), nil
}
- srt := sortables.Get().(*Sortable)
- s, filtered := NewSetWithSortableFiltered(kvs, srt, filter)
- sortables.Put(srt)
- return s, filtered
+
+ // Stable sort so the following de-duplication can implement
+ // last-value-wins semantics.
+ slices.SortStableFunc(kvs, func(a, b KeyValue) int {
+ return cmp.Compare(a.Key, b.Key)
+ })
+
+ position := len(kvs) - 1
+ offset := position - 1
+
+ // The requirements stated above require that the stable
+ // result be placed in the end of the input slice, while
+ // overwritten values are swapped to the beginning.
+ //
+ // De-duplicate with last-value-wins semantics. Preserve
+ // duplicate values at the beginning of the input slice.
+ for ; offset >= 0; offset-- {
+ if kvs[offset].Key == kvs[position].Key {
+ continue
+ }
+ position--
+ kvs[offset], kvs[position] = kvs[position], kvs[offset]
+ }
+ kvs = kvs[position:]
+
+ if filter != nil {
+ if div := filteredToFront(kvs, filter); div != 0 {
+ return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
+ }
+ }
+ return Set{equivalent: computeDistinct(kvs)}, nil
}
// NewSetWithSortableFiltered returns a new Set.
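The last-value-wins de-duplication above is observable from the public API; a small sketch:

```go
package example

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// lastValueWins shows duplicate keys collapsing to the last value provided,
// matching the de-duplication loop in NewSetWithFiltered.
func lastValueWins() {
	set := attribute.NewSet(
		attribute.String("k", "first"),
		attribute.String("k", "second"),
	)
	kv, _ := set.Get(0)              // the set holds a single entry
	fmt.Println(kv.Value.AsString()) // prints "second"
}
```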
@@ -249,82 +262,71 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
//
// The second []KeyValue return value is a list of attributes that were
// excluded by the Filter (if non-nil).
-func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty(), nil
- }
-
- *tmp = kvs
-
- // Stable sort so the following de-duplication can implement
- // last-value-wins semantics.
- sort.Stable(tmp)
-
- *tmp = nil
-
- position := len(kvs) - 1
- offset := position - 1
-
- // The requirements stated above require that the stable
- // result be placed in the end of the input slice, while
- // overwritten values are swapped to the beginning.
- //
- // De-duplicate with last-value-wins semantics. Preserve
- // duplicate values at the beginning of the input slice.
- for ; offset >= 0; offset-- {
- if kvs[offset].Key == kvs[position].Key {
- continue
- }
- position--
- kvs[offset], kvs[position] = kvs[position], kvs[offset]
- }
- if filter != nil {
- return filterSet(kvs[position:], filter)
- }
- return Set{
- equivalent: computeDistinct(kvs[position:]),
- }, nil
+//
+// Deprecated: Use [NewSetWithFiltered] instead.
+func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) {
+ return NewSetWithFiltered(kvs, filter)
}
-// filterSet reorders kvs so that included keys are contiguous at the end of
-// the slice, while excluded keys precede the included keys.
-func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
- var excluded []KeyValue
-
- // Move attributes that do not match the filter so they're adjacent before
- // calling computeDistinct().
- distinctPosition := len(kvs)
-
- // Swap indistinct keys forward and distinct keys toward the
- // end of the slice.
- offset := len(kvs) - 1
- for ; offset >= 0; offset-- {
- if filter(kvs[offset]) {
- distinctPosition--
- kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset]
- continue
+// filteredToFront filters slice in-place using keep function. All KeyValues that need to
+// be removed are moved to the front. All KeyValues that need to be kept are
+// moved (in-order) to the back. The index for the first KeyValue to be kept is
+// returned.
+func filteredToFront(slice []KeyValue, keep Filter) int {
+ n := len(slice)
+ j := n
+ for i := n - 1; i >= 0; i-- {
+ if keep(slice[i]) {
+ j--
+ slice[i], slice[j] = slice[j], slice[i]
}
}
- excluded = kvs[:distinctPosition]
-
- return Set{
- equivalent: computeDistinct(kvs[distinctPosition:]),
- }, excluded
+ return j
}
// Filter returns a filtered copy of this Set. See the documentation for
// NewSetWithSortableFiltered for more details.
func (l *Set) Filter(re Filter) (Set, []KeyValue) {
if re == nil {
- return Set{
- equivalent: l.equivalent,
- }, nil
+ return *l, nil
+ }
+
+ // Iterate in reverse to the first attribute that will be filtered out.
+ n := l.Len()
+ first := n - 1
+ for ; first >= 0; first-- {
+ kv, _ := l.Get(first)
+ if !re(kv) {
+ break
+ }
+ }
+
+ // No attributes will be dropped, return the immutable Set l and nil.
+ if first < 0 {
+ return *l, nil
}
- // Note: This could be refactored to avoid the temporary slice
- // allocation, if it proves to be expensive.
- return filterSet(l.ToSlice(), re)
+ // Copy now that we know we need to return a modified set.
+ //
+ // Do not do this in-place on the underlying storage of *Set l. Sets are
+ // immutable and filtering should not change this.
+ slice := l.ToSlice()
+
+ // Don't re-iterate the slice if only slice[0] is filtered.
+ if first == 0 {
+ // It is safe to assume len(slice) >= 1 given we found at least one
+ // attribute above that needs to be filtered out.
+ return Set{equivalent: computeDistinct(slice[1:])}, slice[:1]
+ }
+
+ // Move the filtered slice[first] to the front (preserving order).
+ kv := slice[first]
+ copy(slice[1:first+1], slice[:first])
+ slice[0] = kv
+
+ // Do not re-evaluate re(slice[first+1:]).
+ div := filteredToFront(slice[1:first+1], re) + 1
+ return Set{equivalent: computeDistinct(slice[div:])}, slice[:div]
}
// computeDistinct returns a Distinct using either the fixed- or
@@ -404,7 +406,7 @@ func (l *Set) MarshalJSON() ([]byte, error) {
return json.Marshal(l.equivalent.iface)
}
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+// MarshalLog is the marshaling function used by the logging system to represent this Set.
func (l Set) MarshalLog() interface{} {
kvs := make(map[string]string)
for _, kv := range l.ToSlice() {
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
index cb21dd5c0961..9ea0ecbbd27a 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
@@ -242,15 +231,27 @@ func (v Value) Emit() string {
case BOOL:
return strconv.FormatBool(v.AsBool())
case INT64SLICE:
- return fmt.Sprint(v.asInt64Slice())
+ j, err := json.Marshal(v.asInt64Slice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asInt64Slice())
+ }
+ return string(j)
case INT64:
return strconv.FormatInt(v.AsInt64(), 10)
case FLOAT64SLICE:
- return fmt.Sprint(v.asFloat64Slice())
+ j, err := json.Marshal(v.asFloat64Slice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asFloat64Slice())
+ }
+ return string(j)
case FLOAT64:
return fmt.Sprint(v.AsFloat64())
case STRINGSLICE:
- return fmt.Sprint(v.asStringSlice())
+ j, err := json.Marshal(v.asStringSlice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asStringSlice())
+ }
+ return string(j)
case STRING:
return v.stringly
default:
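With the change above, slice-valued attributes emit as their JSON representation; a minimal sketch:

```go
package example

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// emitSlice shows the new JSON output for slice values: ["a","b"] rather than
// the previous fmt.Sprint form [a b].
func emitSlice() {
	v := attribute.StringSlice("letters", []string{"a", "b"}).Value
	fmt.Println(v.Emit())
}
```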
diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md
new file mode 100644
index 000000000000..7d798435e126
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/README.md
@@ -0,0 +1,3 @@
+# Baggage
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage)
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index 84532cb1da34..c40c896cc667 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package baggage // import "go.opentelemetry.io/otel/baggage"
@@ -18,8 +7,8 @@ import (
"errors"
"fmt"
"net/url"
- "regexp"
"strings"
+ "unicode/utf8"
"go.opentelemetry.io/otel/internal/baggage"
)
@@ -32,16 +21,6 @@ const (
listDelimiter = ","
keyValueDelimiter = "="
propertyDelimiter = ";"
-
- keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)`
- valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)`
- keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*`
-)
-
-var (
- keyRe = regexp.MustCompile(`^` + keyDef + `$`)
- valueRe = regexp.MustCompile(`^` + valueDef + `$`)
- propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`)
)
var (
@@ -67,7 +46,7 @@ type Property struct {
//
// If key is invalid, an error will be returned.
func NewKeyProperty(key string) (Property, error) {
- if !keyRe.MatchString(key) {
+ if !validateKey(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
@@ -77,14 +56,29 @@ func NewKeyProperty(key string) (Property, error) {
// NewKeyValueProperty returns a new Property for key with value.
//
-// If key or value are invalid, an error will be returned.
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewKeyValuePropertyRaw] instead,
+// which does not require percent-encoding of the value.
func NewKeyValueProperty(key, value string) (Property, error) {
- if !keyRe.MatchString(key) {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ if !validateValue(value) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
- if !valueRe.MatchString(value) {
+ decodedValue, err := url.PathUnescape(value)
+ if err != nil {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
+ return NewKeyValuePropertyRaw(key, decodedValue)
+}
+
+// NewKeyValuePropertyRaw returns a new Property for key with value.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+func NewKeyValuePropertyRaw(key, value string) (Property, error) {
+ if !validateKey(key) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
p := Property{
key: key,
@@ -106,20 +100,11 @@ func parseProperty(property string) (Property, error) {
return newInvalidProperty(), nil
}
- match := propertyRe.FindStringSubmatch(property)
- if len(match) != 4 {
+ p, ok := parsePropertyInternal(property)
+ if !ok {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
}
- var p Property
- if match[1] != "" {
- p.key = match[1]
- } else {
- p.key = match[2]
- p.value = match[3]
- p.hasValue = true
- }
-
return p, nil
}
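To illustrate the new Raw constructor introduced above: NewKeyValueProperty still takes a percent-encoded value, while NewKeyValuePropertyRaw takes the decoded value directly. A short sketch; the key/value strings are illustrative only:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// NewKeyValueProperty still expects a percent-encoded value...
	p1, _ := baggage.NewKeyValueProperty("note", "a%20b")
	// ...while NewKeyValuePropertyRaw takes the decoded value directly.
	p2, _ := baggage.NewKeyValuePropertyRaw("note", "a b")

	v1, _ := p1.Value()
	v2, _ := p2.Value()
	fmt.Println(v1 == v2) // true: both store "a b"
}
```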
@@ -130,12 +115,9 @@ func (p Property) validate() error {
return fmt.Errorf("invalid property: %w", err)
}
- if !keyRe.MatchString(p.key) {
+ if !validateKey(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
}
- if p.hasValue && !valueRe.MatchString(p.value) {
- return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
- }
if !p.hasValue && p.value != "" {
return errFunc(errors.New("inconsistent value"))
}
@@ -154,11 +136,11 @@ func (p Property) Value() (string, bool) {
return p.value, p.hasValue
}
-// String encodes Property into a string compliant with the W3C Baggage
+// String encodes Property into a header string compliant with the W3C Baggage
// specification.
func (p Property) String() string {
if p.hasValue {
- return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value)
+ return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
}
return p.key
}
@@ -218,7 +200,7 @@ func (p properties) validate() error {
return nil
}
-// String encodes properties into a string compliant with the W3C Baggage
+// String encodes properties into a header string compliant with the W3C Baggage
// specification.
func (p properties) String() string {
props := make([]string, len(p))
@@ -240,11 +222,28 @@ type Member struct {
hasData bool
}
-// NewMember returns a new Member from the passed arguments. The key will be
-// used directly while the value will be url decoded after validation. An error
-// is returned if the created Member would be invalid according to the W3C
-// Baggage specification.
+// NewMember returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewMemberRaw] instead,
+// which does not require percent-encoding of the value.
func NewMember(key, value string, props ...Property) (Member, error) {
+ if !validateValue(value) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ decodedValue, err := url.PathUnescape(value)
+ if err != nil {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ return NewMemberRaw(key, decodedValue, props...)
+}
+
+// NewMemberRaw returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+func NewMemberRaw(key, value string, props ...Property) (Member, error) {
m := Member{
key: key,
value: value,
@@ -254,11 +253,6 @@ func NewMember(key, value string, props ...Property) (Member, error) {
if err := m.validate(); err != nil {
return newInvalidMember(), err
}
- decodedValue, err := url.PathUnescape(value)
- if err != nil {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
- }
- m.value = decodedValue
return m, nil
}
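The same split applies to members: NewMember keeps the old percent-encoded contract, NewMemberRaw stores the value as-is. A minimal sketch contrasting the two:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// NewMember still expects a percent-encoded value...
	m1, _ := baggage.NewMember("user", "Alice%20Smith")
	// ...while NewMemberRaw takes the decoded value directly.
	m2, _ := baggage.NewMemberRaw("user", "Alice Smith")

	fmt.Println(m1.Value() == m2.Value()) // true: both store "Alice Smith"
}
```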
@@ -274,11 +268,7 @@ func parseMember(member string) (Member, error) {
return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
}
- var (
- key, value string
- props properties
- )
-
+ var props properties
keyValue, properties, found := strings.Cut(member, propertyDelimiter)
if found {
// Parse the member properties.
@@ -299,36 +289,34 @@ func parseMember(member string) (Member, error) {
}
// "Leading and trailing whitespaces are allowed but MUST be trimmed
// when converting the header into a data structure."
- key = strings.TrimSpace(k)
- var err error
- value, err = url.PathUnescape(strings.TrimSpace(v))
- if err != nil {
- return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
- }
- if !keyRe.MatchString(key) {
+ key := strings.TrimSpace(k)
+ if !validateKey(key) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
- if !valueRe.MatchString(value) {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+
+ val := strings.TrimSpace(v)
+ if !validateValue(val) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
}
+ // Decode a percent-encoded value.
+ value, err := url.PathUnescape(val)
+ if err != nil {
+ return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
+ }
return Member{key: key, value: value, properties: props, hasData: true}, nil
}
// validate ensures m conforms to the W3C Baggage specification.
-// A key is just an ASCII string, but a value must be URL encoded UTF-8,
-// returning an error otherwise.
+// A key must be an ASCII string; an error is returned otherwise.
func (m Member) validate() error {
if !m.hasData {
return fmt.Errorf("%w: %q", errInvalidMember, m)
}
- if !keyRe.MatchString(m.key) {
+ if !validateKey(m.key) {
return fmt.Errorf("%w: %q", errInvalidKey, m.key)
}
- if !valueRe.MatchString(m.value) {
- return fmt.Errorf("%w: %q", errInvalidValue, m.value)
- }
return m.properties.validate()
}
@@ -341,13 +329,15 @@ func (m Member) Value() string { return m.value }
// Properties returns a copy of the Member properties.
func (m Member) Properties() []Property { return m.properties.Copy() }
-// String encodes Member into a string compliant with the W3C Baggage
+// String encodes Member into a header string compliant with the W3C Baggage
// specification.
func (m Member) String() string {
- // A key is just an ASCII string, but a value is URL encoded UTF-8.
- s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value))
+ // A key is just an ASCII string. A value is restricted to be
+ // US-ASCII characters excluding CTLs, whitespace,
+ // DQUOTE, comma, semicolon, and backslash.
+ s := m.key + keyValueDelimiter + valueEscape(m.value)
if len(m.properties) > 0 {
- s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
+ s += propertyDelimiter + m.properties.String()
}
return s
}
@@ -536,9 +526,8 @@ func (b Baggage) Len() int {
return len(b.list)
}
-// String encodes Baggage into a string compliant with the W3C Baggage
-// specification. The returned string will be invalid if the Baggage contains
-// any invalid list-members.
+// String encodes Baggage into a header string compliant with the W3C Baggage
+// specification.
func (b Baggage) String() string {
members := make([]string, 0, len(b.list))
for k, v := range b.list {
@@ -550,3 +539,372 @@ func (b Baggage) String() string {
}
return strings.Join(members, listDelimiter)
}
+
+// parsePropertyInternal attempts to decode a Property from the passed string.
+// It follows the spec at https://www.w3.org/TR/baggage/#definition.
+func parsePropertyInternal(s string) (p Property, ok bool) {
+ // For the entire function we will use " key = value " as an example.
+ // Attempting to parse the key.
+ // First skip spaces at the beginning "< >key = value " (they could be empty).
+ index := skipSpace(s, 0)
+
+ // Parse the key: " = value ".
+ keyStart := index
+ keyEnd := index
+ for _, c := range s[keyStart:] {
+ if !validateKeyChar(c) {
+ break
+ }
+ keyEnd++
+ }
+
+ // If we couldn't find any valid key character,
+ // it means the key is either empty or invalid.
+ if keyStart == keyEnd {
+ return
+ }
+
+ // Skip spaces after the key: " key< >= value ".
+ index = skipSpace(s, keyEnd)
+
+ if index == len(s) {
+ // A key can have no value, like: " key ".
+ ok = true
+ p.key = s[keyStart:keyEnd]
+ return
+ }
+
+ // If we have not reached the end and we can't find the '=' delimiter,
+ // it means the property is invalid.
+ if s[index] != keyValueDelimiter[0] {
+ return
+ }
+
+ // Attempting to parse the value.
+ // Match: " key =< >value ".
+ index = skipSpace(s, index+1)
+
+ // Match the value string: " key = ".
+ // A valid property can be: " key =".
+ // Therefore, we don't have to check if the value is empty.
+ valueStart := index
+ valueEnd := index
+ for _, c := range s[valueStart:] {
+ if !validateValueChar(c) {
+ break
+ }
+ valueEnd++
+ }
+
+ // Skip all trailing whitespaces: " key = value< >".
+ index = skipSpace(s, valueEnd)
+
+ // If after looking for the value and skipping whitespaces
+ // we have not reached the end, it means the property is
+ // invalid, something like: " key = value value1".
+ if index != len(s) {
+ return
+ }
+
+ // Decode a percent-encoded value.
+ value, err := url.PathUnescape(s[valueStart:valueEnd])
+ if err != nil {
+ return
+ }
+
+ ok = true
+ p.key = s[keyStart:keyEnd]
+ p.hasValue = true
+
+ p.value = value
+ return
+}
+
+func skipSpace(s string, offset int) int {
+ i := offset
+ for ; i < len(s); i++ {
+ c := s[i]
+ if c != ' ' && c != '\t' {
+ break
+ }
+ }
+ return i
+}
+
+var safeKeyCharset = [utf8.RuneSelf]bool{
+ // 0x23 to 0x27
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+
+ // 0x30 to 0x39
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+
+ // 0x41 to 0x5a
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+
+ // 0x5e to 0x7a
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+
+ // remainder
+ '!': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '|': true,
+ '~': true,
+}
+
+func validateKey(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+
+ for _, c := range s {
+ if !validateKeyChar(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func validateKeyChar(c int32) bool {
+ return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c]
+}
+
+func validateValue(s string) bool {
+ for _, c := range s {
+ if !validateValueChar(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+var safeValueCharset = [utf8.RuneSelf]bool{
+ '!': true, // 0x21
+
+ // 0x23 to 0x2b
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '(': true,
+ ')': true,
+ '*': true,
+ '+': true,
+
+ // 0x2d to 0x3a
+ '-': true,
+ '.': true,
+ '/': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ ':': true,
+
+ // 0x3c to 0x5b
+ '<': true, // 0x3C
+ '=': true, // 0x3D
+ '>': true, // 0x3E
+ '?': true, // 0x3F
+ '@': true, // 0x40
+ 'A': true, // 0x41
+ 'B': true, // 0x42
+ 'C': true, // 0x43
+ 'D': true, // 0x44
+ 'E': true, // 0x45
+ 'F': true, // 0x46
+ 'G': true, // 0x47
+ 'H': true, // 0x48
+ 'I': true, // 0x49
+ 'J': true, // 0x4A
+ 'K': true, // 0x4B
+ 'L': true, // 0x4C
+ 'M': true, // 0x4D
+ 'N': true, // 0x4E
+ 'O': true, // 0x4F
+ 'P': true, // 0x50
+ 'Q': true, // 0x51
+ 'R': true, // 0x52
+ 'S': true, // 0x53
+ 'T': true, // 0x54
+ 'U': true, // 0x55
+ 'V': true, // 0x56
+ 'W': true, // 0x57
+ 'X': true, // 0x58
+ 'Y': true, // 0x59
+ 'Z': true, // 0x5A
+ '[': true, // 0x5B
+
+ // 0x5d to 0x7e
+ ']': true, // 0x5D
+ '^': true, // 0x5E
+ '_': true, // 0x5F
+ '`': true, // 0x60
+ 'a': true, // 0x61
+ 'b': true, // 0x62
+ 'c': true, // 0x63
+ 'd': true, // 0x64
+ 'e': true, // 0x65
+ 'f': true, // 0x66
+ 'g': true, // 0x67
+ 'h': true, // 0x68
+ 'i': true, // 0x69
+ 'j': true, // 0x6A
+ 'k': true, // 0x6B
+ 'l': true, // 0x6C
+ 'm': true, // 0x6D
+ 'n': true, // 0x6E
+ 'o': true, // 0x6F
+ 'p': true, // 0x70
+ 'q': true, // 0x71
+ 'r': true, // 0x72
+ 's': true, // 0x73
+ 't': true, // 0x74
+ 'u': true, // 0x75
+ 'v': true, // 0x76
+ 'w': true, // 0x77
+ 'x': true, // 0x78
+ 'y': true, // 0x79
+ 'z': true, // 0x7A
+ '{': true, // 0x7B
+ '|': true, // 0x7C
+ '}': true, // 0x7D
+ '~': true, // 0x7E
+}
+
+func validateValueChar(c int32) bool {
+ return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c]
+}
+
+// valueEscape escapes the string so it can be safely placed inside a baggage value,
+// replacing special characters with %XX sequences as needed.
+//
+// The implementation is based on:
+// /~https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285.
+func valueEscape(s string) string {
+ hexCount := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(c) {
+ hexCount++
+ }
+ }
+
+ if hexCount == 0 {
+ return s
+ }
+
+ var buf [64]byte
+ var t []byte
+
+ required := len(s) + 2*hexCount
+ if required <= len(buf) {
+ t = buf[:required]
+ } else {
+ t = make([]byte, required)
+ }
+
+ j := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(s[i]) {
+ const upperhex = "0123456789ABCDEF"
+ t[j] = '%'
+ t[j+1] = upperhex[c>>4]
+ t[j+2] = upperhex[c&15]
+ j += 3
+ } else {
+ t[j] = c
+ j++
+ }
+ }
+
+ return string(t)
+}
+
+// shouldEscape returns true if the specified byte should be escaped when
+// appearing in a baggage value string.
+func shouldEscape(c byte) bool {
+ if c == '%' {
+ // The percent character must be encoded so that percent-encoding can work.
+ return true
+ }
+ return !validateValueChar(int32(c))
+}
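With valueEscape in place, Member.String and Baggage.String percent-encode only the bytes that are unsafe in a baggage value, and parsing decodes them back. A round-trip sketch through the public API, assuming the greeting value is illustrative:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	m, _ := baggage.NewMemberRaw("greeting", "hello world;v=1")
	b, _ := baggage.New(m)

	// String percent-encodes only unsafe bytes: the space becomes %20
	// and the semicolon %3B, while '=' is already a valid value character.
	header := b.String()
	fmt.Println(header) // greeting=hello%20world%3Bv=1

	// Parse decodes the value back to its raw form.
	parsed, _ := baggage.Parse(header)
	fmt.Println(parsed.Member("greeting").Value()) // hello world;v=1
}
```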
diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go
index 24b34b7564a8..a572461a05f2 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/context.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/context.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go
index 4545100df679..b51d87cab704 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/doc.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package baggage provides functionality for storing and retrieving
diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md
new file mode 100644
index 000000000000..24c52b387d22
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/README.md
@@ -0,0 +1,3 @@
+# Codes
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes)
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index 587ebae4e30e..df29d96a6dae 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package codes // import "go.opentelemetry.io/otel/codes"
diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go
index 4e328fbb4b3a..ee8db448b8bf 100644
--- a/vendor/go.opentelemetry.io/otel/codes/doc.go
+++ b/vendor/go.opentelemetry.io/otel/codes/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package codes defines the canonical error codes used by OpenTelemetry.
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
index daa36c89dc63..441c595014d3 100644
--- a/vendor/go.opentelemetry.io/otel/doc.go
+++ b/vendor/go.opentelemetry.io/otel/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package otel provides global access to the OpenTelemetry API. The subpackages of
@@ -22,7 +11,7 @@ transmitted anywhere. An implementation of the OpenTelemetry SDK, like the
default SDK implementation (go.opentelemetry.io/otel/sdk), and associated
exporters are used to process and transport this data.
-To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/.
+To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/.
To read more about tracing, see go.opentelemetry.io/otel/trace.
diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go
index 72fad85412be..67414c71e052 100644
--- a/vendor/go.opentelemetry.io/otel/error_handler.go
+++ b/vendor/go.opentelemetry.io/otel/error_handler.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
index 50295223182c..50802d5aee6d 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
@@ -1,51 +1,3 @@
-# OpenTelemetry-Go OTLP Span Exporter
+# OTLP Trace Exporter
-[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
-
-[OpenTelemetry Protocol Exporter](/~https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md) implementation.
-
-## Installation
-
-```
-go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace
-```
-
-## Examples
-
-- [HTTP Exporter setup and examples](./otlptracehttp/example_test.go)
-- [Full example of gRPC Exporter sending telemetry to a local collector](../../../example/otel-collector)
-
-## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
-
-The `otlptrace` package provides an exporter implementing the OTel span exporter interface.
-This exporter is configured using a client satisfying the `otlptrace.Client` interface.
-This client handles the transformation of data into wire format and the transmission of that data to the collector.
-
-## [`otlptracegrpc`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)
-
-The `otlptracegrpc` package implements a client for the span exporter that sends trace telemetry data to the collector using gRPC with protobuf-encoded payloads.
-
-## [`otlptracehttp`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp)
-
-The `otlptracehttp` package implements a client for the span exporter that sends trace telemetry data to the collector using HTTP with protobuf-encoded payloads.
-
-## Configuration
-
-### Environment Variables
-
-The following environment variables can be used (instead of options objects) to
-override the default configuration. For more information about how each of
-these environment variables is interpreted, see [the OpenTelemetry
-specification](/~https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md).
-
-| Environment variable | Option | Default value |
-| ------------------------------------------------------------------------ |------------------------------ | -------------------------------------------------------- |
-| `OTEL_EXPORTER_OTLP_ENDPOINT` `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` | `WithEndpoint` `WithInsecure` | `https://localhost:4317` or `https://localhost:4318`[^1] |
-| `OTEL_EXPORTER_OTLP_CERTIFICATE` `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` | `WithTLSClientConfig` | |
-| `OTEL_EXPORTER_OTLP_HEADERS` `OTEL_EXPORTER_OTLP_TRACES_HEADERS` | `WithHeaders` | |
-| `OTEL_EXPORTER_OTLP_COMPRESSION` `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` | `WithCompression` | |
-| `OTEL_EXPORTER_OTLP_TIMEOUT` `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` | `WithTimeout` | `10s` |
-
-[^1]: The gRPC client defaults to `https://localhost:4317` and the HTTP client `https://localhost:4318`.
-
-Configuration using options have precedence over the environment variables.
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
index dbb40cf5820f..3c1a625c0678 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go
new file mode 100644
index 000000000000..09ad5eadb6fa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlptrace contains abstractions for OTLP span exporters.
+See the official OTLP span exporter implementations:
+ - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc],
+ - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp].
+*/
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
index 0dbe15555b34..3f0a518ae0f2 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
@@ -24,9 +13,7 @@ import (
tracesdk "go.opentelemetry.io/otel/sdk/trace"
)
-var (
- errAlreadyStarted = errors.New("already started")
-)
+var errAlreadyStarted = errors.New("already started")
// Exporter exports trace data in the OTLP wire format.
type Exporter struct {
@@ -55,7 +42,7 @@ func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan)
// Start establishes a connection to the receiving endpoint.
func (e *Exporter) Start(ctx context.Context) error {
- var err = errAlreadyStarted
+ err := errAlreadyStarted
e.startOnce.Do(func() {
e.mu.Lock()
e.started = true
@@ -106,7 +93,7 @@ func NewUnstarted(client Client) *Exporter {
}
}
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+// MarshalLog is the marshaling function used by the logging system to represent this Exporter.
func (e *Exporter) MarshalLog() interface{} {
return struct {
Type string
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
index ec74f1aad758..4571a5ca3978 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
index 7aaec38d22ae..f6dd3decc900 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
index 05a1f78adbce..db7b698a566c 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
index b83cbd724780..c3c69c5a0d63 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
@@ -121,6 +110,7 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
if psid := sd.Parent().SpanID(); psid.IsValid() {
s.ParentSpanId = psid[:]
}
+ s.Flags = buildSpanFlags(sd.Parent())
return s
}
@@ -157,16 +147,28 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link {
tid := otLink.SpanContext.TraceID()
sid := otLink.SpanContext.SpanID()
+ flags := buildSpanFlags(otLink.SpanContext)
+
sl = append(sl, &tracepb.Span_Link{
TraceId: tid[:],
SpanId: sid[:],
Attributes: KeyValues(otLink.Attributes),
DroppedAttributesCount: uint32(otLink.DroppedAttributeCount),
+ Flags: flags,
})
}
return sl
}
+func buildSpanFlags(sc trace.SpanContext) uint32 {
+ flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK
+ if sc.IsRemote() {
+ flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
+ }
+
+ return uint32(flags)
+}
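buildSpanFlags always sets the "has is-remote" bit and additionally sets the "is remote" bit when the span or link context is remote. A standalone sketch of the bit arithmetic; the constants mirror the tracepb.SpanFlags enum values (0x100 and 0x200):

```go
package main

import "fmt"

// Mirrors tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK (0x100)
// and tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK (0x200).
const (
	hasIsRemoteMask uint32 = 0x100
	isRemoteMask    uint32 = 0x200
)

func flags(isRemote bool) uint32 {
	f := hasIsRemoteMask // always record that the remote bit is meaningful
	if isRemote {
		f |= isRemoteMask
	}
	return f
}

func main() {
	fmt.Printf("%#x %#x\n", flags(false), flags(true)) // 0x100 0x300
}
```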
+
// spanEvents transforms span Events to OTLP span events.
func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
if len(es) == 0 {
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md
new file mode 100644
index 000000000000..5309bb7cb1c7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md
@@ -0,0 +1,3 @@
+# OTLP Trace gRPC Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
index 86fb61a0dece..3993df927def 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
@@ -89,11 +78,11 @@ func newClient(opts ...Option) *client {
}
// Start establishes a gRPC connection to the collector.
-func (c *client) Start(ctx context.Context) error {
+func (c *client) Start(context.Context) error {
if c.conn == nil {
// If the caller did not provide a ClientConn when the client was
// created, create one using the configuration they did provide.
- conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...)
+ conn, err := grpc.NewClient(c.endpoint, c.dialOpts...)
if err != nil {
return err
}
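The switch from grpc.DialContext to grpc.NewClient changes when the connection is made: NewClient validates the target and options but does not dial, so the transport is established lazily on the first RPC. A minimal sketch; the target host is hypothetical:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// grpc.NewClient does not connect eagerly; the connection is
	// established on the first RPC (or via conn.Connect).
	conn, err := grpc.NewClient("collector.internal:4317", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```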
@@ -260,30 +249,38 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
// duration to wait for if an explicit throttle time is included in err.
func retryable(err error) (bool, time.Duration) {
s := status.Convert(err)
+ return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
switch s.Code() {
case codes.Canceled,
codes.DeadlineExceeded,
- codes.ResourceExhausted,
codes.Aborted,
codes.OutOfRange,
codes.Unavailable,
codes.DataLoss:
- return true, throttleDelay(s)
+ // Additionally handle RetryInfo.
+ _, d := throttleDelay(s)
+ return true, d
+ case codes.ResourceExhausted:
+ // Retry only if the server signals that recovery from resource exhaustion is possible.
+ return throttleDelay(s)
}
// Not a retry-able error.
return false, 0
}
-// throttleDelay returns a duration to wait for if an explicit throttle time
-// is included in the response status.
-func throttleDelay(s *status.Status) time.Duration {
+// throttleDelay returns whether the status includes RetryInfo and,
+// if so, the duration to wait before retrying.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
for _, detail := range s.Details() {
if t, ok := detail.(*errdetails.RetryInfo); ok {
- return t.RetryDelay.AsDuration()
+ return true, t.RetryDelay.AsDuration()
}
}
- return 0
+ return false, 0
}
// MarshalLog is the marshaling function used by the logging system to represent this Client.
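The practical effect of the retry change above: codes.ResourceExhausted is now retried only when the server attaches RetryInfo to the status. A standalone sketch of that detection, mirroring the vendored throttleDelay helper:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/durationpb"
)

// hasRetryInfo mirrors the vendored throttleDelay helper.
func hasRetryInfo(s *status.Status) (bool, time.Duration) {
	for _, d := range s.Details() {
		if t, ok := d.(*errdetails.RetryInfo); ok {
			return true, t.RetryDelay.AsDuration()
		}
	}
	return false, 0
}

func main() {
	// A ResourceExhausted status with no RetryInfo: not retryable
	// under the new policy.
	plain := status.New(codes.ResourceExhausted, "quota exceeded")

	// The same code with server-provided RetryInfo: retryable after
	// the suggested delay.
	withInfo, _ := plain.WithDetails(&errdetails.RetryInfo{
		RetryDelay: durationpb.New(2 * time.Second),
	})

	fmt.Println(hasRetryInfo(plain))    // false 0s
	fmt.Println(hasRetryInfo(withInfo)) // true 2s
}
```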
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
new file mode 100644
index 000000000000..e783b57ac4bb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlptracegrpc provides an OTLP span exporter using gRPC.
+By default the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in /~https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a host.
+The value may additionally contain a port, a scheme, and a path.
+The value accepts the "http" and "https" schemes.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+The scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT or
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over this setting.
+OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate (or certificate chain), in PEM format, to use with the client's private key in mTLS communication.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+*/
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
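Taken together, the precedence rules documented above mean explicit options win over the environment, which wins over the defaults. A minimal construction sketch; the collector host is hypothetical:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()

	// With no options, New honors the OTEL_EXPORTER_OTLP_* variables
	// described above and falls back to localhost:4317. Explicit
	// options take precedence over the environment.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("collector.internal:4317"), // hypothetical host
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
```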
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
index 89af41002f7a..b826b84247e0 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
index becb1f0fbbe6..9513c0a57ca6 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
@@ -174,13 +163,13 @@ func stringToHeader(value string) map[string]string {
global.Error(errors.New("missing '="), "parse headers", "input", header)
continue
}
- name, err := url.QueryUnescape(n)
+ name, err := url.PathUnescape(n)
if err != nil {
global.Error(err, "escape header key", "key", n)
continue
}
trimmedName := strings.TrimSpace(name)
- value, err := url.QueryUnescape(v)
+ value, err := url.PathUnescape(v)
if err != nil {
global.Error(err, "escape header value", "value", v)
continue
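The QueryUnescape-to-PathUnescape switch above matters because the two decoders disagree on '+'. A small sketch of the difference:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// QueryUnescape decodes '+' to a space, which corrupts literal
	// plus signs in header values; PathUnescape leaves them intact.
	q, _ := url.QueryUnescape("api+key")
	p, _ := url.PathUnescape("api+key")
	fmt.Println(q) // api key
	fmt.Println(p) // api+key
}
```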
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
index 1fb29061894c..97cd6c54f709 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
index 32f6dddb4f6f..7bb189a94bca 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
index 19b8434d4d2c..8f84a7996324 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
@@ -2,24 +2,15 @@
// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
import (
"crypto/tls"
"fmt"
+ "net/http"
+ "net/url"
"path"
"strings"
"time"
@@ -32,6 +23,7 @@ import (
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
)
const (
@@ -44,6 +36,10 @@ const (
)
type (
+ // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
+ // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function on the OTLP HTTP client.
+ HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
SignalConfig struct {
Endpoint string
Insecure bool
@@ -55,6 +51,8 @@ type (
// gRPC configurations
GRPCCredentials credentials.TransportCredentials
+
+ Proxy HTTPTransportProxyFunc
}
Config struct {
@@ -141,9 +139,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
if cfg.Traces.Compression == GzipCompression {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
}
- if len(cfg.DialOptions) != 0 {
- cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
- }
if cfg.ReconnectionPeriod != 0 {
p := grpc.ConnectParams{
Backoff: backoff.DefaultConfig,
@@ -261,6 +256,9 @@ func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
// Generic Options
+// WithEndpoint configures the trace host and port only; endpoint should
+// resemble "example.com" or "localhost:4317". To configure the scheme and path,
+// use WithEndpointURL.
func WithEndpoint(endpoint string) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Endpoint = endpoint
@@ -268,6 +266,26 @@ func WithEndpoint(endpoint string) GenericOption {
})
}
+// WithEndpointURL configures the trace scheme, host, port, and path; the
+// provided value should resemble "https://example.com:4318/v1/traces".
+func WithEndpointURL(v string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "otlptrace: parse endpoint url", "url", v)
+ return cfg
+ }
+
+ cfg.Traces.Endpoint = u.Host
+ cfg.Traces.URLPath = u.Path
+ if u.Scheme != "https" {
+ cfg.Traces.Insecure = true
+ }
+
+ return cfg
+ })
+}
+
func WithCompression(compression Compression) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Compression = compression
@@ -326,3 +344,10 @@ func WithTimeout(duration time.Duration) GenericOption {
return cfg
})
}
+
+func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Proxy = pf
+ return cfg
+ })
+}
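A standalone sketch of the mapping WithEndpointURL performs, using only net/url; the helper name endpointParts is ours, not part of the package:

```go
package main

import (
	"fmt"
	"net/url"
)

// endpointParts mirrors what WithEndpointURL extracts: the URL host
// becomes the endpoint, the path becomes the URL path, and any scheme
// other than "https" marks the connection insecure.
func endpointParts(v string) (endpoint, urlPath string, insecure bool, err error) {
	u, err := url.Parse(v)
	if err != nil {
		return "", "", false, err
	}
	return u.Host, u.Path, u.Scheme != "https", nil
}

func main() {
	fmt.Println(endpointParts("http://collector:4317/v1/traces"))
	// collector:4317 /v1/traces true <nil>
}
```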
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
index d9dcdc96e7da..3d4f699d4778 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
index 19b6d4b21f99..38b97a013134 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
index 076905e54bfc..a12ea4c48eb0 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/partialsuccess.go
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
index 3ce7d6632b86..1c5450ab62d9 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/retry/retry.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package retry provides request retry functionality that can perform
// configurable exponential backoff for transient errors and honor any
@@ -123,7 +112,7 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
}
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
- return fmt.Errorf("%w: %s", ctxErr, err)
+ return fmt.Errorf("%w: %w", ctxErr, err)
}
}
}
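
The `%s` → `%w` change above uses Go 1.20's support for multiple `%w` verbs, so both the context error and the last export error remain matchable by callers. A minimal sketch of the difference (the error values are illustrative):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"io"
)

func main() {
	// With two %w verbs, both wrapped errors stay in the chain.
	err := fmt.Errorf("%w: %w", context.DeadlineExceeded, io.ErrUnexpectedEOF)
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
	fmt.Println(errors.Is(err, io.ErrUnexpectedEOF))      // true; with "%s" this was false
}
```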
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
index 78ce9ad8f0bc..00ab1f20c6d4 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
@@ -64,14 +53,50 @@ func WithInsecure() Option {
return wrappedOption{otlpconfig.WithInsecure()}
}
-// WithEndpoint sets the target endpoint the exporter will connect to. If
-// unset, localhost:4317 will be used as a default.
+// WithEndpoint sets the target endpoint (host and port) the Exporter will
+// connect to. The provided endpoint should resemble "example.com:4317" (no
+// scheme or path).
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
//
// This option has no effect if WithGRPCConn is used.
func WithEndpoint(endpoint string) Option {
return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
}
+// WithEndpointURL sets the target endpoint URL (scheme, host, port, path)
+// the Exporter will connect to. The provided endpoint URL should resemble
+// "https://example.com:4318/v1/traces".
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "https://localhost:4317/v1/traces" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+ return wrappedOption{otlpconfig.WithEndpointURL(u)}
+}
+
// WithReconnectionPeriod set the minimum amount of time between connection
// attempts to the target endpoint.
//
@@ -93,13 +118,7 @@ func compressorToCompression(compressor string) otlpconfig.Compression {
}
// WithCompressor sets the compressor for the gRPC client to use when sending
-// requests. It is the responsibility of the caller to ensure that the
-// compressor set has been registered with google.golang.org/grpc/encoding.
-// This can be done by encoding.RegisterCompressor. Some compressors
-// auto-register on import, such as gzip, which can be registered by calling
-// `import _ "google.golang.org/grpc/encoding/gzip"`.
-//
-// This option has no effect if WithGRPCConn is used.
+// requests. Supported compressor values: "gzip".
func WithCompressor(compressor string) Option {
return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
}
@@ -137,6 +156,8 @@ func WithServiceConfig(serviceConfig string) Option {
// connection. The options here are appended to the internal grpc.DialOptions
// used so they will take precedence over any other internal grpc.DialOptions
// they might conflict with.
+// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError]
+// grpc.DialOptions are ignored.
//
// This option has no effect if WithGRPCConn is used.
func WithDialOption(opts ...grpc.DialOption) Option {
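
The reworked option docs above settle on a last-option-wins model between WithEndpoint and WithEndpointURL, and narrow WithCompressor to the one supported value. A minimal sketch of constructing the gRPC exporter with these options (the endpoint value is a hypothetical placeholder):

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()
	// WithEndpoint takes a bare host:port; WithEndpointURL takes a full URL.
	// If both were passed, the one passed last would win.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("collector.example.com:4317"), // hypothetical endpoint
		otlptracegrpc.WithInsecure(),
		otlptracegrpc.WithCompressor("gzip"), // the only supported value per the new doc
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
```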
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/README.md
new file mode 100644
index 000000000000..36526400941f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/README.md
@@ -0,0 +1,3 @@
+# OTLP Trace HTTP Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
index 3a3cfec0cdea..1e59ff239327 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
@@ -18,6 +7,7 @@ import (
"bytes"
"compress/gzip"
"context"
+ "errors"
"fmt"
"io"
"net"
@@ -84,10 +74,17 @@ func NewClient(opts ...Option) otlptrace.Client {
Transport: ourTransport,
Timeout: cfg.Traces.Timeout,
}
- if cfg.Traces.TLSCfg != nil {
- transport := ourTransport.Clone()
- transport.TLSClientConfig = cfg.Traces.TLSCfg
- httpClient.Transport = transport
+
+ if cfg.Traces.TLSCfg != nil || cfg.Traces.Proxy != nil {
+ clonedTransport := ourTransport.Clone()
+ httpClient.Transport = clonedTransport
+
+ if cfg.Traces.TLSCfg != nil {
+ clonedTransport.TLSClientConfig = cfg.Traces.TLSCfg
+ }
+ if cfg.Traces.Proxy != nil {
+ clonedTransport.Proxy = cfg.Traces.Proxy
+ }
}
stopCh := make(chan struct{})
@@ -152,6 +149,10 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc
request.reset(ctx)
resp, err := d.client.Do(request.Request)
+ var urlErr *url.Error
+ if errors.As(err, &urlErr) && urlErr.Temporary() {
+ return newResponseError(http.Header{})
+ }
if err != nil {
return err
}
@@ -172,8 +173,11 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc
if _, err := io.Copy(&respData, resp.Body); err != nil {
return err
}
+ if respData.Len() == 0 {
+ return nil
+ }
- if respData.Len() != 0 {
+ if resp.Header.Get("Content-Type") == "application/x-protobuf" {
var respProto coltracepb.ExportTraceServiceResponse
if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
return err
@@ -190,7 +194,10 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc
}
return nil
- case sc == http.StatusTooManyRequests, sc == http.StatusServiceUnavailable:
+ case sc == http.StatusTooManyRequests,
+ sc == http.StatusBadGateway,
+ sc == http.StatusServiceUnavailable,
+ sc == http.StatusGatewayTimeout:
// Retry-able failures. Drain the body to reuse the connection.
if _, err := io.Copy(io.Discard, resp.Body); err != nil {
otel.Handle(err)
@@ -236,7 +243,7 @@ func (d *client) newRequest(body []byte) (request, error) {
if _, err := gz.Write(body); err != nil {
return req, err
}
- // Close needs to be called to ensure body if fully written.
+ // Close needs to be called to ensure body is fully written.
if err := gz.Close(); err != nil {
return req, err
}
@@ -309,7 +316,10 @@ func evaluate(err error) (bool, time.Duration) {
return false, 0
}
- rErr, ok := err.(retryableError)
+ // Do not use errors.As here, this should only be flattened one layer. If
+ // there are several chained errors, all the errors above it will be
+ // discarded if errors.As is used instead.
+ rErr, ok := err.(retryableError) //nolint:errorlint
if !ok {
return false, 0
}
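
The client changes above widen the set of retryable responses (429, 502, 503, 504) and treat temporary URL errors as retryable. How aggressively those retries run is still governed by the exporter's retry policy; a minimal sketch, with illustrative durations:

```go
package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	ctx := context.Background()
	// 429, 502, 503, and 504 responses are retried using this policy.
	exp, err := otlptracehttp.New(ctx,
		otlptracehttp.WithRetry(otlptracehttp.RetryConfig{
			Enabled:         true,
			InitialInterval: 5 * time.Second,
			MaxInterval:     30 * time.Second,
			MaxElapsedTime:  time.Minute,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
```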
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
index e7f066b43ce8..43534cbfba4f 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
@@ -1,19 +1,63 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
-Package otlptracehttp a client that sends traces to the collector using HTTP
-with binary protobuf payloads.
+Package otlptracehttp provides an OTLP span exporter using HTTP with protobuf payloads.
+By default the telemetry is sent to https://localhost:4318/v1/traces.
+
+Exporter should be created using [New].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") -
+target base URL ("/v1/traces" is appended) to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+environment variable and by [WithEndpoint], [WithEndpointURL], [WithInsecure] options.
+
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4318/v1/traces") -
+target URL to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
+key-value pairs used as headers associated with HTTP requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
+the compression strategy the exporter uses to compress the HTTP body.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompression] option.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
*/
package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
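
The new package documentation spells out the precedence chain: signal-specific variables override the generic ones, and programmatic options override both. A minimal sketch of purely environment-driven configuration (the endpoint value is hypothetical):

```go
package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	// The traces-specific variable beats OTEL_EXPORTER_OTLP_ENDPOINT;
	// any option passed to New would beat both.
	os.Setenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", "http://collector.example.com:4318/v1/traces")
	os.Setenv("OTEL_EXPORTER_OTLP_TRACES_COMPRESSION", "gzip")

	exp, err := otlptracehttp.New(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	_ = exp
}
```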
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go
index 23b8642040d3..fae89ea4fe67 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
index 5e9e8185d15e..26a316d003df 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig"
@@ -174,13 +163,13 @@ func stringToHeader(value string) map[string]string {
global.Error(errors.New("missing '="), "parse headers", "input", header)
continue
}
- name, err := url.QueryUnescape(n)
+ name, err := url.PathUnescape(n)
if err != nil {
global.Error(err, "escape header key", "key", n)
continue
}
trimmedName := strings.TrimSpace(name)
- value, err := url.QueryUnescape(v)
+ value, err := url.PathUnescape(v)
if err != nil {
global.Error(err, "escape header value", "value", v)
continue
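
The switch from url.QueryUnescape to url.PathUnescape matters for header values containing "+": PathUnescape leaves it literal instead of decoding it to a space. A small sketch of the difference (the value is illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	const v = "Bearer%20token+abc" // hypothetical header value
	p, _ := url.PathUnescape(v)
	q, _ := url.QueryUnescape(v)
	fmt.Println(p) // "Bearer token+abc": "+" kept literal, as headers expect
	fmt.Println(q) // "Bearer token abc": the old behavior decoded "+" to a space
}
```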
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go
index 01347d8c651c..e4142b9d7c57 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go
index 45f137a7872a..ff4141b6dc82 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
index 9a595c36a624..2ebbc752f4b7 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
@@ -2,24 +2,15 @@
// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
import (
"crypto/tls"
"fmt"
+ "net/http"
+ "net/url"
"path"
"strings"
"time"
@@ -32,6 +23,7 @@ import (
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
)
const (
@@ -44,6 +36,10 @@ const (
)
type (
+ // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
+ // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client.
+ HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
SignalConfig struct {
Endpoint string
Insecure bool
@@ -55,6 +51,8 @@ type (
// gRPC configurations
GRPCCredentials credentials.TransportCredentials
+
+ Proxy HTTPTransportProxyFunc
}
Config struct {
@@ -141,9 +139,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
if cfg.Traces.Compression == GzipCompression {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
}
- if len(cfg.DialOptions) != 0 {
- cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
- }
if cfg.ReconnectionPeriod != 0 {
p := grpc.ConnectParams{
Backoff: backoff.DefaultConfig,
@@ -261,6 +256,9 @@ func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
// Generic Options
+// WithEndpoint configures the trace host and port only; endpoint should
+// resemble "example.com" or "localhost:4317". To configure the scheme and path,
+// use WithEndpointURL.
func WithEndpoint(endpoint string) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Endpoint = endpoint
@@ -268,6 +266,26 @@ func WithEndpoint(endpoint string) GenericOption {
})
}
+// WithEndpointURL configures the trace scheme, host, port, and path; the
+// provided value should resemble "https://example.com:4318/v1/traces".
+func WithEndpointURL(v string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "otlptrace: parse endpoint url", "url", v)
+ return cfg
+ }
+
+ cfg.Traces.Endpoint = u.Host
+ cfg.Traces.URLPath = u.Path
+ if u.Scheme != "https" {
+ cfg.Traces.Insecure = true
+ }
+
+ return cfg
+ })
+}
+
func WithCompression(compression Compression) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Compression = compression
@@ -326,3 +344,10 @@ func WithTimeout(duration time.Duration) GenericOption {
return cfg
})
}
+
+func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Proxy = pf
+ return cfg
+ })
+}
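
WithEndpointURL above parses the URL once and fans its parts out to the signal config: the host goes to Endpoint, the path to URLPath, and any non-"https" scheme flips Insecure. A sketch of that mapping (the URL is hypothetical):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Mirrors the field mapping WithEndpointURL performs above.
	u, err := url.Parse("http://collector.example.com:4318/v1/traces")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Host)              // -> cfg.Traces.Endpoint
	fmt.Println(u.Path)              // -> cfg.Traces.URLPath
	fmt.Println(u.Scheme != "https") // -> cfg.Traces.Insecure (true here)
}
```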
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go
index 8625674855d1..bc4db059524e 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go
index c342f7d68310..dd6f12b220ba 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
index f051ad5d95cd..9e04a9bc1969 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/partialsuccess.go
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
index 44974ff49bd7..86c4819f4491 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
@@ -2,18 +2,7 @@
// source: internal/shared/otlp/retry/retry.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package retry provides request retry functionality that can perform
// configurable exponential backoff for transient errors and honor any
@@ -123,7 +112,7 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
}
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
- return fmt.Errorf("%w: %s", ctxErr, err)
+ return fmt.Errorf("%w: %w", ctxErr, err)
}
}
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
index e3ed6494c5dd..6497f3ccdd0e 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
@@ -1,21 +1,12 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
import (
"crypto/tls"
+ "net/http"
+ "net/url"
"time"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
@@ -26,6 +17,11 @@ import (
// collector.
type Compression otlpconfig.Compression
+// HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
+// This type is compatible with http.Transport.Proxy and can be used to set a custom proxy function
+// to the OTLP HTTP client.
+type HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
const (
// NoCompression tells the driver to send payloads without
// compression.
@@ -60,15 +56,48 @@ func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config
return w.ApplyHTTPOption(cfg)
}
-// WithEndpoint allows one to set the address of the collector
-// endpoint that the driver will use to send spans. If
-// unset, it will instead try to use
-// the default endpoint (localhost:4318). Note that the endpoint
-// must not contain any URL path.
+// WithEndpoint sets the target endpoint (host and port) the Exporter will
+// connect to. The provided endpoint should resemble "example.com:4318" (no
+// scheme or path).
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// will take precedence. Note, both environment variables include the full
+// scheme and path, while WithEndpoint sets only the host and port.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
func WithEndpoint(endpoint string) Option {
return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
}
+// WithEndpointURL sets the target endpoint URL (scheme, host, port, path) the
+// Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+ return wrappedOption{otlpconfig.WithEndpointURL(u)}
+}
+
// WithCompression tells the driver to compress the sent data.
func WithCompression(compression Compression) Option {
return wrappedOption{otlpconfig.WithCompression(otlpconfig.Compression(compression))}
@@ -114,3 +143,10 @@ func WithTimeout(duration time.Duration) Option {
func WithRetry(rc RetryConfig) Option {
return wrappedOption{otlpconfig.WithRetry(retry.Config(rc))}
}
+
+// WithProxy sets the Proxy function the client will use to determine the
+// proxy to use for an HTTP request. If this option is not used, the client
+// will use [http.ProxyFromEnvironment].
+func WithProxy(pf HTTPTransportProxyFunc) Option {
+ return wrappedOption{otlpconfig.WithProxy(otlpconfig.HTTPTransportProxyFunc(pf))}
+}
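
WithProxy plugs straight into http.Transport.Proxy, so the standard library helpers already fit the HTTPTransportProxyFunc signature. A minimal sketch using a fixed proxy (the proxy URL is hypothetical):

```go
package main

import (
	"context"
	"log"
	"net/http"
	"net/url"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	proxyURL, err := url.Parse("http://proxy.internal:3128") // hypothetical proxy
	if err != nil {
		log.Fatal(err)
	}
	// http.ProxyURL returns a func(*http.Request) (*url.URL, error),
	// which matches HTTPTransportProxyFunc exactly.
	exp, err := otlptracehttp.New(context.Background(),
		otlptracehttp.WithProxy(otlptracehttp.HTTPTransportProxyFunc(http.ProxyURL(proxyURL))),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = exp
}
```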
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
index 10ac73ee3b8e..14ad8c33b48d 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
@@ -1,20 +1,9 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
func Version() string {
- return "1.19.0"
+ return "1.28.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
index 9a58fb1d372e..93e80ea306c1 100644
--- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
+++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
@@ -1,18 +1,7 @@
#!/usr/bin/env bash
# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go
index 4115fe3bbb5a..07623b679145 100644
--- a/vendor/go.opentelemetry.io/otel/handler.go
+++ b/vendor/go.opentelemetry.io/otel/handler.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
@@ -18,12 +7,8 @@ import (
"go.opentelemetry.io/otel/internal/global"
)
-var (
- // Compile-time check global.ErrDelegator implements ErrorHandler.
- _ ErrorHandler = (*global.ErrDelegator)(nil)
- // Compile-time check global.ErrLogger implements ErrorHandler.
- _ ErrorHandler = (*global.ErrLogger)(nil)
-)
+// Compile-time check global.ErrDelegator implements ErrorHandler.
+var _ ErrorHandler = (*global.ErrDelegator)(nil)
// GetErrorHandler returns the global ErrorHandler instance.
//
@@ -44,5 +29,5 @@ func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() }
// delegate errors to h.
func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) }
-// Handle is a convenience function for ErrorHandler().Handle(err).
-func Handle(err error) { global.Handle(err) }
+// Handle is a convenience function for GetErrorHandler().Handle(err).
+func Handle(err error) { global.GetErrorHandler().Handle(err) }
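
With Handle now routing through GetErrorHandler(), a user-installed handler observes every error the API reports. A minimal sketch of installing one via otel.ErrorHandlerFunc:

```go
package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

func main() {
	// ErrorHandlerFunc adapts a plain function to the ErrorHandler interface.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel: %v", err)
	}))
	otel.Handle(errors.New("example export failure")) // delegated to the handler above
}
```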
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
index 622c3ee3f276..822d84794741 100644
--- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package attribute provide several helper functions for some commonly used
@@ -25,33 +14,33 @@ import (
// BoolSliceValue converts a bool slice into an array with same elements as slice.
func BoolSliceValue(v []bool) interface{} {
var zero bool
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
- return cp.Elem().Interface()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
}
// Int64SliceValue converts an int64 slice into an array with same elements as slice.
func Int64SliceValue(v []int64) interface{} {
var zero int64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
- return cp.Elem().Interface()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
}
// Float64SliceValue converts a float64 slice into an array with same elements as slice.
func Float64SliceValue(v []float64) interface{} {
var zero float64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
- return cp.Elem().Interface()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
}
// StringSliceValue converts a string slice into an array with same elements as slice.
func StringSliceValue(v []string) interface{} {
var zero string
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
- return cp.Elem().Interface()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
}
// AsBoolSlice converts a bool array into a slice with the same elements as the array.
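
The rewritten slice conversions build the backing array with reflect.New(...).Elem() and fill it with reflect.Copy, avoiding the intermediate Slice(...).Interface() round trip. The result is a comparable array value, which is what attribute values rely on. A standalone sketch of the pattern:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	v := []int64{1, 2, 3}
	// Build a [3]int64, copy the slice in, and return it as an interface{}.
	// Arrays, unlike slices, are comparable values.
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64(0)))).Elem()
	reflect.Copy(cp, reflect.ValueOf(v))
	fmt.Println(cp.Interface()) // [1 2 3]
}
```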
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
index b96e5408e69a..b4f85f44a93e 100644
--- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package baggage provides base types and functionality to store and retrieve
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
index 4469700d9cbb..3aea9c491f0b 100644
--- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package baggage // import "go.opentelemetry.io/otel/internal/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go
index f532f07e9e52..4259f0320d4d 100644
--- a/vendor/go.opentelemetry.io/otel/internal/gen.go
+++ b/vendor/go.opentelemetry.io/otel/internal/gen.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/internal"
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
index 5e9b83047924..c657ff8e755a 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
@@ -1,38 +1,13 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"log"
- "os"
"sync/atomic"
)
-var (
- // GlobalErrorHandler provides an ErrorHandler that can be used
- // throughout an OpenTelemetry instrumented project. When a user
- // specified ErrorHandler is registered (`SetErrorHandler`) all calls to
- // `Handle` and will be delegated to the registered ErrorHandler.
- GlobalErrorHandler = defaultErrorHandler()
-
- // Compile-time check that delegator implements ErrorHandler.
- _ ErrorHandler = (*ErrDelegator)(nil)
- // Compile-time check that errLogger implements ErrorHandler.
- _ ErrorHandler = (*ErrLogger)(nil)
-)
-
// ErrorHandler handles irremediable events.
type ErrorHandler interface {
// Handle handles any error deemed irremediable by an OpenTelemetry
@@ -44,59 +19,18 @@ type ErrDelegator struct {
delegate atomic.Pointer[ErrorHandler]
}
-func (d *ErrDelegator) Handle(err error) {
- d.getDelegate().Handle(err)
-}
+// Compile-time check that delegator implements ErrorHandler.
+var _ ErrorHandler = (*ErrDelegator)(nil)
-func (d *ErrDelegator) getDelegate() ErrorHandler {
- return *d.delegate.Load()
+func (d *ErrDelegator) Handle(err error) {
+ if eh := d.delegate.Load(); eh != nil {
+ (*eh).Handle(err)
+ return
+ }
+ log.Print(err)
}
// setDelegate sets the ErrorHandler delegate.
func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
d.delegate.Store(&eh)
}
-
-func defaultErrorHandler() *ErrDelegator {
- d := &ErrDelegator{}
- d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)})
- return d
-}
-
-// ErrLogger logs errors if no delegate is set, otherwise they are delegated.
-type ErrLogger struct {
- l *log.Logger
-}
-
-// Handle logs err if no delegate is set, otherwise it is delegated.
-func (h *ErrLogger) Handle(err error) {
- h.l.Print(err)
-}
-
-// GetErrorHandler returns the global ErrorHandler instance.
-//
-// The default ErrorHandler instance returned will log all errors to STDERR
-// until an override ErrorHandler is set with SetErrorHandler. All
-// ErrorHandler returned prior to this will automatically forward errors to
-// the set instance instead of logging.
-//
-// Subsequent calls to SetErrorHandler after the first will not forward errors
-// to the new ErrorHandler for prior returned instances.
-func GetErrorHandler() ErrorHandler {
- return GlobalErrorHandler
-}
-
-// SetErrorHandler sets the global ErrorHandler to h.
-//
-// The first time this is called all ErrorHandler previously returned from
-// GetErrorHandler will send errors to h instead of the default logging
-// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
-// delegate errors to h.
-func SetErrorHandler(h ErrorHandler) {
- GlobalErrorHandler.setDelegate(h)
-}
-
-// Handle is a convenience function for ErrorHandler().Handle(err).
-func Handle(err error) {
- GetErrorHandler().Handle(err)
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
index ebb13c20678e..3a0cc42f6a47 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
@@ -292,6 +281,32 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco
}
}
+type sfGauge struct {
+ embedded.Float64Gauge
+
+ name string
+ opts []metric.Float64GaugeOption
+
+ delegate atomic.Value // metric.Float64Gauge
+}
+
+var _ metric.Float64Gauge = (*sfGauge)(nil)
+
+func (i *sfGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64Gauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Float64Gauge).Record(ctx, x, opts...)
+ }
+}
+
type siCounter struct {
embedded.Int64Counter
@@ -369,3 +384,29 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record
ctr.(metric.Int64Histogram).Record(ctx, x, opts...)
}
}
+
+type siGauge struct {
+ embedded.Int64Gauge
+
+ name string
+ opts []metric.Int64GaugeOption
+
+ delegate atomic.Value // metric.Int64Gauge
+}
+
+var _ metric.Int64Gauge = (*siGauge)(nil)
+
+func (i *siGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64Gauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Int64Gauge).Record(ctx, x, opts...)
+ }
+}
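
With sfGauge and siGauge in place, the global meter can hand out synchronous gauges before an SDK is installed; records are dropped until setDelegate wires in the real instrument. A minimal sketch (the instrument name is illustrative):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

func main() {
	ctx := context.Background()
	// Works even before a real MeterProvider is installed: Record is a
	// no-op until the delegate is set, then flows to the SDK instrument.
	gauge, err := otel.Meter("example").Int64Gauge("queue.depth")
	if err != nil {
		otel.Handle(err)
		return
	}
	gauge.Record(ctx, 42)
}
```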
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
index c6f305a2b76a..adbca7d34775 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
@@ -23,17 +12,20 @@ import (
"github.com/go-logr/stdr"
)
-// globalLogger is the logging interface used within the otel api and sdk provide details of the internals.
+// globalLogger holds a reference to the [logr.Logger] used within
+// go.opentelemetry.io/otel.
//
// The default logger uses stdr which is backed by the standard `log.Logger`
// interface. This logger will only show messages at the Error Level.
-var globalLogger atomic.Pointer[logr.Logger]
+var globalLogger = func() *atomic.Pointer[logr.Logger] {
+ l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
-func init() {
- SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
-}
+ p := new(atomic.Pointer[logr.Logger])
+ p.Store(&l)
+ return p
+}()
-// SetLogger overrides the globalLogger with l.
+// SetLogger sets the global Logger to l.
//
// To see Warn messages use a logger with `l.V(1).Enabled() == true`
// To see Info messages use a logger with `l.V(4).Enabled() == true`
@@ -42,28 +34,29 @@ func SetLogger(l logr.Logger) {
globalLogger.Store(&l)
}
-func getLogger() logr.Logger {
+// GetLogger returns the global logger.
+func GetLogger() logr.Logger {
return *globalLogger.Load()
}
// Info prints messages about the general state of the API or SDK.
// This should usually be less than 5 messages a minute.
func Info(msg string, keysAndValues ...interface{}) {
- getLogger().V(4).Info(msg, keysAndValues...)
+ GetLogger().V(4).Info(msg, keysAndValues...)
}
// Error prints messages about exceptional states of the API or SDK.
func Error(err error, msg string, keysAndValues ...interface{}) {
- getLogger().Error(err, msg, keysAndValues...)
+ GetLogger().Error(err, msg, keysAndValues...)
}
// Debug prints messages about all internal changes in the API or SDK.
func Debug(msg string, keysAndValues ...interface{}) {
- getLogger().V(8).Info(msg, keysAndValues...)
+ GetLogger().V(8).Info(msg, keysAndValues...)
}
// Warn prints messages about warnings in the API or SDK.
// Not an error but is likely more important than an informational event.
func Warn(msg string, keysAndValues ...interface{}) {
- getLogger().V(1).Info(msg, keysAndValues...)
+ GetLogger().V(1).Info(msg, keysAndValues...)
}
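
globalLogger is now initialized eagerly instead of via init(), and the getter is exported as GetLogger. The verbosity mapping in the doc comment still applies; a minimal sketch of installing a stdr-backed logger that shows Warn and Info but not Debug:

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
	"go.opentelemetry.io/otel"
)

func main() {
	// Per the doc comment: V(1)=Warn, V(4)=Info, V(8)=Debug.
	stdr.SetVerbosity(4) // show Info and Warn, but not Debug
	otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))
}
```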
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
index 0097db478c6a..cfd1df9bfa28 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
@@ -76,6 +65,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me
key := il{
name: name,
version: c.InstrumentationVersion(),
+ schema: c.SchemaURL(),
}
if p.meters == nil {
@@ -130,9 +120,11 @@ func (m *meter) setDelegate(provider metric.MeterProvider) {
inst.setDelegate(meter)
}
- for e := m.registry.Front(); e != nil; e = e.Next() {
+ var n *list.Element
+ for e := m.registry.Front(); e != nil; e = n {
r := e.Value.(*registration)
r.setDelegate(meter)
+ n = e.Next()
m.registry.Remove(e)
}
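
The loop fix above captures e.Next() before calling Remove: list.Remove clears the element's links, so the old `e = e.Next()` post statement returned nil after the first removal and silently skipped the remaining registrations. The standard container/list removal pattern, as a standalone sketch:

```go
package main

import (
	"container/list"
	"fmt"
)

func main() {
	l := list.New()
	for _, v := range []int{1, 2, 3} {
		l.PushBack(v)
	}
	// Capture Next before Remove: after Remove(e), e.Next() returns nil,
	// which would otherwise end the loop after the first element.
	var n *list.Element
	for e := l.Front(); e != nil; e = n {
		n = e.Next()
		fmt.Println(e.Value)
		l.Remove(e)
	}
}
```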
@@ -173,6 +165,17 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti
return i, nil
}
+func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Int64Gauge(name, options...)
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &siGauge{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
+}
+
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64ObservableCounter(name, options...)
@@ -239,6 +242,17 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram
return i, nil
}
+func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Float64Gauge(name, options...)
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &sfGauge{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
+}
+
func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64ObservableCounter(name, options...)
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
index 06bac35c2fe5..38560ff9915f 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
index 7985005bcb6d..204ea142a505 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/state.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
@@ -25,6 +14,10 @@ import (
)
type (
+ errorHandlerHolder struct {
+ eh ErrorHandler
+ }
+
tracerProviderHolder struct {
tp trace.TracerProvider
}
@@ -39,15 +32,59 @@ type (
)
var (
+ globalErrorHandler = defaultErrorHandler()
globalTracer = defaultTracerValue()
globalPropagators = defaultPropagatorsValue()
globalMeterProvider = defaultMeterProvider()
+ delegateErrorHandlerOnce sync.Once
delegateTraceOnce sync.Once
delegateTextMapPropagatorOnce sync.Once
delegateMeterOnce sync.Once
)
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandlers returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for previously returned instances.
+func GetErrorHandler() ErrorHandler {
+ return globalErrorHandler.Load().(errorHandlerHolder).eh
+}
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called, all ErrorHandlers previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) {
+ current := GetErrorHandler()
+
+ if _, cOk := current.(*ErrDelegator); cOk {
+ if _, ehOk := h.(*ErrDelegator); ehOk && current == h {
+ // Do not set the delegate of the default ErrDelegator to itself.
+ Error(
+ errors.New("no ErrorHandler delegate configured"),
+ "ErrorHandler remains its current value.",
+ )
+ return
+ }
+ }
+
+ delegateErrorHandlerOnce.Do(func() {
+ if def, ok := current.(*ErrDelegator); ok {
+ def.setDelegate(h)
+ }
+ })
+ globalErrorHandler.Store(errorHandlerHolder{eh: h})
+}
+
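// Illustrative sketch, not part of the vendored change: a custom handler
// (the logHandler type here is hypothetical) installed through the public
// otel package, which stores it in this global state.
package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

type logHandler struct{}

func (logHandler) Handle(err error) { log.Printf("otel error: %v", err) }

func main() {
	otel.SetErrorHandler(logHandler{})
	otel.Handle(errors.New("boom")) // now reaches logHandler
}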
// TracerProvider is the internal implementation for global.TracerProvider.
func TracerProvider() trace.TracerProvider {
return globalTracer.Load().(tracerProviderHolder).tp
@@ -63,7 +100,7 @@ func SetTracerProvider(tp trace.TracerProvider) {
// to itself.
Error(
errors.New("no delegate configured in tracer provider"),
- "Setting tracer provider to it's current value. No delegate will be configured",
+ "Setting tracer provider to its current value. No delegate will be configured",
)
return
}
@@ -92,7 +129,7 @@ func SetTextMapPropagator(p propagation.TextMapPropagator) {
// delegate to itself.
Error(
errors.New("no delegate configured in text map propagator"),
- "Setting text map propagator to it's current value. No delegate will be configured",
+ "Setting text map propagator to its current value. No delegate will be configured",
)
return
}
@@ -123,7 +160,7 @@ func SetMeterProvider(mp metric.MeterProvider) {
// to itself.
Error(
errors.New("no delegate configured in meter provider"),
- "Setting meter provider to it's current value. No delegate will be configured",
+ "Setting meter provider to its current value. No delegate will be configured",
)
return
}
@@ -137,6 +174,12 @@ func SetMeterProvider(mp metric.MeterProvider) {
globalMeterProvider.Store(meterProviderHolder{mp: mp})
}
+func defaultErrorHandler() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(errorHandlerHolder{eh: &ErrDelegator{}})
+ return v
+}
+
func defaultTracerValue() *atomic.Value {
v := &atomic.Value{}
v.Store(tracerProviderHolder{tp: &tracerProvider{}})
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
index 3f61ec12a34f..e31f442b48f9 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
@@ -97,6 +86,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
key := il{
name: name,
version: c.InstrumentationVersion(),
+ schema: c.SchemaURL(),
}
if p.tracers == nil {
@@ -112,10 +102,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
return t
}
-type il struct {
- name string
- version string
-}
+type il struct{ name, version, schema string }
// tracer is a placeholder for a trace.Tracer.
//
@@ -193,6 +180,9 @@ func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
// AddEvent does nothing.
func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+// AddLink does nothing.
+func (nonRecordingSpan) AddLink(trace.Link) {}
+
// SetName does nothing.
func (nonRecordingSpan) SetName(string) {}
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
index e07e7940004c..3e7bb3b3566c 100644
--- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
+++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/internal"
diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go
index c4f8acd5d837..6de7f2e4d886 100644
--- a/vendor/go.opentelemetry.io/otel/internal_logging.go
+++ b/vendor/go.opentelemetry.io/otel/internal_logging.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
index f955171951fd..1e6473b32f35 100644
--- a/vendor/go.opentelemetry.io/otel/metric.go
+++ b/vendor/go.opentelemetry.io/otel/metric.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md
new file mode 100644
index 000000000000..0cf902e01f0b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/README.md
@@ -0,0 +1,3 @@
+# Metric API
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
index 072baa8e8d0a..cf23db778032 100644
--- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
@@ -50,7 +39,7 @@ type Float64ObservableCounter interface {
}
// Float64ObservableCounterConfig contains options for asynchronous counter
-// instruments that record int64 values.
+// instruments that record float64 values.
type Float64ObservableCounterConfig struct {
description string
unit string
@@ -108,7 +97,7 @@ type Float64ObservableUpDownCounter interface {
}
// Float64ObservableUpDownCounterConfig contains options for asynchronous
-// counter instruments that record int64 values.
+// counter instruments that record float64 values.
type Float64ObservableUpDownCounterConfig struct {
description string
unit string
@@ -165,7 +154,7 @@ type Float64ObservableGauge interface {
}
// Float64ObservableGaugeConfig contains options for asynchronous counter
-// instruments that record int64 values.
+// instruments that record float64 values.
type Float64ObservableGaugeConfig struct {
description string
unit string
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
index 9bd6ebf02054..c82ba5324e22 100644
--- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
index 778ad2d748b5..d9e3b13e4d13 100644
--- a/vendor/go.opentelemetry.io/otel/metric/config.go
+++ b/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go
index 54716e13b355..f153745b005f 100644
--- a/vendor/go.opentelemetry.io/otel/metric/doc.go
+++ b/vendor/go.opentelemetry.io/otel/metric/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package metric provides the OpenTelemetry API used to measure metrics about
@@ -68,6 +57,23 @@ asynchronous measurement, a Gauge ([Int64ObservableGauge] and
See the [OpenTelemetry documentation] for more information about instruments
and their intended use.
+# Instrument Name
+
+OpenTelemetry defines an [instrument name syntax] that restricts what
+instrument names are allowed.
+
+Instrument names should ...
+
+ - Not be empty.
+ - Have an alphabetic character as their first character.
+ - Have any character after the first be an alphanumeric character, ‘_’,
+ ‘.’, ‘-’, or ‘/’.
+ - Have a maximum length of 255 characters.
+
+To ensure compatibility with observability platforms, all instruments created
+need to conform to this syntax. Not all implementations of the API will validate
+these names; it is the caller's responsibility to ensure compliance.
+
# Measurements
Measurements are made by recording values and information about the values with
@@ -164,6 +170,7 @@ It is strongly recommended that authors only embed
That implementation is the only one OpenTelemetry authors can guarantee will
fully implement all the API interfaces when a user updates their API.
+[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax
[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
*/
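// Illustrative sketch, not part of the vendored change: one way a caller
// could pre-validate a name against the syntax listed above, since not every
// API implementation checks it.
package main

import (
	"fmt"
	"regexp"
)

var instrumentName = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_./-]{0,254}$`)

func main() {
	fmt.Println(instrumentName.MatchString("http.server.duration")) // true
	fmt.Println(instrumentName.MatchString("9lives"))               // false: must start alphabetic
}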
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
new file mode 100644
index 000000000000..1f6e0efa73d8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
@@ -0,0 +1,3 @@
+# Metric Embedded
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
index ae0bdbd2e645..1a9dc68093fe 100644
--- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package embedded provides interfaces embedded within the [OpenTelemetry
// metric API].
@@ -113,6 +102,16 @@ type Float64Counter interface{ float64Counter() }
// the API package).
type Float64Histogram interface{ float64Histogram() }
+// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64Gauge interface{ float64Gauge() }
+
// Float64ObservableCounter is embedded in
// [go.opentelemetry.io/otel/metric.Float64ObservableCounter].
//
@@ -185,6 +184,16 @@ type Int64Counter interface{ int64Counter() }
// the API package).
type Int64Histogram interface{ int64Histogram() }
+// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience
+// a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Int64Gauge interface{ int64Gauge() }
+
// Int64ObservableCounter is embedded in
// [go.opentelemetry.io/otel/metric.Int64ObservableCounter].
//
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go
index be89cd533417..ea52e402331b 100644
--- a/vendor/go.opentelemetry.io/otel/metric/instrument.go
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
@@ -27,6 +16,7 @@ type InstrumentOption interface {
Int64CounterOption
Int64UpDownCounterOption
Int64HistogramOption
+ Int64GaugeOption
Int64ObservableCounterOption
Int64ObservableUpDownCounterOption
Int64ObservableGaugeOption
@@ -34,6 +24,7 @@ type InstrumentOption interface {
Float64CounterOption
Float64UpDownCounterOption
Float64HistogramOption
+ Float64GaugeOption
Float64ObservableCounterOption
Float64ObservableUpDownCounterOption
Float64ObservableGaugeOption
@@ -62,6 +53,11 @@ func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra
return c
}
+func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
+ c.description = string(o)
+ return c
+}
+
func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
c.description = string(o)
return c
@@ -92,6 +88,11 @@ func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi
return c
}
+func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
+ c.description = string(o)
+ return c
+}
+
func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
c.description = string(o)
return c
@@ -127,6 +128,11 @@ func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra
return c
}
+func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
c.unit = string(o)
return c
@@ -157,6 +163,11 @@ func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi
return c
}
+func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
c.unit = string(o)
return c
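// Illustrative sketch, not part of the vendored change: because descOpt and
// unitOpt now implement the gauge option interfaces, the shared
// WithDescription and WithUnit options apply to the new gauge configs too.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
)

func main() {
	cfg := metric.NewFloat64GaugeConfig(
		metric.WithDescription("CPU temperature"),
		metric.WithUnit("Cel"),
	)
	fmt.Println(cfg.Description(), cfg.Unit()) // CPU temperature Cel
}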
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
index 2520bc74af17..6a7991e0151c 100644
--- a/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
@@ -58,17 +47,37 @@ type Meter interface {
// Int64Counter returns a new Int64Counter instrument identified by name
// and configured with options. The instrument is used to synchronously
// record increasing int64 measurements during a computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
// Int64UpDownCounter returns a new Int64UpDownCounter instrument
// identified by name and configured with options. The instrument is used
// to synchronously record int64 measurements during a computational
// operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
// Int64Histogram returns a new Int64Histogram instrument identified by
// name and configured with options. The instrument is used to
// synchronously record the distribution of int64 measurements during a
// computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+ // Int64Gauge returns a new Int64Gauge instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // instantaneous int64 measurements during a computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
// Int64ObservableCounter returns a new Int64ObservableCounter identified
// by name and configured with options. The instrument is used to
// asynchronously record increasing int64 measurements once per a
@@ -78,6 +87,10 @@ type Meter interface {
// the WithInt64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
// instrument identified by name and configured with options. The
@@ -88,6 +101,10 @@ type Meter interface {
// the WithInt64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
// Int64ObservableGauge returns a new Int64ObservableGauge instrument
// identified by name and configured with options. The instrument is used
@@ -98,23 +115,47 @@ type Meter interface {
// the WithInt64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
// Float64Counter returns a new Float64Counter instrument identified by
// name and configured with options. The instrument is used to
// synchronously record increasing float64 measurements during a
// computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
// Float64UpDownCounter returns a new Float64UpDownCounter instrument
// identified by name and configured with options. The instrument is used
// to synchronously record float64 measurements during a computational
// operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
// Float64Histogram returns a new Float64Histogram instrument identified by
// name and configured with options. The instrument is used to
// synchronously record the distribution of float64 measurements during a
// computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+ // Float64Gauge returns a new Float64Gauge instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // instantaneous float64 measurements during a computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
// Float64ObservableCounter returns a new Float64ObservableCounter
// instrument identified by name and configured with options. The
// instrument is used to asynchronously record increasing float64
@@ -124,6 +165,10 @@ type Meter interface {
// the WithFloat64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
// Float64ObservableUpDownCounter returns a new
// Float64ObservableUpDownCounter instrument identified by name and
@@ -134,6 +179,10 @@ type Meter interface {
// the WithFloat64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
// Float64ObservableGauge returns a new Float64ObservableGauge instrument
// identified by name and configured with options. The instrument is used
@@ -144,6 +193,10 @@ type Meter interface {
// the WithFloat64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
// RegisterCallback registers f to be called during the collection of a
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
new file mode 100644
index 000000000000..bb89694356b8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
@@ -0,0 +1,3 @@
+# Metric Noop
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop)
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
new file mode 100644
index 000000000000..ca6fcbdc0996
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -0,0 +1,281 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the OpenTelemetry metric API that
+// produces no telemetry and minimizes the computation resources used.
+//
+// Using this package to implement the OpenTelemetry metric API will
+// effectively disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry metric API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/metric/noop"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+var (
+ // Compile-time check this implements the OpenTelemetry API.
+
+ _ metric.MeterProvider = MeterProvider{}
+ _ metric.Meter = Meter{}
+ _ metric.Observer = Observer{}
+ _ metric.Registration = Registration{}
+ _ metric.Int64Counter = Int64Counter{}
+ _ metric.Float64Counter = Float64Counter{}
+ _ metric.Int64UpDownCounter = Int64UpDownCounter{}
+ _ metric.Float64UpDownCounter = Float64UpDownCounter{}
+ _ metric.Int64Histogram = Int64Histogram{}
+ _ metric.Float64Histogram = Float64Histogram{}
+ _ metric.Int64Gauge = Int64Gauge{}
+ _ metric.Float64Gauge = Float64Gauge{}
+ _ metric.Int64ObservableCounter = Int64ObservableCounter{}
+ _ metric.Float64ObservableCounter = Float64ObservableCounter{}
+ _ metric.Int64ObservableGauge = Int64ObservableGauge{}
+ _ metric.Float64ObservableGauge = Float64ObservableGauge{}
+ _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{}
+ _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
+ _ metric.Int64Observer = Int64Observer{}
+ _ metric.Float64Observer = Float64Observer{}
+)
+
+// MeterProvider is an OpenTelemetry No-Op MeterProvider.
+type MeterProvider struct{ embedded.MeterProvider }
+
+// NewMeterProvider returns a MeterProvider that does not record any telemetry.
+func NewMeterProvider() MeterProvider {
+ return MeterProvider{}
+}
+
+// Meter returns an OpenTelemetry Meter that does not record any telemetry.
+func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
+ return Meter{}
+}
+
+// Meter is an OpenTelemetry No-Op Meter.
+type Meter struct{ embedded.Meter }
+
+// Int64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+ return Int64Counter{}, nil
+}
+
+// Int64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+ return Int64UpDownCounter{}, nil
+}
+
+// Int64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+ return Int64Histogram{}, nil
+}
+
+// Int64Gauge returns a Gauge used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+ return Int64Gauge{}, nil
+}
+
+// Int64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+ return Int64ObservableCounter{}, nil
+}
+
+// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+ return Int64ObservableUpDownCounter{}, nil
+}
+
+// Int64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+ return Int64ObservableGauge{}, nil
+}
+
+// Float64Counter returns a Counter used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+ return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+ return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+ return Float64Histogram{}, nil
+}
+
+// Float64Gauge returns a Gauge used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ return Float64Gauge{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+ return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record float64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+ return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+ return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+ return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback; it performs no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. This will always return nil because the No-Op Meter performs no
+// operation and holds no record of registrations.
+func (Registration) Unregister() error { return nil }
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
+
+// Add performs no operation.
+func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
+
+// Add performs no operation.
+func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64Histogram is an OpenTelemetry Histogram used to record int64
+// measurements. It produces no telemetry.
+type Int64Histogram struct{ embedded.Int64Histogram }
+
+// Record performs no operation.
+func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Histogram is an OpenTelemetry Histogram used to record float64
+// measurements. It produces no telemetry.
+type Float64Histogram struct{ embedded.Float64Histogram }
+
+// Record performs no operation.
+func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64
+// measurements. It produces no telemetry.
+type Int64Gauge struct{ embedded.Int64Gauge }
+
+// Record performs no operation.
+func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64
+// measurements. It produces no telemetry.
+type Float64Gauge struct{ embedded.Float64Gauge }
+
+// Record performs no operation.
+func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableCounter
+}
+
+// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableCounter
+}
+
+// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableGauge struct {
+ metric.Int64Observable
+ embedded.Int64ObservableGauge
+}
+
+// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableGauge struct {
+ metric.Float64Observable
+ embedded.Float64ObservableGauge
+}
+
+// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record int64 measurements. It produces no telemetry.
+type Int64ObservableUpDownCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableUpDownCounter
+}
+
+// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record float64 measurements. It produces no telemetry.
+type Float64ObservableUpDownCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableUpDownCounter
+}
+
+// Int64Observer is a recorder of int64 measurements that performs no operation.
+type Int64Observer struct{ embedded.Int64Observer }
+
+// Observe performs no operation.
+func (Int64Observer) Observe(int64, ...metric.ObserveOption) {}
+
+// Float64Observer is a recorder of float64 measurements that performs no
+// operation.
+type Float64Observer struct{ embedded.Float64Observer }
+
+// Observe performs no operation.
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {}
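// Illustrative sketch, not part of the vendored change: embedding the noop
// types as the package doc suggests gives a partial implementation
// (recordingMeter is hypothetical) whose unimplemented methods are safe
// no-ops.
package main

import (
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

// recordingMeter overrides only Int64Counter; every other Meter method falls
// back to the embedded no-op behavior.
type recordingMeter struct {
	noop.Meter
	created []string
}

func (m *recordingMeter) Int64Counter(name string, _ ...metric.Int64CounterOption) (metric.Int64Counter, error) {
	m.created = append(m.created, name)
	return noop.Int64Counter{}, nil
}

var _ metric.Meter = (*recordingMeter)(nil)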
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
index 0a4825ae6a79..8403a4bad2de 100644
--- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
@@ -39,7 +28,7 @@ type Float64Counter interface {
}
// Float64CounterConfig contains options for synchronous counter instruments that
-// record int64 values.
+// record float64 values.
type Float64CounterConfig struct {
description string
unit string
@@ -92,7 +81,7 @@ type Float64UpDownCounter interface {
}
// Float64UpDownCounterConfig contains options for synchronous counter
-// instruments that record int64 values.
+// instruments that record float64 values.
type Float64UpDownCounterConfig struct {
description string
unit string
@@ -144,8 +133,8 @@ type Float64Histogram interface {
Record(ctx context.Context, incr float64, options ...RecordOption)
}
-// Float64HistogramConfig contains options for synchronous counter instruments
-// that record int64 values.
+// Float64HistogramConfig contains options for synchronous histogram
+// instruments that record float64 values.
type Float64HistogramConfig struct {
description string
unit string
@@ -183,3 +172,55 @@ func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
type Float64HistogramOption interface {
applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
}
+
+// Float64Gauge is an instrument that records instantaneous float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Gauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64Gauge
+
+ // Record records the instantaneous value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, value float64, options ...RecordOption)
+}
+
+// Float64GaugeConfig contains options for synchronous gauge instruments that
+// record float64 values.
+type Float64GaugeConfig struct {
+ description string
+ unit string
+}
+
+// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts
+// applied.
+func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig {
+ var config Float64GaugeConfig
+ for _, o := range opts {
+ config = o.applyFloat64Gauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64GaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64GaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Float64GaugeOption applies options to a [Float64GaugeConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64GaugeOption.
+type Float64GaugeOption interface {
+ applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig
+}
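// Illustrative sketch, not part of the vendored change: recording an
// instantaneous value with the new synchronous gauge, attaching attributes
// via WithAttributeSet as the Record doc recommends (noop provider used so
// the snippet is self-contained).
package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	meter := noop.NewMeterProvider().Meter("example")
	g, err := meter.Float64Gauge("cpu.temperature", metric.WithUnit("Cel"))
	if err != nil {
		panic(err)
	}
	set := attribute.NewSet(attribute.String("host", "node-1"))
	g.Record(context.Background(), 71.5, metric.WithAttributeSet(set))
}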
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
index 56667d32fc01..783fdfba7739 100644
--- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
@@ -144,7 +133,7 @@ type Int64Histogram interface {
Record(ctx context.Context, incr int64, options ...RecordOption)
}
-// Int64HistogramConfig contains options for synchronous counter instruments
+// Int64HistogramConfig contains options for synchronous histogram instruments
// that record int64 values.
type Int64HistogramConfig struct {
description string
@@ -183,3 +172,55 @@ func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
type Int64HistogramOption interface {
applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
}
+
+// Int64Gauge is an instrument that records instantaneous int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Gauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64Gauge
+
+ // Record records the instantaneous value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, value int64, options ...RecordOption)
+}
+
+// Int64GaugeConfig contains options for synchronous gauge instruments that
+// record int64 values.
+type Int64GaugeConfig struct {
+ description string
+ unit string
+}
+
+// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts
+// applied.
+func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig {
+ var config Int64GaugeConfig
+ for _, o := range opts {
+ config = o.applyInt64Gauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64GaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64GaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Int64GaugeOption applies options to an [Int64GaugeConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64GaugeOption.
+type Int64GaugeOption interface {
+ applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
index d29aaa32c0b5..2fd9497338ff 100644
--- a/vendor/go.opentelemetry.io/otel/propagation.go
+++ b/vendor/go.opentelemetry.io/otel/propagation.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md
new file mode 100644
index 000000000000..e2959ac747a2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/README.md
@@ -0,0 +1,3 @@
+# Propagation
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation)
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
index 303cdf1cbff4..552263ba7343 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
index c119eb2858b6..33a3baf15f15 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/doc.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package propagation contains OpenTelemetry context propagators.
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
index c94438f73a5e..8c8286aab4d2 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
index 75a8f3435a52..6870e316dc00 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package propagation // import "go.opentelemetry.io/otel/propagation"
@@ -18,7 +7,7 @@ import (
"context"
"encoding/hex"
"fmt"
- "regexp"
+ "strings"
"go.opentelemetry.io/otel/trace"
)
@@ -28,6 +17,7 @@ const (
maxVersion = 254
traceparentHeader = "traceparent"
tracestateHeader = "tracestate"
+ delimiter = "-"
)
// TraceContext is a propagator that supports the W3C Trace Context format
@@ -41,11 +31,11 @@ const (
type TraceContext struct{}
var (
- _ TextMapPropagator = TraceContext{}
- traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
+ _ TextMapPropagator = TraceContext{}
+ versionPart = fmt.Sprintf("%.2X", supportedVersion)
)
-// Inject set tracecontext from the Context into the carrier.
+// Inject injects the trace context from ctx into carrier.
func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
sc := trace.SpanContextFromContext(ctx)
if !sc.IsValid() {
@@ -59,12 +49,19 @@ func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
// Clear all flags other than the trace-context supported sampling bit.
flags := sc.TraceFlags() & trace.FlagsSampled
- h := fmt.Sprintf("%.2x-%s-%s-%s",
- supportedVersion,
- sc.TraceID(),
- sc.SpanID(),
- flags)
- carrier.Set(traceparentHeader, h)
+ var sb strings.Builder
+ sb.Grow(2 + 32 + 16 + 2 + 3)
+ _, _ = sb.WriteString(versionPart)
+ traceID := sc.TraceID()
+ spanID := sc.SpanID()
+ flagByte := [1]byte{byte(flags)}
+ var buf [32]byte
+ for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
+ _ = sb.WriteByte(delimiter[0])
+ n := hex.Encode(buf[:], src)
+ _, _ = sb.Write(buf[:n])
+ }
+ carrier.Set(traceparentHeader, sb.String())
}
// Extract reads tracecontext from the carrier into a returned Context.
@@ -86,21 +83,8 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
return trace.SpanContext{}
}
- matches := traceCtxRegExp.FindStringSubmatch(h)
-
- if len(matches) == 0 {
- return trace.SpanContext{}
- }
-
- if len(matches) < 5 { // four subgroups plus the overall match
- return trace.SpanContext{}
- }
-
- if len(matches[1]) != 2 {
- return trace.SpanContext{}
- }
- ver, err := hex.DecodeString(matches[1])
- if err != nil {
+ var ver [1]byte
+ if !extractPart(ver[:], &h, 2) {
return trace.SpanContext{}
}
version := int(ver[0])
@@ -108,36 +92,24 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
return trace.SpanContext{}
}
- if version == 0 && len(matches) != 5 { // four subgroups plus the overall match
- return trace.SpanContext{}
- }
-
- if len(matches[2]) != 32 {
- return trace.SpanContext{}
- }
-
var scc trace.SpanContextConfig
-
- scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32])
- if err != nil {
+ if !extractPart(scc.TraceID[:], &h, 32) {
return trace.SpanContext{}
}
-
- if len(matches[3]) != 16 {
- return trace.SpanContext{}
- }
- scc.SpanID, err = trace.SpanIDFromHex(matches[3])
- if err != nil {
+ if !extractPart(scc.SpanID[:], &h, 16) {
return trace.SpanContext{}
}
- if len(matches[4]) != 2 {
+ var opts [1]byte
+ if !extractPart(opts[:], &h, 2) {
return trace.SpanContext{}
}
- opts, err := hex.DecodeString(matches[4])
- if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) {
+ if version == 0 && (h != "" || opts[0] > 2) {
+ // Version 0 does not allow trailing data after the flags field,
+ // nor flag values beyond those the specification defines.
return trace.SpanContext{}
}
+
// Clear all flags other than the trace-context supported sampling bit.
scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
@@ -155,6 +127,29 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
return sc
}
+// upperHex reports whether v contains any upper-case hexadecimal characters.
+func upperHex(v string) bool {
+ for _, c := range v {
+ if c >= 'A' && c <= 'F' {
+ return true
+ }
+ }
+ return false
+}
+
+func extractPart(dst []byte, h *string, n int) bool {
+ part, left, _ := strings.Cut(*h, delimiter)
+ *h = left
+ // hex.Decode accepts upper-case characters, which traceparent does not allow, so reject them explicitly.
+ if len(part) != n || upperHex(part) {
+ return false
+ }
+ if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
+ return false
+ }
+ return true
+}
+
// Fields returns the keys whose values are set with Inject.
func (tc TraceContext) Fields() []string {
return []string{traceparentHeader, tracestateHeader}
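
For orientation: the rewrite above replaces the regexp with fixed-width, `-`-delimited parsing of the W3C traceparent value (`version-traceid-spanid-flags`). A minimal standalone sketch of that strategy follows; `parseTraceparent` is a hypothetical helper, not part of the patch, and it omits the vendored code's extra checks (upper-case hex, version-0 trailing data).

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// parseTraceparent cuts a traceparent value on "-" and hex-decodes each
// fixed-width field, the same strategy extractPart uses above.
func parseTraceparent(h string) (traceID [16]byte, spanID [8]byte, flags byte, err error) {
	next := func(n int) ([]byte, error) {
		part, rest, _ := strings.Cut(h, "-")
		h = rest
		if len(part) != n {
			return nil, fmt.Errorf("field has length %d, want %d", len(part), n)
		}
		buf := make([]byte, n/2)
		_, err := hex.Decode(buf, []byte(part))
		return buf, err
	}
	version, err := next(2)
	if err != nil {
		return traceID, spanID, 0, err
	}
	if version[0] != 0 {
		return traceID, spanID, 0, fmt.Errorf("unsupported version %d", version[0])
	}
	t, err := next(32)
	if err != nil {
		return traceID, spanID, 0, err
	}
	copy(traceID[:], t)
	s, err := next(16)
	if err != nil {
		return traceID, spanID, 0, err
	}
	copy(spanID[:], s)
	f, err := next(2)
	if err != nil {
		return traceID, spanID, 0, err
	}
	return traceID, spanID, f[0], nil
}

func main() {
	tid, sid, flags, err := parseTraceparent(
		"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01")
	fmt.Printf("%x %x %02x %v\n", tid, sid, flags, err)
}
```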
diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json
new file mode 100644
index 000000000000..8c5ac55ca93b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/renovate.json
@@ -0,0 +1,24 @@
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "config:recommended"
+ ],
+ "ignorePaths": [],
+ "labels": ["Skip Changelog", "dependencies"],
+ "postUpdateOptions" : [
+ "gomodTidy"
+ ],
+ "packageRules": [
+ {
+ "matchManagers": ["gomod"],
+ "matchDepTypes": ["indirect"],
+ "enabled": true
+ },
+ {
+ "matchFileNames": ["internal/tools/**"],
+ "matchManagers": ["gomod"],
+ "matchDepTypes": ["indirect"],
+ "enabled": false
+ }
+ ]
+}
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
index e0a43e13840e..ab09daf9d53c 100644
--- a/vendor/go.opentelemetry.io/otel/requirements.txt
+++ b/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -1 +1 @@
-codespell==2.2.6
+codespell==2.3.0
diff --git a/vendor/go.opentelemetry.io/otel/sdk/README.md b/vendor/go.opentelemetry.io/otel/sdk/README.md
new file mode 100644
index 000000000000..f81b1576ad48
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/README.md
@@ -0,0 +1,3 @@
+# SDK
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
new file mode 100644
index 000000000000..06e6d8685484
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
@@ -0,0 +1,3 @@
+# SDK Instrumentation
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
index 6e923acab432..a4faa6a03d50 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package instrumentation provides types to represent the code libraries that
// provide OpenTelemetry instrumentation. These types are used in the
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
index 39f025a1715b..f4d1857c4f42 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
index 09c6d93f6d09..728115045bb4 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
index 59dcfab25013..07923ed8d945 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package env // import "go.opentelemetry.io/otel/sdk/internal/env"
@@ -33,7 +22,7 @@ const (
BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE"
// BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e.
// 512). Note: it must be less than or equal to
- // EnvBatchSpanProcessorMaxQueueSize.
+ // BatchSpanProcessorMaxQueueSize.
BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE"
// AttributeValueLengthKey is the maximum allowed attribute value size.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go
deleted file mode 100644
index bd84f624b45d..000000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/otel/sdk/internal"
-
-//go:generate gotmpl --body=../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
-//go:generate gotmpl --body=../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
-//go:generate gotmpl --body=../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
-
-//go:generate gotmpl --body=../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
-//go:generate gotmpl --body=../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
-//go:generate gotmpl --body=../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
-//go:generate gotmpl --body=../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
-//go:generate gotmpl --body=../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/sdk/internal/matchers\"}" --out=internaltest/harness.go
-//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
-//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
-//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
-//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go
deleted file mode 100644
index dfeaaa8ca044..000000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/otel/sdk/internal"
-
-import "time"
-
-// MonotonicEndTime returns the end time at present
-// but offset from start, monotonically.
-//
-// The monotonic clock is used in subtractions hence
-// the duration since start added back to start gives
-// end as a monotonic time.
-// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
-func MonotonicEndTime(start time.Time) time.Time {
- return start.Add(time.Since(start))
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
new file mode 100644
index 000000000000..fab61647c2d0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
@@ -0,0 +1,46 @@
+# Experimental Features
+
+The SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Resource](#resource)
+
+### Resource
+
+[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental.
+To have [resource detectors] add experimental semantic conventions, set the `OTEL_GO_X_RESOURCE` environment variable.
+The value must be the case-insensitive string `"true"` to enable the feature.
+All other values are ignored.
+
+
+
+[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/
+[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector
+
+#### Examples
+
+Enable experimental resource semantic conventions.
+
+```console
+export OTEL_GO_X_RESOURCE=true
+```
+
+Disable experimental resource semantic conventions.
+
+```console
+unset OTEL_GO_X_RESOURCE
+```
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
new file mode 100644
index 000000000000..68d296cbed36
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x contains support for OTel SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/internal/x"
+
+import (
+ "os"
+ "strings"
+)
+
+// Resource is an experimental feature flag that controls whether resource
+// detectors should include experimental semantic conventions.
+//
+// To enable this feature, set the OTEL_GO_X_RESOURCE environment variable
+// to the case-insensitive string value of "true" (e.g. "True" and "TRUE"
+// also enable it).
+var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
+ if strings.ToLower(v) == "true" {
+ return v, true
+ }
+ return "", false
+})
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+ key string
+ parse func(v string) (T, bool)
+}
+
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+ const envKeyRoot = "OTEL_GO_X_"
+ return Feature[T]{
+ key: envKeyRoot + suffix,
+ parse: parse,
+ }
+}
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
+func (f Feature[T]) Lookup() (v T, ok bool) {
+ // /~https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+ //
+ // > The SDK MUST interpret an empty value of an environment variable the
+ // > same way as when the variable is unset.
+ vRaw := os.Getenv(f.key)
+ if vRaw == "" {
+ return v, ok
+ }
+ return f.parse(vRaw)
+}
+
+// Enabled returns if the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+ _, ok := f.Lookup()
+ return ok
+}
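
The generic `Feature[T]` above pairs an `OTEL_GO_X_*` environment key with a typed parser. Since the package is internal, here is a standalone sketch of the same pattern; the integer-valued `OTEL_GO_X_MAX_BATCH` flag is invented purely for illustration.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// feature mirrors the x.Feature pattern: an environment-variable key paired
// with a typed parser.
type feature[T any] struct {
	key   string
	parse func(string) (T, bool)
}

func (f feature[T]) lookup() (v T, ok bool) {
	raw := os.Getenv(f.key)
	if raw == "" { // empty is treated the same as unset, per the SDK spec
		return v, false
	}
	return f.parse(raw)
}

// maxBatch is a hypothetical integer-valued flag using the same scheme.
var maxBatch = feature[int]{
	key: "OTEL_GO_X_MAX_BATCH",
	parse: func(s string) (int, bool) {
		n, err := strconv.Atoi(s)
		return n, err == nil && n > 0
	},
}

func main() {
	os.Setenv("OTEL_GO_X_MAX_BATCH", "512")
	if n, ok := maxBatch.lookup(); ok {
		fmt.Println("batch size:", n) // batch size: 512
	}
}
```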
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md
new file mode 100644
index 000000000000..4ad864d716ef
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md
@@ -0,0 +1,3 @@
+# SDK Resource
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/resource)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
index 4279013be880..95a61d61d49c 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package resource // import "go.opentelemetry.io/otel/sdk/resource"
@@ -41,8 +30,20 @@ type Detector interface {
// must never be done outside of a new major release.
}
-// Detect calls all input detectors sequentially and merges each result with the previous one.
-// It returns the merged error too.
+// Detect returns a new [Resource] merged from all the Resources each of the
+// detectors produces. The detectors are called sequentially, in the
+// order they are passed, merging the produced resource into the previous.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if that error is returned from a detector. It may also
+// return a merge-conflicting Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from different detectors results
+// in a schema URL conflict. It is up to the caller to determine if this
+// returned Resource should be used or not.
+//
+// If one of the detectors returns an error that is not [ErrPartialResource],
+// the resource produced by the detector will not be merged and the returned
+// error will wrap that detector's error.
func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
r := new(Resource)
return r, detect(ctx, r, detectors)
@@ -50,6 +51,10 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
// detect runs all detectors using ctx and merges the result into res. This
// assumes res is allocated and not nil, it will panic otherwise.
+//
+// If the detectors or the merging of resources produce any errors (e.g.
+// [ErrPartialResource] or [ErrSchemaURLConflict]), a single error wrapping
+// all of these errors is returned. Otherwise, nil is returned.
func detect(ctx context.Context, res *Resource, detectors []Detector) error {
var (
r *Resource
@@ -78,6 +83,11 @@ func detect(ctx context.Context, res *Resource, detectors []Detector) error {
if len(errs) == 0 {
return nil
}
+ if errors.Is(errs, ErrSchemaURLConflict) {
+ // If there has been a merge conflict, ensure the resource has no
+ // schema URL.
+ res.schemaURL = ""
+ }
return errs
}
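
A short usage sketch of the documented `Detect` contract, assuming the vendored versions above; the custom `envDetector` is illustrative only.

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

// envDetector is an illustrative custom Detector.
type envDetector struct{}

func (envDetector) Detect(context.Context) (*resource.Resource, error) {
	return resource.NewSchemaless(attribute.String("deployment.environment", "staging")), nil
}

func main() {
	res, err := resource.Detect(context.Background(), envDetector{})
	switch {
	case errors.Is(err, resource.ErrSchemaURLConflict):
		// Detectors disagreed on schema URLs; res carries an empty schema URL.
	case errors.Is(err, resource.ErrPartialResource):
		// A detector failed part-way; res may still be usable.
	case err != nil:
		panic(err)
	}
	fmt.Println(res)
}
```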
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
index c63a0dd1f8c1..6ac1cdbf7b45 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package resource // import "go.opentelemetry.io/otel/sdk/resource"
@@ -20,9 +9,11 @@ import (
"os"
"path/filepath"
+ "github.com/google/uuid"
+
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)
type (
@@ -47,6 +38,8 @@ type (
}
defaultServiceNameDetector struct{}
+
+ defaultServiceInstanceIDDetector struct{}
)
var (
@@ -54,6 +47,7 @@ var (
_ Detector = host{}
_ Detector = stringDetector{}
_ Detector = defaultServiceNameDetector{}
+ _ Detector = defaultServiceInstanceIDDetector{}
)
// Detect returns a *Resource that describes the OpenTelemetry SDK used.
@@ -106,3 +100,19 @@ func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error)
},
).Detect(ctx)
}
+
+// Detect implements Detector.
+func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ return StringDetector(
+ semconv.SchemaURL,
+ semconv.ServiceInstanceIDKey,
+ func() (string, error) {
+ version4Uuid, err := uuid.NewRandom()
+ if err != nil {
+ return "", err
+ }
+
+ return version4Uuid.String(), nil
+ },
+ ).Detect(ctx)
+}
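
The new detector above builds on the exported `StringDetector` helper, which wraps any `func() (string, error)` in a `Detector`. A sketch of the same construction; pairing it with the host name here is illustrative, not something the patch does.

```go
package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	// StringDetector wraps a single attribute lookup in a Detector; the new
	// service-instance-ID detector is built the same way.
	d := resource.StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname)
	res, err := d.Detect(context.Background())
	fmt.Println(res, err)
}
```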
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
index f263919f6ec0..0d6e213d9245 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
index 3d536228283e..5ecd859a52d8 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package resource // import "go.opentelemetry.io/otel/sdk/resource"
@@ -22,14 +11,14 @@ import (
"os"
"regexp"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)
type containerIDProvider func() (string, error)
var (
containerID containerIDProvider = getContainerIDFromCGroup
- cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`)
+ cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)
)
type cgroupContainerIDDetector struct{}
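
The widened delimiter class in the expression above (`[-:]` instead of `-`) lets the detector also extract container IDs that follow a colon. A quick check of the new pattern against sample cgroup lines; the sample paths are invented for illustration.

```go
package main

import (
	"fmt"
	"regexp"
)

// The updated expression from the patch: the delimiter before the ID may now
// be ':' as well as '-'.
var cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)

func main() {
	for _, line := range []string{
		"13:cpu:/docker/0123456789abcdef0123456789abcdef",  // bare path
		"0::/system.slice/docker-0123456789abcdef.scope",   // '-' delimiter
		"0::/system.slice/cri-containerd:0123456789abcdef", // ':' delimiter, newly matched
	} {
		if m := cgroupContainerIDRe.FindStringSubmatch(line); m != nil {
			fmt.Println(m[1])
		}
	}
}
```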
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
index d55a50b0dc2c..64939a271315 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package resource provides detecting and representing resources.
//
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
index e29ae563a694..813f05624244 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package resource // import "go.opentelemetry.io/otel/sdk/resource"
@@ -23,7 +12,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)
const (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
index fb1ebf2cab29..2d0f65498a07 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package resource // import "go.opentelemetry.io/otel/sdk/resource"
@@ -19,7 +8,7 @@ import (
"errors"
"strings"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)
type hostIDProvider func() (string, error)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
index 1778bbacf059..cc8b8938ed54 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
//go:build dragonfly || freebsd || netbsd || openbsd || solaris
// +build dragonfly freebsd netbsd openbsd solaris
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
index ba41409b23ca..b09fde3b7350 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
index 207acb0ed3aa..d9e5d1a8fff6 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
index 410579b8fc97..f84f173240fb 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
//go:build linux
// +build linux
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
index 721e3ca6e7d3..6354b356022c 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
index 89df9d6882e5..df12c44c5646 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
@@ -1,25 +1,8 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
-// +build !darwin
-// +build !dragonfly
-// +build !freebsd
-// +build !linux
-// +build !netbsd
-// +build !openbsd
-// +build !solaris
-// +build !windows
+//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
index 5b431c6ee6e3..71386e2da4c7 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
//go:build windows
// +build windows
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
index 0cbd559739c9..8a48ab4fa32e 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package resource // import "go.opentelemetry.io/otel/sdk/resource"
@@ -19,7 +8,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)
type osDescriptionProvider func() (string, error)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
index 24ec85793de6..ce455dc544bf 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
index c771942deece..f537e5ca5c4a 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
index 1c84afc1852e..a6ff26a4d270 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
index 3ebcb534f28e..a77742b07719 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
@@ -1,27 +1,8 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
-// +build !aix
-// +build !darwin
-// +build !dragonfly
-// +build !freebsd
-// +build !linux
-// +build !netbsd
-// +build !openbsd
-// +build !solaris
-// +build !windows
-// +build !zos
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
index faad64d8daa6..5e3d199d7856 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
index ecdd11dd7622..085fe68fd770 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package resource // import "go.opentelemetry.io/otel/sdk/resource"
@@ -22,7 +11,7 @@ import (
"path/filepath"
"runtime"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)
type (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
index 176ff1066688..ad4b50df4040 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
@@ -1,26 +1,17 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package resource // import "go.opentelemetry.io/otel/sdk/resource"
import (
"context"
"errors"
+ "fmt"
"sync"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/internal/x"
)
// Resource describes an entity about which identifying information
@@ -40,9 +31,20 @@ var (
defaultResourceOnce sync.Once
)
-var errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflicting Schema URL")
+// ErrSchemaURLConflict is an error returned when two Resources are merged
+// together that contain different, non-empty, schema URLs.
+var ErrSchemaURLConflict = errors.New("conflicting Schema URL")
-// New returns a Resource combined from the user-provided detectors.
+// New returns a [Resource] built using opts.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if options that provide a [Detector] are used and that
+// error is returned from one or more of the Detectors. It may also return a
+// merge-conflict Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from the opts results in a
+// schema URL conflict (see [Resource.Merge] for more information). It is up to
+// the caller to determine if this returned Resource should be used or not
+// based on these errors.
func New(ctx context.Context, opts ...Option) (*Resource, error) {
cfg := config{}
for _, opt := range opts {
@@ -98,7 +100,7 @@ func (r *Resource) String() string {
return r.attrs.Encoded(attribute.DefaultEncoder())
}
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+// MarshalLog is the marshaling function used by the logging system to represent this Resource.
func (r *Resource) MarshalLog() interface{} {
return struct {
Attributes attribute.Set
@@ -146,16 +148,29 @@ func (r *Resource) Equal(eq *Resource) bool {
return r.Equivalent() == eq.Equivalent()
}
-// Merge creates a new resource by combining resource a and b.
+// Merge creates a new [Resource] by merging a and b.
+//
+// If there are common keys between a and b, then the value from b will
+// overwrite the value from a, even if b's value is empty.
+//
+// The SchemaURL of the resources will be merged according to the
+// [OpenTelemetry specification rules]:
//
-// If there are common keys between resource a and b, then the value
-// from resource b will overwrite the value from resource a, even
-// if resource b's value is empty.
+// - If a's schema URL is empty then the returned Resource's schema URL will
+// be set to the schema URL of b,
+// - Else if b's schema URL is empty then the returned Resource's schema URL
+// will be set to the schema URL of a,
+// - Else if the schema URLs of a and b are the same then that will be the
+// schema URL of the returned Resource,
+// - Else this is a merging error. If the resources have different,
+// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will
+// be returned with the merged Resource. The merged Resource will have an
+// empty schema URL. It may be the case that some unintended attributes
+// have been overwritten or old semantic conventions persisted in the
+// returned Resource. It is up to the caller to determine if this returned
+// Resource should be used or not.
//
-// The SchemaURL of the resources will be merged according to the spec rules:
-// /~https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge
-// If the resources have different non-empty schemaURL an empty resource and an error
-// will be returned.
+// [OpenTelemetry specification rules]: /~https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge
func Merge(a, b *Resource) (*Resource, error) {
if a == nil && b == nil {
return Empty(), nil
@@ -167,19 +182,6 @@ func Merge(a, b *Resource) (*Resource, error) {
return a, nil
}
- // Merge the schema URL.
- var schemaURL string
- switch true {
- case a.schemaURL == "":
- schemaURL = b.schemaURL
- case b.schemaURL == "":
- schemaURL = a.schemaURL
- case a.schemaURL == b.schemaURL:
- schemaURL = a.schemaURL
- default:
- return Empty(), errMergeConflictSchemaURL
- }
-
// Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key()
// Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...)
mi := attribute.NewMergeIterator(b.Set(), a.Set())
@@ -187,8 +189,23 @@ func Merge(a, b *Resource) (*Resource, error) {
for mi.Next() {
combine = append(combine, mi.Attribute())
}
- merged := NewWithAttributes(schemaURL, combine...)
- return merged, nil
+
+ switch {
+ case a.schemaURL == "":
+ return NewWithAttributes(b.schemaURL, combine...), nil
+ case b.schemaURL == "":
+ return NewWithAttributes(a.schemaURL, combine...), nil
+ case a.schemaURL == b.schemaURL:
+ return NewWithAttributes(a.schemaURL, combine...), nil
+ }
+ // Return the merged resource with an appropriate error. It is up to
+ // the user to decide if the returned resource can be used or not.
+ return NewSchemaless(combine...), fmt.Errorf(
+ "%w: %s and %s",
+ ErrSchemaURLConflict,
+ a.schemaURL,
+ b.schemaURL,
+ )
}
// Empty returns an instance of Resource with no attributes. It is
@@ -202,11 +219,17 @@ func Empty() *Resource {
func Default() *Resource {
defaultResourceOnce.Do(func() {
var err error
- defaultResource, err = Detect(
- context.Background(),
+ defaultDetectors := []Detector{
defaultServiceNameDetector{},
fromEnv{},
telemetrySDK{},
+ }
+ if x.Resource.Enabled() {
+ defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...)
+ }
+ defaultResource, err = Detect(
+ context.Background(),
+ defaultDetectors...,
)
if err != nil {
otel.Handle(err)
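
A sketch of the revised `Merge` contract, assuming the vendored version above: on a schema URL conflict the merged attributes are still returned, together with an error wrapping `ErrSchemaURLConflict`, and the result has an empty schema URL.

```go
package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	a := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.21.0",
		attribute.String("service.name", "checkout"))
	b := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.26.0",
		attribute.String("service.version", "1.2.3"))

	merged, err := resource.Merge(a, b)
	if errors.Is(err, resource.ErrSchemaURLConflict) {
		// Attributes are merged, but the schema URL is cleared; the caller
		// decides whether the result is still acceptable.
		fmt.Println(merged.SchemaURL() == "") // true
	}
	fmt.Println(merged)
}
```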
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md
new file mode 100644
index 000000000000..f2936e143920
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md
@@ -0,0 +1,3 @@
+# SDK Trace
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
index c9c7effbf38a..1d399a75db2f 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
@@ -392,7 +381,7 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R
}
}
-func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool {
+func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool {
if !sd.SpanContext().IsSampled() {
return false
}
@@ -406,7 +395,7 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan)
return false
}
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
func (bsp *batchSpanProcessor) MarshalLog() interface{} {
return struct {
Type string
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
index 0285e99be073..1f60524e3ee8 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package trace contains support for OpenTelemetry distributed tracing.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
index 1e3b426757d9..60a7ed1349b5 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
index d1c86e59b22d..821c83faa1dd 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
@@ -1,36 +1,45 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
+import (
+ "slices"
+ "sync"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
// evictedQueue is a FIFO queue with a configurable capacity.
-type evictedQueue struct {
- queue []interface{}
+type evictedQueue[T any] struct {
+ queue []T
capacity int
droppedCount int
+ logDropped func()
}
-func newEvictedQueue(capacity int) evictedQueue {
+func newEvictedQueueEvent(capacity int) evictedQueue[Event] {
// Do not pre-allocate queue, do this lazily.
- return evictedQueue{capacity: capacity}
+ return evictedQueue[Event]{
+ capacity: capacity,
+ logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }),
+ }
+}
+
+func newEvictedQueueLink(capacity int) evictedQueue[Link] {
+ // Do not pre-allocate queue, do this lazily.
+ return evictedQueue[Link]{
+ capacity: capacity,
+ logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }),
+ }
}
// add adds value to the evictedQueue eq. If eq is at capacity, the oldest
// queued value will be discarded and the drop count incremented.
-func (eq *evictedQueue) add(value interface{}) {
+func (eq *evictedQueue[T]) add(value T) {
if eq.capacity == 0 {
eq.droppedCount++
+ eq.logDropped()
return
}
@@ -39,6 +48,12 @@ func (eq *evictedQueue) add(value interface{}) {
copy(eq.queue[:eq.capacity-1], eq.queue[1:])
eq.queue = eq.queue[:eq.capacity-1]
eq.droppedCount++
+ eq.logDropped()
}
eq.queue = append(eq.queue, value)
}
+
+// copy returns a copy of the evictedQueue.
+func (eq *evictedQueue[T]) copy() []T {
+ return slices.Clone(eq.queue)
+}
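
The rewrite above turns the interface{}-based queue into a generic evictedQueue[T], warns at most once per queue via sync.OnceFunc, and adds a copy helper built on slices.Clone. A self-contained sketch of the same FIFO-with-eviction behavior (local names, not the SDK's unexported API):

package main

import (
	"fmt"
	"slices"
	"sync"
)

// evicted is a FIFO queue with a fixed capacity; the oldest element is
// discarded on overflow, mirroring evictedQueue[T] in the hunk above.
type evicted[T any] struct {
	queue      []T
	capacity   int
	dropped    int
	logDropped func()
}

func newEvicted[T any](capacity int) evicted[T] {
	return evicted[T]{
		capacity:   capacity,
		logDropped: sync.OnceFunc(func() { fmt.Println("limit reached: dropping") }),
	}
}

func (e *evicted[T]) add(v T) {
	if e.capacity == 0 {
		e.dropped++
		e.logDropped()
		return
	}
	if len(e.queue) == e.capacity {
		// Shift out the oldest element; count the drop, log it once.
		copy(e.queue[:e.capacity-1], e.queue[1:])
		e.queue = e.queue[:e.capacity-1]
		e.dropped++
		e.logDropped()
	}
	e.queue = append(e.queue, v)
}

func main() {
	q := newEvicted[int](2)
	for i := 1; i <= 4; i++ {
		q.add(i)
	}
	fmt.Println(slices.Clone(q.queue), "dropped:", q.dropped) // [3 4] dropped: 2
}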
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
index bba246041a44..925bcf993057 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
@@ -52,7 +41,12 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace
gen.Lock()
defer gen.Unlock()
sid := trace.SpanID{}
- _, _ = gen.randSource.Read(sid[:])
+ for {
+ _, _ = gen.randSource.Read(sid[:])
+ if sid.IsValid() {
+ break
+ }
+ }
return sid
}
@@ -62,9 +56,19 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.
gen.Lock()
defer gen.Unlock()
tid := trace.TraceID{}
- _, _ = gen.randSource.Read(tid[:])
sid := trace.SpanID{}
- _, _ = gen.randSource.Read(sid[:])
+ for {
+ _, _ = gen.randSource.Read(tid[:])
+ if tid.IsValid() {
+ break
+ }
+ }
+ for {
+ _, _ = gen.randSource.Read(sid[:])
+ if sid.IsValid() {
+ break
+ }
+ }
return tid, sid
}
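
The new retry loops ensure the generator never returns an all-zero ID, which the W3C trace context treats as invalid. The same rejection-sampling idea in isolation, using crypto/rand purely for brevity (the SDK reads from its own seeded source):

package main

import (
	"crypto/rand"
	"fmt"
)

// isZero reports whether every byte of id is zero; all-zero IDs are
// invalid per the W3C trace context specification.
func isZero(id []byte) bool {
	for _, b := range id {
		if b != 0 {
			return false
		}
	}
	return true
}

// newSpanID retries until a valid (non-zero) 8-byte ID is produced,
// mirroring the loops added in the hunk above.
func newSpanID() [8]byte {
	var sid [8]byte
	for {
		_, _ = rand.Read(sid[:])
		if !isZero(sid[:]) {
			return sid
		}
	}
}

func main() {
	fmt.Printf("%x\n", newSpanID())
}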
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
index 19cfea4ba454..c03bdc90f68a 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
index 7d46c4b48e52..14c2e5bebda0 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
@@ -55,7 +44,7 @@ type tracerProviderConfig struct {
resource *resource.Resource
}
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+// MarshalLog is the marshaling function used by the logging system to represent this Provider.
func (cfg tracerProviderConfig) MarshalLog() interface{} {
return struct {
SpanProcessors []SpanProcessor
@@ -302,7 +291,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error {
retErr = err
} else {
// Poor man's list of errors
- retErr = fmt.Errorf("%v; %v", retErr, err)
+ retErr = fmt.Errorf("%w; %w", retErr, err)
}
}
}
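
The %v to %w switch relies on fmt.Errorf accepting multiple %w verbs since Go 1.20, so both accumulated shutdown errors remain inspectable with errors.Is and errors.As:

package main

import (
	"errors"
	"fmt"
)

var (
	errA = errors.New("processor A failed")
	errB = errors.New("processor B failed")
)

func main() {
	// With %w (Go 1.20+) both operands are wrapped, not just formatted.
	joined := fmt.Errorf("%w; %w", errA, errB)
	fmt.Println(errors.Is(joined, errA), errors.Is(joined, errB)) // true true

	// The old %v form loses the error chain.
	flat := fmt.Errorf("%v; %v", errA, errB)
	fmt.Println(errors.Is(flat, errA)) // false
}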
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
index 02053b318aeb..d2d1f72466be 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
index a7bc125b9e8a..ebb6df6c9085 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
index f8770fff79bb..554111bb4a21 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
@@ -36,10 +25,10 @@ var _ SpanProcessor = (*simpleSpanProcessor)(nil)
// send completed spans to the exporter immediately.
//
// This SpanProcessor is not recommended for production use. The synchronous
-// nature of this SpanProcessor make it good for testing, debugging, or
-// showing examples of other feature, but it will be slow and have a high
-// computation resource usage overhead. The BatchSpanProcessor is recommended
-// for production use instead.
+// nature of this SpanProcessor makes it good for testing, debugging, or showing
+// examples of other features, but it will be slow and have a high computation
+// resource usage overhead. The BatchSpanProcessor is recommended for production
+// use instead.
func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
ssp := &simpleSpanProcessor{
exporter: exporter,
@@ -80,10 +69,10 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
//
// A closure is used to keep reference to the exporter and then the
// field is zeroed. This ensures the simpleSpanProcessor is shut down
- // before the exporter. This order is important as it avoids a
- // potential deadlock. If the exporter shut down operation generates a
- // span, that span would need to be exported. Meaning, OnEnd would be
- // called and try acquiring the lock that is held here.
+ // before the exporter. This order is important as it avoids a potential
+ // deadlock. If the exporter shut down operation generates a span, that
+ // span would need to be exported. Meaning, OnEnd would be called and
+ // try acquiring the lock that is held here.
ssp.exporterMu.Lock()
done, shutdown := stopFunc(ssp.exporter)
ssp.exporter = nil
@@ -95,15 +84,15 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
select {
case err = <-done:
case <-ctx.Done():
- // It is possible for the exporter to have immediately shut down
- // and the context to be done simultaneously. In that case this
- // outer select statement will randomly choose a case. This will
- // result in a different returned error for similar scenarios.
- // Instead, double check if the exporter shut down at the same
- // time and return that error if so. This will ensure consistency
- // as well as ensure the caller knows the exporter shut down
- // successfully (they can already determine if the deadline is
- // expired given they passed the context).
+ // It is possible for the exporter to have immediately shut down and
+ // the context to be done simultaneously. In that case this outer
+ // select statement will randomly choose a case. This will result in
+ // a different returned error for similar scenarios. Instead, double
+ // check if the exporter shut down at the same time and return that
+ // error if so. This will ensure consistency as well as ensure
+ // the caller knows the exporter shut down successfully (they can
+ // already determine if the deadline is expired given they passed
+ // the context).
select {
case err = <-done:
default:
@@ -119,7 +108,8 @@ func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
return nil
}
-// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
+// MarshalLog is the marshaling function used by the logging system to represent
+// this Span Processor.
func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
return struct {
Type string
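
The reflowed comment above describes a real race: if the exporter finishes shutting down at the same instant the context expires, a single select picks a ready case at random. A sketch of the double-check pattern it documents, with a hypothetical done channel carrying the shutdown result:

package main

import (
	"context"
	"fmt"
)

// awaitShutdown waits for a result on done but, when the context expires,
// double-checks done so a simultaneous completion wins over ctx.Err().
func awaitShutdown(ctx context.Context, done <-chan error) error {
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		// The exporter may have finished at the same moment; prefer
		// its result for a consistent return value.
		select {
		case err := <-done:
			return err
		default:
			return ctx.Err()
		}
	}
}

func main() {
	done := make(chan error, 1)
	done <- nil // exporter shutdown already finished

	ctx, cancel := context.WithCancel(context.Background())
	cancel() // context expired at the "same time"

	// Both cases are ready; without the inner select this could
	// randomly return context.Canceled instead of nil.
	fmt.Println(awaitShutdown(ctx, done)) // <nil>
}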
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
index 0349b2f198e4..32f862790c7c 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
index 36dbf67764b9..ac90f1a26005 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
@@ -20,6 +9,7 @@ import (
"reflect"
"runtime"
rt "runtime/trace"
+ "slices"
"strings"
"sync"
"time"
@@ -27,10 +17,10 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/instrumentation"
- "go.opentelemetry.io/otel/sdk/internal"
"go.opentelemetry.io/otel/sdk/resource"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@@ -147,12 +137,13 @@ type recordingSpan struct {
// ReadOnlySpan exported when the span ends.
attributes []attribute.KeyValue
droppedAttributes int
+ logDropAttrsOnce sync.Once
// events are stored in FIFO queue capped by configured limit.
- events evictedQueue
+ events evictedQueue[Event]
// links are stored in FIFO queue capped by configured limit.
- links evictedQueue
+ links evictedQueue[Link]
// executionTracerTaskEnd ends the execution tracer span.
executionTracerTaskEnd func()
@@ -229,7 +220,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
limit := s.tracer.provider.spanLimits.AttributeCountLimit
if limit == 0 {
// No attributes allowed.
- s.droppedAttributes += len(attributes)
+ s.addDroppedAttr(len(attributes))
return
}
@@ -242,10 +233,11 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
// Otherwise, add without deduplication. When attributes are read they
// will be deduplicated, optimizing the operation.
+ s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes))
for _, a := range attributes {
if !a.Valid() {
// Drop all invalid attributes.
- s.droppedAttributes++
+ s.addDroppedAttr(1)
continue
}
a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
@@ -253,6 +245,22 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
}
}
+// Declared as a var so tests can override.
+var logDropAttrs = func() {
+ global.Warn("limit reached: dropping trace Span attributes")
+}
+
+// addDroppedAttr adds incr to the count of dropped attributes.
+//
+// The first, and only the first, time this method is called a warning will be
+// logged.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) addDroppedAttr(incr int) {
+ s.droppedAttributes += incr
+ s.logDropAttrsOnce.Do(logDropAttrs)
+}
+
// addOverCapAttrs adds the attributes attrs to the span s while
// de-duplicating the attributes of s and attrs and dropping attributes that
// exceed the limit.
@@ -277,10 +285,12 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
// Now that s.attributes is deduplicated, adding unique attributes up to
// the capacity of s will not over allocate s.attributes.
+ sum := len(attrs) + len(s.attributes)
+ s.attributes = slices.Grow(s.attributes, min(sum, limit))
for _, a := range attrs {
if !a.Valid() {
// Drop all invalid attributes.
- s.droppedAttributes++
+ s.addDroppedAttr(1)
continue
}
@@ -293,7 +303,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
if len(s.attributes) >= limit {
// Do not just drop all of the remaining attributes, make sure
// updates are checked and performed.
- s.droppedAttributes++
+ s.addDroppedAttr(1)
} else {
a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
s.attributes = append(s.attributes, a)
@@ -374,7 +384,7 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
// Store the end time as soon as possible to avoid artificially increasing
// the span's duration in case some operation below takes a while.
- et := internal.MonotonicEndTime(s.startTime)
+ et := monotonicEndTime(s.startTime)
// Do relative expensive check now that we have an end time and see if we
// need to do any more processing.
@@ -425,6 +435,16 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
}
}
+// monotonicEndTime returns the end time at present but offset from start,
+// monotonically.
+//
+// The monotonic clock is used in subtractions hence the duration since start
+// added back to start gives end as a monotonic time. See
+// https://golang.org/pkg/time/#hdr-Monotonic_Clocks
+func monotonicEndTime(start time.Time) time.Time {
+ return start.Add(time.Since(start))
+}
+
// RecordError will record err as a span event for this span. An additional call to
// SetStatus is required if the Status of the Span should be set to Error, this method
// does not change the Span status. If this span is not being recorded or err is nil
@@ -592,7 +612,7 @@ func (s *recordingSpan) Links() []Link {
if len(s.links.queue) == 0 {
return []Link{}
}
- return s.interfaceArrayToLinksArray()
+ return s.links.copy()
}
// Events returns the events of this span.
@@ -602,7 +622,7 @@ func (s *recordingSpan) Events() []Event {
if len(s.events.queue) == 0 {
return []Event{}
}
- return s.interfaceArrayToEventArray()
+ return s.events.copy()
}
// Status returns the status of this span.
@@ -636,8 +656,12 @@ func (s *recordingSpan) Resource() *resource.Resource {
return s.tracer.provider.resource
}
-func (s *recordingSpan) addLink(link trace.Link) {
- if !s.IsRecording() || !link.SpanContext.IsValid() {
+func (s *recordingSpan) AddLink(link trace.Link) {
+ if !s.IsRecording() {
+ return
+ }
+ if !link.SpanContext.IsValid() && len(link.Attributes) == 0 &&
+ link.SpanContext.TraceState().Len() == 0 {
return
}
@@ -720,32 +744,16 @@ func (s *recordingSpan) snapshot() ReadOnlySpan {
}
sd.droppedAttributeCount = s.droppedAttributes
if len(s.events.queue) > 0 {
- sd.events = s.interfaceArrayToEventArray()
+ sd.events = s.events.copy()
sd.droppedEventCount = s.events.droppedCount
}
if len(s.links.queue) > 0 {
- sd.links = s.interfaceArrayToLinksArray()
+ sd.links = s.links.copy()
sd.droppedLinkCount = s.links.droppedCount
}
return &sd
}
-func (s *recordingSpan) interfaceArrayToLinksArray() []Link {
- linkArr := make([]Link, 0)
- for _, value := range s.links.queue {
- linkArr = append(linkArr, value.(Link))
- }
- return linkArr
-}
-
-func (s *recordingSpan) interfaceArrayToEventArray() []Event {
- eventArr := make([]Event, 0)
- for _, value := range s.events.queue {
- eventArr = append(eventArr, value.(Event))
- }
- return eventArr
-}
-
func (s *recordingSpan) addChild() {
if !s.IsRecording() {
return
@@ -810,6 +818,9 @@ func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
// AddEvent does nothing.
func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+// AddLink does nothing.
+func (nonRecordingSpan) AddLink(trace.Link) {}
+
// SetName does nothing.
func (nonRecordingSpan) SetName(string) {}
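
Two small patterns from this file are worth isolating: addDroppedAttr counts every dropped attribute but logs only once per span, and monotonicEndTime derives the end timestamp from the monotonic clock so wall-clock steps cannot yield a negative duration. A combined sketch:

package main

import (
	"fmt"
	"sync"
	"time"
)

type span struct {
	droppedAttrs int
	logDropOnce  sync.Once
}

// addDroppedAttr mirrors the helper in the hunk above: count every drop,
// warn only on the first one.
func (s *span) addDroppedAttr(incr int) {
	s.droppedAttrs += incr
	s.logDropOnce.Do(func() { fmt.Println("limit reached: dropping attributes") })
}

// monotonicEndTime adds the monotonic elapsed duration back onto start,
// so end minus start is immune to wall-clock adjustments.
func monotonicEndTime(start time.Time) time.Time {
	return start.Add(time.Since(start))
}

func main() {
	var s span
	s.addDroppedAttr(2)
	s.addDroppedAttr(1) // counted, but not logged again
	fmt.Println("dropped:", s.droppedAttrs)

	start := time.Now()
	time.Sleep(10 * time.Millisecond)
	fmt.Println(monotonicEndTime(start).Sub(start) >= 10*time.Millisecond) // true
}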
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
index c9bd52f7ad42..6bdda3d94a4b 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
index aa4d4221db36..bec5e2097876 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
index 9c53657a7190..af7f9177fc52 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
index 301e1a7abccf..43419d3b5419 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
@@ -143,13 +132,13 @@ func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr Sa
spanKind: trace.ValidateSpanKind(config.SpanKind()),
name: name,
startTime: startTime,
- events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit),
- links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit),
+ events: newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit),
+ links: newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit),
tracer: tr,
}
for _, l := range config.Links() {
- s.addLink(l)
+ s.AddLink(l)
}
s.SetAttributes(sr.Attributes...)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
index d3457ed1355a..b84dd2c5eeff 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
index 422d4c964b3f..33d065a7cb99 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -1,20 +1,9 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
- return "1.21.0"
+ return "1.28.0"
}
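
For completeness, the bumped version string is what sdk.Version() reports at runtime:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk"
)

func main() {
	fmt.Println(sdk.Version()) // "1.28.0" with this vendored revision
}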
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go
index 12d6b520f528..09e094de9ada 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/semconv/internal/v2"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go
index 4a711133a026..aa9e1017156a 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/semconv/internal/v2"
@@ -72,7 +61,7 @@ func (c *NetConv) Host(address string) []attribute.KeyValue {
attrs := make([]attribute.KeyValue, 0, n)
attrs = append(attrs, c.HostName(h))
if p > 0 {
- attrs = append(attrs, c.HostPort(int(p)))
+ attrs = append(attrs, c.HostPort(p))
}
return attrs
}
@@ -263,7 +252,7 @@ func (c *NetConv) Peer(address string) []attribute.KeyValue {
attrs := make([]attribute.KeyValue, 0, n)
attrs = append(attrs, c.PeerName(h))
if p > 0 {
- attrs = append(attrs, c.PeerPort(int(p)))
+ attrs = append(attrs, c.PeerPort(p))
}
return attrs
}
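
The int(p) to p cleanups above imply the parsed port is already an int. A rough sketch of the host/port split these helpers depend on, built on the standard library (the SDK has its own unexported helper; this only illustrates the shape):

package main

import (
	"fmt"
	"net"
	"strconv"
)

// splitHostPort returns the host and numeric port of address, with
// port -1 when absent or unparsable.
func splitHostPort(address string) (string, int) {
	h, pStr, err := net.SplitHostPort(address)
	if err != nil {
		return address, -1
	}
	p, err := strconv.Atoi(pStr)
	if err != nil {
		return h, -1
	}
	return h, p
}

func main() {
	h, p := splitHostPort("proxy.example.com:8080")
	fmt.Println(h, p) // proxy.example.com 8080
}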
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md
new file mode 100644
index 000000000000..87b842c5d115
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.17.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
index 71a1f7748d55..e087c9c04d9c 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package semconv implements OpenTelemetry semantic conventions.
//
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
index 679c40c4de49..c7b804bbe2e3 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
index 9b8c559de427..137acc67de09 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
index d5c4b5c136aa..d318221e59fd 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/README.md
new file mode 100644
index 000000000000..18ee2f3d623c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.17.0 HTTP conv
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go
index fc43808fe44d..76d1dc86b42c 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package httpconv provides OpenTelemetry HTTP semantic conventions for
// tracing telemetry.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
index 39a2eab3a6a9..7e365e82ce81 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
index 42fc525d1657..634a1dce07a3 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
index 8c4a7299d27b..21497bb6bc6f 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
new file mode 100644
index 000000000000..82e1f46b4eaf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.20.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0)
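
The generated package introduced below exposes typed attribute constructors; a short usage sketch using only constructors defined in the file that follows:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

func main() {
	// Build standard HTTP attributes for a server span.
	attrs := []attribute.KeyValue{
		semconv.HTTPMethod("GET"),
		semconv.HTTPStatusCode(200),
		semconv.HTTPRoute("/users/:userID"),
	}
	for _, kv := range attrs {
		fmt.Println(kv.Key, kv.Value.Emit())
	}
}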
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
new file mode 100644
index 000000000000..6685c392b50b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
@@ -0,0 +1,1198 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes HTTP attributes.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions. It represents the HTTP request method.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the [HTTP
+ // response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTP Server spans attributes
+const (
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions. It represents the URI scheme identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route (path template in
+ // the format used by the respective server framework). See note below
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/specification/trace/semantic_conventions/http.md#http-server-definitions)
+ // if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain that identifies the business
+ // context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have same `event.name`, yet be
+ // unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions. It represents the transport protocol used. See
+ // note below.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ NetTransportKey = attribute.Key("net.transport")
+
+ // NetProtocolNameKey is the attribute Key conforming to the
+ // "net.protocol.name" semantic conventions. It represents the application
+ // layer protocol used. The value SHOULD be normalized to lowercase.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+ // NetProtocolVersionKey is the attribute Key conforming to the
+ // "net.protocol.version" semantic conventions. It represents the version
+ // of the application layer protocol used. See note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `net.protocol.version` refers to the version of the protocol used
+ // and might be different from the protocol client's version. If the HTTP
+ // client used has a version of `0.27.2`, but sends HTTP version `1.1`,
+ // this attribute should be set to `1.1`.
+ NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions. It represents the remote
+ // socket peer name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If available and different from
+ // `net.peer.name` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 'proxy.example.com'
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions. It represents the remote
+ // socket peer address: IPv4 or IPv6 for internet protocols, path for local
+ // communication,
+ // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '127.0.0.1', '/tmp/mysql.sock'
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions. It represents the remote
+ // socket peer port.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 16456
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions. It represents the protocol
+ // [address
+ // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
+ // which is used for communication.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If different than `inet` and if
+ // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
+ // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
+ // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
+ // instrumentations that follow previous versions of this document.)
+ // Stability: stable
+ // Examples: 'inet6', 'bluetooth'
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions. It represents the logical remote hostname, see
+ // note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
+ // extra DNS lookup.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions. It represents the logical remote port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions. It represents the logical local hostname or
+ // similar, see note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'localhost'
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions. It represents the logical local port number,
+ // preferably the one that the peer used to connect
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 8080
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions. It represents the local
+ // socket address. Useful in case of a multi-IP host.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '192.168.0.1'
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions. It represents the local
+ // socket port number.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If defined for the address
+ // family and if different than `net.host.port` and if `net.sock.host.addr`
+ // is set. In other cases, it is still recommended to set this.)
+ // Stability: stable
+ // Examples: 35555
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe. See note below
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // IPv4 address
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions. It represents the application
+// layer protocol used. The value SHOULD be normalized to lowercase.
+func NetProtocolName(val string) attribute.KeyValue {
+ return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions. It represents the version of
+// the application layer protocol used. See note below.
+func NetProtocolVersion(val string) attribute.KeyValue {
+ return NetProtocolVersionKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It represents the remote socket
+// peer name.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It represents the remote socket
+// peer address: IPv4 or IPv6 for internet protocols, path for local
+// communication,
+// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It represents the remote socket
+// peer port.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the logical remote
+// hostname, see note below.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the logical remote port
+// number
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the logical local
+// hostname or similar, see note below.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the logical local port
+// number, preferably the one that the peer used to connect
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the local socket
+// address. Useful in case of a multi-IP host.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the local socket
+// port number.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetHostConnectionTypeKey is the attribute Key conforming to the
+ // "net.host.connection.type" semantic conventions. It represents the
+ // internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+ // NetHostConnectionSubtypeKey is the attribute Key conforming to the
+ // "net.host.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+ // NetHostCarrierNameKey is the attribute Key conforming to the
+ // "net.host.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+ // NetHostCarrierMccKey is the attribute Key conforming to the
+ // "net.host.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+ // NetHostCarrierMncKey is the attribute Key conforming to the
+ // "net.host.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+ // NetHostCarrierIccKey is the attribute Key conforming to the
+ // "net.host.carrier.icc" semantic conventions. It represents the ISO
+ // 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // wifi
+ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+ // wired
+ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+ // cell
+ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+ // unavailable
+ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+ // unknown
+ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
+
+// NetHostCarrierName returns an attribute KeyValue conforming to the
+// "net.host.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetHostCarrierName(val string) attribute.KeyValue {
+ return NetHostCarrierNameKey.String(val)
+}
+
+// NetHostCarrierMcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+// carrier country code.
+func NetHostCarrierMcc(val string) attribute.KeyValue {
+ return NetHostCarrierMccKey.String(val)
+}
+
+// NetHostCarrierMnc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+// carrier network code.
+func NetHostCarrierMnc(val string) attribute.KeyValue {
+ return NetHostCarrierMncKey.String(val)
+}
+
+// NetHostCarrierIcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetHostCarrierIcc(val string) attribute.KeyValue {
+ return NetHostCarrierIccKey.String(val)
+}
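+
+// exampleHostConnectionAttributes is an editor's sketch (not generated from
+// the specification): enum members such as NetHostConnectionTypeCell are
+// already attribute.KeyValue values and mix freely with the carrier
+// helpers. The carrier name and codes reuse the documented example values.
+func exampleHostConnectionAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  NetHostConnectionTypeCell,
+  NetHostConnectionSubtypeLte,
+  NetHostCarrierName("sprint"),
+  NetHostCarrierMcc("310"),
+  NetHostCarrierMnc("001"),
+ }
+}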
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions. It represents the
+ // size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions. It represents the
+ // size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the size
+// of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the size
+// of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
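+
+// exampleHTTPContentLengthAttributes is an editor's sketch (not generated
+// from the specification): both helpers take the payload size in bytes,
+// excluding headers; the sizes here are illustrative.
+func exampleHTTPContentLengthAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  HTTPRequestContentLength(3495),
+  HTTPResponseContentLength(1024),
+ }
+}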
+
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the [conversation ID](#conversations) identifying the conversation to
+ // which the message belongs, represented as a string. Sometimes called
+ // "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
+ // the "messaging.message.payload_size_bytes" semantic conventions. It
+ // represents the (uncompressed) size of the message payload in bytes. Also
+ // use this attribute if it is unknown whether the compressed or
+ // uncompressed payload size is reported.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+
+ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
+ // conforming to the "messaging.message.payload_compressed_size_bytes"
+ // semantic conventions. It represents the compressed size of the message
+ // payload in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
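+
+// exampleMessageAttributes is an editor's sketch (not generated from the
+// specification): per-message attributes as they might be recorded on a
+// messaging span or link; the ID and sizes are illustrative.
+func exampleMessageAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
+  MessagingMessageConversationID("MyConversationID"),
+  MessagingMessagePayloadSizeBytes(2738),
+ }
+}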
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If
+ // the broker does not have such a notion, the destination name SHOULD
+ // uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// Semantic convention for attributes that describe messaging source on broker
+const (
+ // MessagingSourceNameKey is the attribute Key conforming to the
+ // "messaging.source.name" semantic conventions. It represents the message
+ // source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Source name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If
+ // the broker does not have such a notion, the source name SHOULD uniquely
+ // identify the broker.
+ MessagingSourceNameKey = attribute.Key("messaging.source.name")
+
+ // MessagingSourceTemplateKey is the attribute Key conforming to the
+ // "messaging.source.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Source names could be constructed from templates. An example would
+ // be a source name involving a user name or product id. Although the
+ // source name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+
+ // MessagingSourceTemporaryKey is the attribute Key conforming to the
+ // "messaging.source.temporary" semantic conventions. It represents a
+ // boolean that is true if the message source is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+
+ // MessagingSourceAnonymousKey is the attribute Key conforming to the
+ // "messaging.source.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message source is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+// MessagingSourceName returns an attribute KeyValue conforming to the
+// "messaging.source.name" semantic conventions. It represents the message
+// source name
+func MessagingSourceName(val string) attribute.KeyValue {
+ return MessagingSourceNameKey.String(val)
+}
+
+// MessagingSourceTemplate returns an attribute KeyValue conforming to the
+// "messaging.source.template" semantic conventions. It represents the low
+// cardinality representation of the messaging source name
+func MessagingSourceTemplate(val string) attribute.KeyValue {
+ return MessagingSourceTemplateKey.String(val)
+}
+
+// MessagingSourceTemporary returns an attribute KeyValue conforming to the
+// "messaging.source.temporary" semantic conventions. It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+ return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+ return MessagingSourceAnonymousKey.Bool(val)
+}
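+
+// exampleDestinationAndSourceAttributes is an editor's sketch (not generated
+// from the specification): a producer span would typically carry the
+// destination attributes and a consumer span the source attributes. The
+// high-cardinality name is paired with its low-cardinality template.
+func exampleDestinationAndSourceAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  MessagingDestinationName("/customers/42"),
+  MessagingDestinationTemplate("/customers/{customerID}"),
+  MessagingSourceName("MyQueue"),
+  MessagingSourceTemporary(false),
+ }
+}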
+
+// Attributes for RabbitMQ
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the rabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaClientIDKey is the attribute Key conforming to the
+ // "messaging.kafka.client_id" semantic conventions. It represents the
+ // client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+ // "messaging.kafka.source.partition" semantic conventions. It represents
+ // the partition the message is received from.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+ return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+ return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
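+
+// exampleKafkaAttributes is an editor's sketch (not generated from the
+// specification): Kafka-specific attributes for a produced record; the key,
+// client ID, partition, and offset are illustrative.
+func exampleKafkaAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  MessagingKafkaMessageKey("myKey"),
+  MessagingKafkaClientID("client-5"),
+  MessagingKafkaDestinationPartition(2),
+  MessagingKafkaMessageOffset(42),
+ }
+}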
+
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources, resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqClientIDKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_id" semantic conventions. It represents the
+ // unique identifier for each client.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the it is essential for FIFO message. Messages that belong to the same
+ // message group are always processed one by one within the same consumer
+ // group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of message, another way to mark message besides message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+ return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the it is essential for FIFO message. Messages that belong to the same
+// message group are always processed one by one within the same consumer
+// group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of message, another way to mark message besides message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
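+
+// exampleRocketmqAttributes is an editor's sketch (not generated from the
+// specification): the enum members for message type and consumption model
+// are used as-is, and MessagingRocketmqMessageKeys accepts a variadic list.
+func exampleRocketmqAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  MessagingRocketmqNamespace("myNamespace"),
+  MessagingRocketmqClientGroup("myConsumerGroup"),
+  MessagingRocketmqMessageTypeFifo,
+  MessagingRocketmqConsumptionModelClustering,
+  MessagingRocketmqMessageKeys("keyA", "keyB"),
+ }
+}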
+
+// Describes user-agent attributes.
+const (
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
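+
+// exampleUserAgentAttribute is an editor's sketch (not generated from the
+// specification): the value is normally copied verbatim from the client's
+// User-Agent header.
+func exampleUserAgentAttribute() attribute.KeyValue {
+ return UserAgentOriginal("CERN-LineMode/2.15 libwww/2.17b3")
+}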
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
new file mode 100644
index 000000000000..0d1f55a8fe9b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.20.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
new file mode 100644
index 000000000000..63776393217c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
@@ -0,0 +1,188 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
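+
+// exampleFeatureFlagAttributes is an editor's sketch (not generated from
+// the specification): attributes for a feature-flag evaluation event, using
+// the documented example values.
+func exampleFeatureFlagAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  FeatureFlagKey("logo-color"),
+  FeatureFlagProviderName("Flag Manager"),
+  FeatureFlagVariant("red"),
+ }
+}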
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`, one for sent messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
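+
+// exampleRPCMessageAttributes is an editor's sketch (not generated from the
+// specification): attributes for the first message sent on an RPC span; the
+// per-direction counter starts at 1.
+func exampleRPCMessageAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  MessageTypeSent,
+  MessageID(1),
+  MessageUncompressedSize(2738),
+ }
+}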
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
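+
+// exampleExceptionEscapedAttribute is an editor's sketch (not generated
+// from the specification): set just before ending a span, when an exception
+// is known to still be "in flight".
+func exampleExceptionEscapedAttribute() attribute.KeyValue {
+ return ExceptionEscaped(true)
+}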
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
new file mode 100644
index 000000000000..f40c97825aa2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
new file mode 100644
index 000000000000..9c1840631b66
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
new file mode 100644
index 000000000000..3d44dae2750b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
@@ -0,0 +1,2060 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
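+
+// exampleBrowserResourceAttributes is an editor's sketch (not generated
+// from the specification): BrowserBrands accepts a variadic list, mirroring
+// navigator.userAgentData.brands; all values are illustrative.
+func exampleBrowserResourceAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  BrowserBrands(" Not A;Brand 99", "Chromium 99", "Chrome 99"),
+  BrowserPlatform("Windows"),
+  BrowserMobile(false),
+  BrowserLanguage("en-US"),
+ }
+}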
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
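+
+// Illustrative usage, not part of the generated conventions: per the note on
+// CloudPlatformKey, the platform value's prefix should match `cloud.provider`.
+// A minimal sketch of a consistent pair, assuming the OTel SDK resource
+// package and this package's SchemaURL constant:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		CloudProviderAWS,       // cloud.provider = "aws"
+//		CloudPlatformAWSLambda, // cloud.platform = "aws_lambda"; prefix matches
+//	)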
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+// on Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP).
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
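+
+// Illustrative usage, not generated content: cloud.resource_id carries the
+// provider-native identifier, e.g. a Lambda function ARN on AWS. The ARN
+// below is a made-up placeholder:
+//
+//	attr := CloudResourceID("arn:aws:lambda:us-east-1:123456789012:function:my-function")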
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone where the resource is running. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
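+
+// Illustrative usage, not generated content: combining the ECS helpers above
+// into one attribute set. The values mirror the documented examples; this is
+// a sketch, not a complete resource detector:
+//
+//	attrs := []attribute.KeyValue{
+//		AWSECSLaunchtypeFargate,
+//		AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+//		AWSECSTaskFamily("opentelemetry-family"),
+//		AWSECSTaskRevision("8"),
+//	}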
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, each writing to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
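+
+// Illustrative usage, not generated content: the log helpers are variadic and
+// produce string-slice attributes, which is what supports the multi-log-group
+// case described in the note on AWSLogGroupNamesKey:
+//
+//	attr := AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service")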
+
+// Heroku dyno metadata
+const (
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+)
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created.
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release.
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application.
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
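+
+// Illustrative usage, not generated content: stamping Heroku dyno metadata
+// onto a resource. The values are the documented examples, not a real
+// release:
+//
+//	attrs := []attribute.KeyValue{
+//		CloudProviderHeroku,
+//		HerokuAppID("2daa2797-e42b-4624-9322-ec3f968df4da"),
+//		HerokuReleaseCreationTimestamp("2022-10-23T18:00:42Z"),
+//	}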
+
+// A container instance.
+const (
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageTagKey is the attribute Key conforming to the
+ // "container.image.tag" semantic conventions. It represents the container
+ // image tag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+ return ContainerImageTagKey.String(val)
+}
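+
+// Illustrative usage, not generated content: container.image.name and
+// container.image.tag are separate attributes, so an image reference such as
+// "gcr.io/opentelemetry/operator:0.1" splits as below:
+//
+//	attrs := []attribute.KeyValue{
+//		ContainerName("opentelemetry-autoconf"),
+//		ContainerID("a3bf90e006b2"),
+//		ContainerRuntime("containerd"),
+//		ContainerImageName("gcr.io/opentelemetry/operator"),
+//		ContainerImageTag("0.1"),
+//	}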
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply; ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of
+ // the device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device.
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
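+
+// Illustrative usage, not generated content: per the notes above,
+// device.model.identifier holds the machine-readable form while
+// device.model.name holds the marketing name:
+//
+//	attrs := []attribute.KeyValue{
+//		DeviceManufacturer("Samsung"),
+//		DeviceModelIdentifier("SM-G920F"),
+//		DeviceModelName("Samsung Galaxy S6"),
+//	}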
+
+// A serverless instance.
+const (
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FAAS_APP>/<FAAS_FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run:** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, which will potentially be reused for other invocations of the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function, converted to bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, which will potentially be reused for other
+// invocations of the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function, converted to bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
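+
+// Illustrative usage, not generated content: on AWS Lambda the memory limit
+// is exposed in MiB via AWS_LAMBDA_FUNCTION_MEMORY_SIZE, so it must be scaled
+// to bytes as the FaaSMaxMemoryKey note says (128 MiB * 1,048,576 =
+// 134,217,728 bytes). A sketch using only the standard library:
+//
+//	mib, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
+//	if err == nil {
+//		attr := FaaSMaxMemory(mib * 1024 * 1024) // faas.max_memory in bytes
+//		_ = attr
+//	}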
+
+// A host is defined as a general computing instance.
+const (
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the semantic conventions
+ // specification for the sources used to determine the `machine-id` on each
+ // operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID. For Cloud, this
+ // value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the semantic conventions specification
+// for the sources used to determine the `machine-id` on each operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
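+
+// Illustrative usage, not generated content: a host description combining the
+// string helpers with one of the HostArch enum values above:
+//
+//	attrs := []attribute.KeyValue{
+//		HostID("fdbf79e8af94cb7f9e8df36789187052"),
+//		HostName("opentelemetry-test"),
+//		HostType("n1-standard-1"),
+//		HostArchAMD64,
+//	}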
+
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod. The
+ // container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
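+
+// Illustrative usage, not generated content: identifying a container usually
+// requires the surrounding Kubernetes objects as well; a typical combination
+// is:
+//
+//	attrs := []attribute.KeyValue{
+//		K8SClusterName("opentelemetry-cluster"),
+//		K8SNamespaceName("default"),
+//		K8SPodName("opentelemetry-pod-autoconf"),
+//		K8SContainerName("redis"),
+//		K8SContainerRestartCount(0),
+//	}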
+
+// A Kubernetes ReplicaSet object.
+const (
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, e.g. as reported by the `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, e.g. as reported by the
+// `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
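+
+// Illustrative usage, not generated content: os.type is the required,
+// enumerated attribute; the free-form name, version, and description
+// attributes refine it:
+//
+//	attrs := []attribute.KeyValue{
+//		OSTypeLinux, // required
+//		OSName("Ubuntu"),
+//		OSVersion("18.04.1"),
+//		OSDescription("Ubuntu 18.04.1 LTS"),
+//	}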
+
+// An operating system process.
+const (
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
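+
+// Illustrative usage, not generated content: describing the current process
+// with the standard library. A sketch, not a full detector; the error from
+// os.Executable is ignored for brevity:
+//
+//	exe, _ := os.Executable()
+//	attrs := []attribute.KeyValue{
+//		ProcessPID(os.Getpid()),
+//		ProcessParentPID(os.Getppid()),
+//		ProcessExecutablePath(exe),
+//		ProcessCommandArgs(os.Args...),
+//	}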
+
+// The single (language) runtime instance which is monitored.
+const (
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// A service instance.
+const (
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-k8s-pod-deployment-1',
+ // '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance, however it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
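+
+// Illustrative usage (a sketch, assuming the OpenTelemetry SDK's
+// go.opentelemetry.io/otel/sdk/resource package): the service identity is
+// usually attached to a Resource built from these constructors, e.g.
+//
+//	res, err := resource.New(context.Background(),
+//		resource.WithAttributes(
+//			ServiceName("shoppingcart"),
+//			ServiceNamespace("Shop"),
+//			ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+//			ServiceVersion("2.0.0"),
+//		),
+//	)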
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
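+
+// Illustrative note (not part of the generated conventions): these attributes
+// are normally populated by the SDK itself rather than by instrumentation
+// code; for a Go SDK they would resolve to values such as
+//
+//	TelemetrySDKName("opentelemetry")
+//	TelemetrySDKLanguageGo
+//	TelemetrySDKVersion("1.2.3")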
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetryAutoVersionKey is the attribute Key conforming to the
+ // "telemetry.auto.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+// TelemetryAutoVersion returns an attribute KeyValue conforming to the
+// "telemetry.auto.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent, if used.
+func TelemetryAutoVersion(val string) attribute.KeyValue {
+ return TelemetryAutoVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
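+
+// Illustrative usage (a sketch): a resource for code hosted in an application
+// server could combine these constructors, e.g.
+//
+//	attrs := []attribute.KeyValue{
+//		WebEngineName("WildFly"),
+//		WebEngineVersion("21.0.0"),
+//	}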
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OTelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It represents the deprecated,
+ // use the `otel.scope.name` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OTelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It represents the
+ // deprecated, use the `otel.scope.version` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It represents the deprecated, use
+// the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+ return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the deprecated,
+// use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+ return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
new file mode 100644
index 000000000000..95d0210e38f2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>.
+const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"
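+
+// Illustrative usage (a sketch, assuming the OpenTelemetry SDK's resource
+// package): SchemaURL is passed alongside the attributes so consumers can
+// resolve the conventions version, e.g.
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.ServiceName("shoppingcart"),
+//	)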
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
new file mode 100644
index 000000000000..90b1b0452cc0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
@@ -0,0 +1,2599 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
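+
+// Illustrative usage (a sketch, assuming the go.opentelemetry.io/otel/trace
+// API): an exception is conventionally recorded as a span event named
+// "exception" carrying these attributes, e.g.
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		ExceptionType("OSError"),
+//		ExceptionMessage("Division by zero"),
+//		ExceptionStacktrace(stack),
+//	))
+//
+// For Go errors, span.RecordError(err) produces a similar event.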
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](/~https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` request, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` request, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
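+
+// Illustrative usage (a sketch, assuming an AWS Lambda Go handler where the
+// invoked ARN is available from the invocation context):
+//
+//	span.SetAttributes(
+//		AWSLambdaInvokedARN("arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"),
+//	)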
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+ // that uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+ // that identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/~https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+ // that contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// that uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// that identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// that contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
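+
+// Illustrative usage (a sketch): a span for processing a CloudEvent can carry
+// these attributes, populated from the event envelope, e.g.
+//
+//	span.SetAttributes(
+//		CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		CloudeventsEventSource("/~https://github.com/cloudevents"),
+//		CloudeventsEventSpecVersion("1.0"),
+//		CloudeventsEventType("com.example.object.deleted.v2"),
+//	)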
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed.
+ // For commands that switch the database, this should be set to the
+ // target database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (Should be collected by default only if
+ // there is sanitization that excludes sensitive information.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this
+// should be set to the target database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
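+
+// Illustrative usage (a sketch, assuming the go.opentelemetry.io/otel/trace
+// API): a database call is typically reported as a CLIENT-kind span annotated
+// with these attributes, e.g.
+//
+//	ctx, span := tracer.Start(ctx, "SELECT customers",
+//		trace.WithSpanKind(trace.SpanKindClient),
+//		trace.WithAttributes(
+//			DBSystemPostgreSQL,
+//			DBName("customers"),
+//			DBStatement("SELECT * FROM wuser_table"),
+//			DBOperation("SELECT"),
+//		),
+//	)
+//	defer span.End()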
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // connecting to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // database (`0`).)
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
+
+// Call-level attributes for MongoDB
+const (
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
+
+// Call-level attributes for SQL databases
+const (
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
+// Call-level attributes for Cosmos DB.
+const (
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // cosmosDB Operation Type.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (when performing one of the
+ // operations in this list)
+ // Stability: stable
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as
+ // default))
+ // Stability: stable
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBContainerKey is the attribute Key conforming to the
+ // "db.cosmosdb.container" semantic conventions. It represents the cosmos
+ // DB container name.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if available)
+ // Stability: stable
+ // Examples: 'anystring'
+ DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (if response was received)
+ // Stability: stable
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (when response was received and
+ // contained sub-code.)
+ // Stability: stable
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+ // consumed for that operation
+ //
+ // Type: double
+ // RequirementLevel: ConditionallyRequired (when available)
+ // Stability: stable
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+ return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
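+
+// Illustrative usage (a sketch): a Cosmos DB call can combine the enum values
+// above with these constructors, e.g.
+//
+//	span.SetAttributes(
+//		DBCosmosDBOperationTypeQuery,
+//		DBCosmosDBConnectionModeGateway,
+//		DBCosmosDBContainer("anystring"),
+//		DBCosmosDBStatusCode(200),
+//		DBCosmosDBRequestCharge(46.18),
+//	)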
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
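+
+// Illustrative note (not part of the generated conventions): instrumentation
+// normally sets span status through the trace API rather than via these
+// attributes, which non-OTLP exporters derive from that status, e.g.
+//
+//	span.SetStatus(codes.Error, "resource not found") // go.opentelemetry.io/otel/codes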
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that corresponding incoming would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
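+
+// Illustrative usage (a sketch): the server-side span for an HTTP-triggered
+// function invocation could be annotated as, e.g.
+//
+//	span.SetAttributes(
+//		FaaSTriggerHTTP,
+//		FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//	)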
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+ // describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB
+// to the database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
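+
+// Illustrative sketch (not generated): a span for a datasource-triggered
+// function, e.g. an S3 object creation, could combine the document
+// attributes above, assuming span is a trace.Span:
+//
+//	span.SetAttributes(
+//		FaaSTriggerDatasource,
+//		FaaSDocumentOperationInsert,
+//		FaaSDocumentCollection("myBucketName"),
+//		FaaSDocumentName("myFile.txt"),
+//		FaaSDocumentTime("2020-01-23T13:47:06Z"),
+//	)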
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
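+
+// Illustrative sketch (not generated): a timer-triggered invocation could
+// record its schedule and invocation time, assuming span is a trace.Span:
+//
+//	span.SetAttributes(
+//		FaaSTriggerTimer,
+//		FaaSTime("2020-01-23T13:47:06Z"),
+//		FaaSCron("0/5 * * * ? *"),
+//	)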
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+ // uniquely identify the function and also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
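+
+// Illustrative sketch (not generated): an outgoing (client) FaaS span could
+// identify the invoked function with the attributes above, assuming span is
+// a trace.Span:
+//
+//	span.SetAttributes(
+//		FaaSInvokedName("my-function"),
+//		FaaSInvokedProviderAWS,
+//		FaaSInvokedRegion("eu-central-1"),
+//	)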
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](../../resource/semantic_conventions/README.md#service)
+ // of the remote service. SHOULD be equal to the actual `service.name`
+ // resource attribute of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](../../resource/semantic_conventions/README.md#service) of
+// the remote service. SHOULD be equal to the actual `service.name` resource
+// attribute of the remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
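+
+// Illustrative sketch (not generated): a client span calling a known remote
+// service could be annotated as follows, assuming span is a trace.Span:
+//
+//	span.SetAttributes(PeerService("AuthTokenCache"))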
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under, extracted from the token or application
+ // security context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses, extracted from the token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under, extracted from the token or
+// application security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses, extracted from the token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
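+
+// Illustrative sketch (not generated): a span for an authenticated request
+// could combine the enduser attributes above, assuming span is a trace.Span:
+//
+//	span.SetAttributes(
+//		EnduserID("username"),
+//		EnduserRole("admin"),
+//		EnduserScope("read:message, write:files"),
+//	)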
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// These attributes make it possible to report this unit of code and therefore
+// to provide more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually the rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually the rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
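+
+// Illustrative sketch (not generated): a span could point at the code unit
+// that produced it with the attributes above, assuming span is a trace.Span:
+//
+//	span.SetAttributes(
+//		CodeFunction("serveRequest"),
+//		CodeNamespace("com.example.MyHTTPService"),
+//		CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+//		CodeLineNumber(42),
+//	)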
+
+// Semantic Convention for HTTP Client
+const (
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions. It represents the full HTTP request URL in the form
+ // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
+ // not transmitted over HTTP, but if it is known, it should be included
+ // nevertheless.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case the
+ // attribute's value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPResendCountKey is the attribute Key conforming to the
+ // "http.resend_count" semantic conventions. It represents the ordinal
+ // number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if the request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Server Unavailable,
+ // network issues, or any other).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
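+
+// Illustrative sketch (not generated): an HTTP client span could record the
+// full URL and, only when the request was retried, the resend count,
+// assuming span is a trace.Span:
+//
+//	span.SetAttributes(
+//		HTTPURL("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
+//		HTTPResendCount(3),
+//	)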
+
+// Semantic Convention for HTTP Server
+const (
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the full request target as passed in
+ // an HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/users/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+ // semantic conventions. It represents the IP address of the original
+ // client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as `net.sock.peer.addr`, which
+ // would identify the network-level peer, which may be a proxy.
+ //
+ // This attribute should be set when a source of information different
+ // from the one used for `net.sock.peer.addr` is available, even if that
+ // other source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: for `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in
+// an HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPClientIP returns an attribute KeyValue conforming to the
+// "http.client_ip" semantic conventions. It represents the IP address of the
+// original client behind all proxies, if known (e.g. from
+// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+func HTTPClientIP(val string) attribute.KeyValue {
+ return HTTPClientIPKey.String(val)
+}
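+
+// Illustrative sketch (not generated): an HTTP server span could record the
+// request target and, when known from headers such as X-Forwarded-For, the
+// originating client IP, assuming span is a trace.Span:
+//
+//	span.SetAttributes(
+//		HTTPTarget("/users/12314/?q=ddds"),
+//		HTTPClientIP("83.164.160.102"),
+//	)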
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
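+
+// Illustrative sketch (not generated): a span for a DynamoDB Query against a
+// single table could combine the shared attributes above, assuming span is a
+// trace.Span:
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBProjection("Title, Price, Color"),
+//		AWSDynamoDBLimit(10),
+//	)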
+
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in
+ // the `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e., all operations that require the bucket name
+ // as a mandatory parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e., those that require the object key as a mandatory
+ // parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
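+
+// Illustrative sketch (not generated): a span for an S3 upload-part call
+// could combine the attributes above, assuming span is a trace.Span:
+//
+//	span.SetAttributes(
+//		AWSS3Bucket("some-bucket-name"),
+//		AWSS3Key("someFile.yml"),
+//		AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//		AWSS3PartNumber(3456),
+//	)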
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
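+
+// Illustrative sketch (not generated): a span for executing a GraphQL query
+// could be annotated as follows, assuming span is a trace.Span:
+//
+//	span.SetAttributes(
+//		GraphqlOperationTypeQuery,
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)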
+
+// General attributes used in messaging systems.
+const (
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents a string
+ // identifying the messaging system.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation as defined in the [Operation
+ // names](#operation-names) section above.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the span describes an
+ // operation on a batch of messages.)
+ // Stability: stable
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+)
+
+var (
+ // publish
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+ return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
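+
+// Illustrative sketch (not generated): a span publishing a batch of messages
+// to Kafka could combine the attributes above, assuming span is a
+// trace.Span:
+//
+//	span.SetAttributes(
+//		MessagingSystem("kafka"),
+//		MessagingOperationPublish,
+//		MessagingBatchMessageCount(2),
+//	)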
+
+// Semantic convention for a consumer of messages received from a messaging
+// system.
+const (
+ // MessagingConsumerIDKey is the attribute Key conforming to the
+ // "messaging.consumer.id" semantic conventions. It represents the
+ // identifier for the consumer receiving a message. For Kafka, set it to
+ // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
+ // both are present, or only `messaging.kafka.consumer.group`. For brokers,
+ // such as RabbitMQ and Artemis, set it to the `client_id` of the client
+ // consuming the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mygroup - client-6'
+ MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
+)
+
+// MessagingConsumerID returns an attribute KeyValue conforming to the
+// "messaging.consumer.id" semantic conventions. It represents the identifier
+// for the consumer receiving a message. For Kafka, set it to
+// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
+// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
+// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+// message.
+func MessagingConsumerID(val string) attribute.KeyValue {
+ return MessagingConsumerIDKey.String(val)
+}
+
+// Semantic conventions for remote procedure calls.
+const (
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called; it must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called; it must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
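+
+// Usage sketch (editorial illustration): for a gRPC call whose span is
+// named "myservice.EchoService/exampleMethod", the three attributes above
+// are typically set together:
+//
+//	attrs := []attribute.KeyValue{
+//		RPCSystemGRPC,
+//		RPCService("myservice.EchoService"),
+//		RPCMethod("exampleMethod"),
+//	}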
+
+// Tech-specific attributes for gRPC.
+const (
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](/~https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
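+
+// Usage sketch (editorial illustration): the enum values above mirror
+// gRPC's numeric status codes, so an instrumentation that already holds a
+// google.golang.org/grpc/codes.Code value can convert it directly instead
+// of switching over the named variables:
+//
+//	kv := RPCGRPCStatusCodeKey.Int(int(code))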
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // version (`1.0`))
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int,
+ // string, `null` or missing (for notifications), value is expected to be
+ // cast to string for simplicity. Use empty string in case of `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since protocol allows id to be int, string,
+// `null` or missing (for notifications), value is expected to be cast to
+// string for simplicity. Use empty string in case of `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
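+
+// Usage sketch (editorial illustration): recording a JSON-RPC 2.0 parse
+// error on a span (assuming a trace.Span value named span):
+//
+//	span.SetAttributes(
+//		RPCJsonrpcVersion("2.0"),
+//		RPCJsonrpcRequestID("10"),
+//		RPCJsonrpcErrorCode(-32700),
+//		RPCJsonrpcErrorMessage("Parse error"),
+//	)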
+
+// Tech-specific attributes for Connect RPC.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If response is not successful
+ // and if error code available.)
+ // Stability: stable
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
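+
+// Usage sketch (editorial illustration): Connect error codes are string
+// values, so the matching enum member is attached as-is:
+//
+//	span.SetAttributes(RPCSystemConnectRPC, RPCConnectRPCErrorCodeDeadlineExceeded)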
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md
new file mode 100644
index 000000000000..bc60aa6039af
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.21.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.21.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.21.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go
index e6cf89510536..a9a15a1dab22 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
index 7cf424855e9d..461331a55506 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
@@ -1,20 +1,9 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the conventions
-// as of the v1.21.0 version of the OpenTelemetry specification.
+// patterns for OpenTelemetry things. This package represents the v1.21.0
+// version of the OpenTelemetry semantic conventions.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go
index 30ae34fe4782..c09d9317e293 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
index 93d3c1760c92..5184ee339ab6 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go
index b6d8935cf973..f7aaa50b9e8d 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go
index 66ffd5989f34..be07217d8a56 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go
index b5a91450d420..55698cc4471f 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
new file mode 100644
index 000000000000..0b6cbe960cba
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.24.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
new file mode 100644
index 000000000000..6e688345cbb9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
@@ -0,0 +1,4387 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes FaaS attributes.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+ // uniquely identify the function and also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: experimental
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
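+
+// Usage sketch (editorial illustration): a client span for invoking an
+// AWS Lambda function hosted in eu-central-1 might carry:
+//
+//	attrs := []attribute.KeyValue{
+//		FaaSInvokedName("my-function"),
+//		FaaSInvokedProviderAWS,
+//		FaaSInvokedRegion("eu-central-1"),
+//	}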
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It identifies the class / type of an event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'browser.mouse.click', 'device.app.lifecycle'
+ // Note: Event names are subject to the same rules as [attribute
+ // names](/~https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md).
+ // Notably, event names are namespaced to avoid collisions and provide a
+ // clean separation of semantics for events in separate domains like
+ // browser, mobile, and kubernetes.
+ EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of an event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](/~https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Describes Log attributes
+const (
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream"
+ // semantic conventions. It represents the stream associated with the log.
+ // See below for a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+ // Logs from stdout stream
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// A file to which log was emitted.
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'audit.log'
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the
+ // basename of the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'uuid.log'
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/log/mysql/audit.log'
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full
+ // path to the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/lib/docker/uuid.log'
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
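+
+// Usage sketch (editorial illustration): describing a log file reached
+// through a symlink, e.g. /var/log/mysql/audit.log pointing at
+// /var/lib/docker/uuid.log:
+//
+//	attrs := []attribute.KeyValue{
+//		LogFileName("audit.log"),
+//		LogFilePath("/var/log/mysql/audit.log"),
+//		LogFileNameResolved("uuid.log"),
+//		LogFilePathResolved("/var/lib/docker/uuid.log"),
+//	}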
+
+// Describes Database attributes
+const (
+ // PoolNameKey is the attribute Key conforming to the "pool.name" semantic
+ // conventions. It represents the name of the connection pool; unique
+ // within the instrumented application. In case the connection pool
+ // implementation doesn't provide a name, then the
+ // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
+ // should be used
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'myDataSource'
+ PoolNameKey = attribute.Key("pool.name")
+
+ // StateKey is the attribute Key conforming to the "state" semantic
+ // conventions. It represents the state of a connection in the pool
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'idle'
+ StateKey = attribute.Key("state")
+)
+
+var (
+ // idle
+ StateIdle = StateKey.String("idle")
+ // used
+ StateUsed = StateKey.String("used")
+)
+
+// PoolName returns an attribute KeyValue conforming to the "pool.name"
+// semantic conventions. It represents the name of the connection pool; unique
+// within the instrumented application. In case the connection pool
+// implementation doesn't provide a name, then the
+// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
+// should be used
+func PoolName(val string) attribute.KeyValue {
+ return PoolNameKey.String(val)
+}
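+
+// Usage sketch (editorial illustration): a connection-pool usage metric
+// typically pairs the pool name with a connection state:
+//
+//	attrs := []attribute.KeyValue{
+//		PoolName("myDataSource"),
+//		StateIdle,
+//	}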
+
+// ASP.NET Core attributes
+const (
+ // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+ // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+ // represents the full type name of the
+ // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+ // implementation that handled the exception.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if and only if the exception
+ // was handled by this handler.)
+ // Stability: experimental
+ // Examples: 'Contoso.MyHandler'
+ AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+ // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+ // the rate limiting policy name.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if the matched endpoint for the
+ // request had a rate-limiting policy.)
+ // Stability: experimental
+ // Examples: 'fixed', 'sliding', 'token'
+ AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+ // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.result" semantic conventions. It represents
+ // the rate-limiting result, shows whether the lease was acquired or
+ // contains a rejection reason
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'acquired', 'request_canceled'
+ AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+ // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+ // "aspnetcore.request.is_unhandled" semantic conventions. It represents
+ // the flag indicating if request was handled by the application pipeline.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (if and only if the request was
+ // not handled.)
+ // Stability: experimental
+ // Examples: True
+ AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+ // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+ // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+ // value that indicates whether the matched route is a fallback route.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If and only if a route was
+ // successfully matched.)
+ // Stability: experimental
+ // Examples: True
+ AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
+)
+
+var (
+ // Lease was acquired
+ AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
+ // Lease request was rejected by the endpoint limiter
+ AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
+ // Lease request was rejected by the global limiter
+ AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
+ // Lease request was canceled
+ AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
+)
+
+// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
+// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+// represents the full type name of the
+// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+// implementation that handled the exception.
+func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
+ return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
+}
+
+// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
+// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+// the rate limiting policy name.
+func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
+ return AspnetcoreRateLimitingPolicyKey.String(val)
+}
+
+// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
+// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
+// the flag indicating if request was handled by the application pipeline.
+func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
+ return AspnetcoreRequestIsUnhandledKey.Bool(val)
+}
+
+// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
+// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+// value that indicates whether the matched route is a fallback route.
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
+ return AspnetcoreRoutingIsFallbackKey.Bool(val)
+}
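+
+// Usage sketch (editorial illustration): attributes for a rate-limited
+// request that was rejected by the global limiter under a hypothetical
+// policy named "fixed":
+//
+//	attrs := []attribute.KeyValue{
+//		AspnetcoreRateLimitingPolicy("fixed"),
+//		AspnetcoreRateLimitingResultGlobalLimiter,
+//	}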
+
+// SignalR attributes
+const (
+ // SignalrConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the
+ // SignalR HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'app_shutdown', 'timeout'
+ SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalrTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the [SignalR
+ // transport
+ // type](/~https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'web_sockets', 'long_polling'
+ SignalrTransportKey = attribute.Key("signalr.transport")
+)
+
+var (
+ // The connection was closed normally
+ SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout
+ SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down
+ SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
+)
+
+var (
+ // ServerSentEvents protocol
+ SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
+ // WebSockets protocol
+ SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
+)
+
+// Describes JVM buffer metric attributes.
+const (
+ // JvmBufferPoolNameKey is the attribute Key conforming to the
+ // "jvm.buffer.pool.name" semantic conventions. It represents the name of
+ // the buffer pool.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'mapped', 'direct'
+ // Note: Pool names are generally obtained via
+ // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+ JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+ return JvmBufferPoolNameKey.String(val)
+}
+
+// Describes JVM memory metric attributes.
+const (
+ // JvmMemoryPoolNameKey is the attribute Key conforming to the
+ // "jvm.memory.pool.name" semantic conventions. It represents the name of
+ // the memory pool.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+ // Note: Pool names are generally obtained via
+ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+ JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+ // JvmMemoryTypeKey is the attribute Key conforming to the
+ // "jvm.memory.type" semantic conventions. It represents the type of
+ // memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+)
+
+var (
+ // Heap memory
+ JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+ // Non-heap memory
+ JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+ return JvmMemoryPoolNameKey.String(val)
+}
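+
+// Usage sketch (editorial illustration): attributes for a JVM memory
+// measurement covering the G1 old-generation heap pool:
+//
+//	attrs := []attribute.KeyValue{
+//		JvmMemoryPoolName("G1 Old Gen"),
+//		JvmMemoryTypeHeap,
+//	}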
+
+// Describes System metric attributes
+const (
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '(identifier)'
+ SystemDeviceKey = attribute.Key("system.device")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the
+// "system.device" semantic conventions. It represents the device identifier
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// Describes System CPU metric attributes
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It represents the
+ // logical CPU number [0..n-1]
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemCPUStateKey is the attribute Key conforming to the
+ // "system.cpu.state" semantic conventions. It represents the state of the
+ // CPU
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle', 'interrupt'
+ SystemCPUStateKey = attribute.Key("system.cpu.state")
+)
+
+var (
+ // user
+ SystemCPUStateUser = SystemCPUStateKey.String("user")
+ // system
+ SystemCPUStateSystem = SystemCPUStateKey.String("system")
+ // nice
+ SystemCPUStateNice = SystemCPUStateKey.String("nice")
+ // idle
+ SystemCPUStateIdle = SystemCPUStateKey.String("idle")
+ // iowait
+ SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
+ // interrupt
+ SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
+ // steal
+ SystemCPUStateSteal = SystemCPUStateKey.String("steal")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the logical
+// CPU number [0..n-1]
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
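+
+// Usage sketch (editorial illustration): attributes for a per-CPU time
+// measurement, here logical CPU 1 in the idle state:
+//
+//	attrs := []attribute.KeyValue{
+//		SystemCPULogicalNumber(1),
+//		SystemCPUStateIdle,
+//	}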
+
+// Describes System Memory metric attributes
+const (
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory
+ // state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free', 'cached'
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+)
+
+var (
+ // used
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // shared
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+ // buffers
+ SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+ // cached
+ SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Describes System Memory Paging metric attributes
+const (
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'in'
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory
+ // paging state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free'
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory
+ // paging type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'minor'
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+)
+
+var (
+ // in
+ SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+ // out
+ SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+var (
+ // used
+ SystemPagingStateUsed = SystemPagingStateKey.String("used")
+ // free
+ SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+var (
+ // major
+ SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+ // minor
+ SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Describes Filesystem metric attributes
+const (
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the
+ // filesystem mode
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'rw, ro'
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/mnt/data'
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the
+ // filesystem state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'used'
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the
+ // filesystem type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ext4'
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+)
+
+var (
+ // used
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+var (
+ // fat32
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
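+
+// Usage sketch (editorial illustration): attributes for a filesystem
+// usage measurement on a hypothetical read-write ext4 mount at /mnt/data:
+//
+//	attrs := []attribute.KeyValue{
+//		SystemDevice("/dev/sda1"),
+//		SystemFilesystemTypeExt4,
+//		SystemFilesystemMode("rw"),
+//		SystemFilesystemMountpoint("/mnt/data"),
+//		SystemFilesystemStateUsed,
+//	}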
+
+// Describes Network metric attributes
+const (
+ // SystemNetworkStateKey is the attribute Key conforming to the
+ // "system.network.state" semantic conventions. It represents a stateless
+ // protocol MUST NOT set this attribute
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'close_wait'
+ SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+ // close
+ SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+ // close_wait
+ SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+ // closing
+ SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+ // delete
+ SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+ // established
+ SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+ // fin_wait_1
+ SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+ // fin_wait_2
+ SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+ // last_ack
+ SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+ // listen
+ SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+ // syn_recv
+ SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+ // syn_sent
+ SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+ // time_wait
+ SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process metric attributes
+const (
+ // SystemProcessesStatusKey is the attribute Key conforming to the
+ // "system.processes.status" semantic conventions. It represents the
+ // process state, e.g., [Linux Process State
+ // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'running'
+ SystemProcessesStatusKey = attribute.Key("system.processes.status")
+)
+
+var (
+ // running
+ SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running")
+ // sleeping
+ SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping")
+ // stopped
+ SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped")
+ // defunct
+ SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct")
+)
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.address` SHOULD represent the client address
+ // behind any intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port"
+ // semantic conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.port` SHOULD represent the client port behind
+ // any intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
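+
+// Usage sketch (editorial illustration): describing the client side of an
+// observed TCP connection:
+//
+//	attrs := []attribute.KeyValue{
+//		ClientAddress("10.1.2.80"),
+//		ClientPort(65123),
+//	}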
+
+// The attributes used to describe telemetry in the context of databases.
+const (
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary Cassandra table that the operation is acting upon, including the
+ // keyspace name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBContainerKey is the attribute Key conforming to the
+ // "db.cosmosdb.container" semantic conventions. It represents the cosmos
+ // DB container name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'anystring'
+ DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // Cosmos DB operation type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+ // consumed for that operation
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+ // DBElasticsearchClusterNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.cluster.name" semantic conventions. It represents the
+ // identifier of an Elasticsearch cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+ DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+ // DBElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.node.name" semantic conventions. It represents the
+ // human-readable identifier of the node/instance to which a
+ // request was routed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-0000000001'
+ DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+
+ // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id"
+ // semantic conventions. It represents an identifier (address, unique name,
+ // or any other identifier) of the database instance that is executing
+ // queries or mutations on the current connection. This is useful in cases
+ // where the database is running in a clustered environment and the
+ // instrumentation is able to record the node executing the query. The
+ // client may obtain this value in databases like MySQL using queries like
+ // `select @@hostname`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mysql-e26b99z.example.com'
+ DBInstanceIDKey = attribute.Key("db.instance.id")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the MongoDB
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // connecting to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
+ // required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target
+ // database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
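+
+// Example (illustrative, not part of the generated conventions): assuming an
+// active trace.Span named span and a hypothetical container named "orders", a
+// client span for a Cosmos DB point read could combine the attributes above:
+//
+//	span.SetAttributes(
+//		DBSystemCosmosDB,
+//		DBCosmosDBConnectionModeDirect,
+//		DBCosmosDBContainer("orders"),
+//		DBCosmosDBOperationTypeRead,
+//		DBCosmosDBRequestCharge(1.0),
+//		DBCosmosDBStatusCode(200),
+//	)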
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
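+
+// Example (illustrative, not part of the generated conventions): assuming an
+// active trace.Span named span, a SQL client span would typically combine
+// db.system with the generic db.* attributes defined above:
+//
+//	span.SetAttributes(
+//		DBSystemPostgreSQL,
+//		DBName("customers"),
+//		DBOperation("SELECT"),
+//		DBSQLTable("public.users"),
+//		DBStatement("SELECT * FROM public.users"),
+//	)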
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary Cassandra table that the operation is acting upon, including the
+// keyspace name (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+ return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+ return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+ return DBElasticsearchNodeNameKey.String(val)
+}
+
+// DBInstanceID returns an attribute KeyValue conforming to the
+// "db.instance.id" semantic conventions. It represents an identifier (address,
+// unique name, or any other identifier) of the database instance that is
+// executing queries or mutations on the current connection. This is useful in
+// cases where the database is running in a clustered environment and the
+// instrumentation is able to record the node executing the query. The client
+// may obtain this value in databases like MySQL using queries like `select
+// @@hostname`.
+func DBInstanceID(val string) attribute.KeyValue {
+ return DBInstanceIDKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the MongoDB
+// collection being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target database
+// (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// Describes deprecated HTTP attributes.
+const (
+ // HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+ // semantic conventions.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorKey = attribute.Key("http.flavor")
+
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Deprecated: use `http.request.method` instead.
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 3495
+ // Deprecated: use `http.request.header.content-length` instead.
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 3495
+ // Deprecated: use `http.response.header.content-length` instead.
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'http', 'https'
+ // Deprecated: use `url.scheme` instead.
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 200
+ // Deprecated: use `http.response.status_code` instead.
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/search?q=OpenTelemetry#SemConv'
+ // Deprecated: use `url.path` and `url.query` instead.
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Deprecated: use `url.full` instead.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPUserAgentKey is the attribute Key conforming to the
+ // "http.user_agent" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1'
+ // Deprecated: use `user_agent.original` instead.
+ HTTPUserAgentKey = attribute.Key("http.user_agent")
+)
+
+var (
+ // HTTP/1.0
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+ // HTTP/1.1
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+ // HTTP/2
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+ // HTTP/3
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+ // SPDY protocol
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+ // QUIC protocol
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions.
+//
+// Deprecated: use `http.request.method` instead.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions.
+//
+// Deprecated: use `http.request.header.content-length` instead.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions.
+//
+// Deprecated: use `http.response.header.content-length` instead.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions.
+//
+// Deprecated: use `url.scheme` instead.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions.
+//
+// Deprecated: use `http.response.status_code` instead.
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions.
+//
+// Deprecated: use `url.path` and `url.query` instead.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions.
+//
+// Deprecated: use `url.full` instead.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions.
+//
+// Deprecated: use `user_agent.original` instead.
+func HTTPUserAgent(val string) attribute.KeyValue {
+ return HTTPUserAgentKey.String(val)
+}
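+
+// Migration sketch (illustrative): the deprecated attributes above map onto
+// the current HTTP conventions defined later in this package; for a span that
+// previously used http.method and http.status_code:
+//
+//	// before (deprecated)
+//	span.SetAttributes(HTTPMethod("GET"), HTTPStatusCode(200))
+//	// after
+//	span.SetAttributes(HTTPRequestMethodGet, HTTPResponseStatusCode(200))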
+
+// These attributes may be used for any network related operation.
+const (
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'example.com'
+ // Deprecated: use `server.address`.
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ // Deprecated: use `server.port`.
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'example.com'
+ // Deprecated: use `server.address` on client spans and `client.address` on
+ // server spans.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ // Deprecated: use `server.port` on client spans and `client.port` on
+ // server spans.
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetProtocolNameKey is the attribute Key conforming to the
+ // "net.protocol.name" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'amqp', 'http', 'mqtt'
+ // Deprecated: use `network.protocol.name`.
+ NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+ // NetProtocolVersionKey is the attribute Key conforming to the
+ // "net.protocol.version" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '3.1.1'
+ // Deprecated: use `network.protocol.version`.
+ NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Deprecated: use `network.transport` and `network.type`.
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/var/my.sock'
+ // Deprecated: use `network.local.address`.
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ // Deprecated: use `network.local.port`.
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '192.168.0.1'
+ // Deprecated: use `network.peer.address`.
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/var/my.sock'
+ // Deprecated: no replacement at this time.
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 65531
+ // Deprecated: use `network.peer.port`.
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Deprecated: use `network.transport`.
+ NetTransportKey = attribute.Key("net.transport")
+)
+
+var (
+ // IPv4 address
+ //
+ // Deprecated: use `network.transport` and `network.type`.
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ //
+ // Deprecated: use `network.transport` and `network.type`.
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ //
+ // Deprecated: use `network.transport` and `network.type`.
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+var (
+ // ip_tcp
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions.
+//
+// Deprecated: use `server.address`.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions.
+//
+// Deprecated: use `server.port`.
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions.
+//
+// Deprecated: use `server.address` on client spans and `client.address` on
+// server spans.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions.
+//
+// Deprecated: use `server.port` on client spans and `client.port` on server
+// spans.
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions.
+//
+// Deprecated: use `network.protocol.name`.
+func NetProtocolName(val string) attribute.KeyValue {
+ return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions.
+//
+// Deprecated: use `network.protocol.version`.
+func NetProtocolVersion(val string) attribute.KeyValue {
+ return NetProtocolVersionKey.String(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions.
+//
+// Deprecated: use `network.local.address`.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions.
+//
+// Deprecated: use `network.local.port`.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions.
+//
+// Deprecated: use `network.peer.address`.
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions.
+//
+// Deprecated: no replacement at this time.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions.
+//
+// Deprecated: use `network.peer.port`.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
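+
+// Migration sketch (illustrative): per the deprecation notes above, a client
+// span that previously recorded net.peer.name and net.peer.port switches to
+// the server.* attributes (assuming the ServerAddress/ServerPort helpers
+// defined elsewhere in this package):
+//
+//	// before (deprecated)
+//	span.SetAttributes(NetPeerName("example.com"), NetPeerPort(8080))
+//	// after
+//	span.SetAttributes(ServerAddress("example.com"), ServerPort(8080))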
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the
+ // destination address - domain name if available without reverse DNS
+ // lookup; otherwise, IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the source side, and when communicating through
+ // an intermediary, `destination.address` SHOULD represent the destination
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the
+ // "destination.port" semantic conventions. It represents the destination
+ // port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number.
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
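+
+// Example (illustrative): for a unidirectional UDP flow with no clear
+// client/server relationship, only the destination attributes are recorded:
+//
+//	span.SetAttributes(
+//		DestinationAddress("10.1.2.80"),
+//		DestinationPort(3389),
+//	)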
+
+// These attributes may be used for any disk related operation.
+const (
+ // DiskIoDirectionKey is the attribute Key conforming to the
+ // "disk.io.direction" semantic conventions. It represents the disk IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read'
+ DiskIoDirectionKey = attribute.Key("disk.io.direction")
+)
+
+var (
+ // read
+ DiskIoDirectionRead = DiskIoDirectionKey.String("read")
+ // write
+ DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
+)
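+
+// Example (illustrative): disk IO attributes are typically attached to metric
+// measurements rather than spans; assuming an int64 counter bytesRead created
+// with the go.opentelemetry.io/otel/metric API:
+//
+//	bytesRead.Add(ctx, n, metric.WithAttributes(DiskIoDirectionRead))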
+
+// The shared attributes used to report an error.
+const (
+ // ErrorTypeKey is the attribute Key conforming to the "error.type"
+ // semantic conventions. It represents the describes a class of error the
+ // operation ended with.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'timeout', 'java.net.UnknownHostException',
+ // 'server_certificate_invalid', '500'
+ // Note: The `error.type` SHOULD be predictable and SHOULD have low
+ // cardinality.
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library
+ // SHOULD be low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query
+ // time when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT
+ // set `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as
+ // HTTP or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // * Use a domain-specific attribute
+ // * Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a custom value
+ ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
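+
+// Example (illustrative): per the note above, instrumentations set a
+// low-cardinality, predictable error.type on failure, falling back to
+// _OTHER when no domain-specific value is available:
+//
+//	if err != nil {
+//		span.SetAttributes(ErrorTypeOther)
+//	}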
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example for recording span
+ // exceptions](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
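+
+// Example (illustrative): assuming an active trace.Span named span, an
+// exception is typically recorded as a span event carrying these attributes:
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		ExceptionType("OSError"),
+//		ExceptionMessage("Division by zero"),
+//		ExceptionEscaped(true),
+//	))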
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the hTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods; it is
+ // not a list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+ // to be case insensitive, SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so, MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the
+ // ordinal number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Server Unavailable,
+ // network issues, or any other).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size
+ // of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status
+ // code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 200
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route, that is, the path
+ // template in the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+var (
+ // CONNECT method
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
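+
+// Example (illustrative): per the note on http.request.method, a method the
+// instrumentation does not know is reported as _OTHER while the original
+// value is preserved:
+//
+//	span.SetAttributes(HTTPRequestMethodOther, HTTPRequestMethodOriginal("foo"))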
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of request resending attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
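+
+// Example (illustrative): a typical HTTP server span combines the stable
+// attributes above:
+//
+//	span.SetAttributes(
+//		HTTPRequestMethodGet,
+//		HTTPRoute("/users/:userID?"),
+//		HTTPResponseStatusCode(200),
+//	)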
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client_id" semantic conventions. It represents a unique
+ // identifier for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'client-5', 'myhost@8742@s8083jm'
+ MessagingClientIDKey = attribute.Key("messaging.client_id")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If
+ // the broker doesn't have such notion, the destination name SHOULD
+ // uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low-cardinality representation of the messaging destination name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
+ // to the "messaging.destination_publish.anonymous" semantic conventions.
+ // It represents a boolean that is true if the publish message destination
+ // is anonymous (could be unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
+
+ // MessagingDestinationPublishNameKey is the attribute Key conforming to
+ // the "messaging.destination_publish.name" semantic conventions. It
+ // represents the name of the original destination the message was
+ // published to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: The name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If
+ // the broker doesn't have such a notion, the original destination name
+ // SHOULD uniquely identify the broker.
+ MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+ // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+ // It represents the ordering key for a given message. If the attribute is
+ // not present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ordering_key'
+ MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the
+ // size of the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1439
+ // Note: This can refer to both the compressed or uncompressed body size.
+ // If both sizes are known, the uncompressed
+ // body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the conversation ID identifying the conversation to which the message
+ // belongs, represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents
+ // the size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2738
+ // Note: This can refer to both the compressed or uncompressed size. If
+ // both sizes are known, the uncompressed
+ // size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the rabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the it is essential for FIFO message. Messages that belong to the same
+ // message group are always processed one by one within the same consumer
+ // group.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides its
+ // message ID.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of the message besides the topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // isolated from one another.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents an identifier for
+ // the messaging system being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+var (
+ // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
+ MessagingOperationCreate = MessagingOperationKey.String("create")
+ // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // One or more messages are passed to a consumer. This operation refers to push-based scenarios, where consumers register callbacks which get called by messaging SDKs
+ MessagingOperationDeliver = MessagingOperationKey.String("deliver")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Apache ActiveMQ
+ MessagingSystemActivemq = MessagingSystemKey.String("activemq")
+ // Amazon Simple Queue Service (SQS)
+ MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
+ // Azure Event Grid
+ MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid")
+ // Azure Event Hubs
+ MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs")
+ // Azure Service Bus
+ MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus")
+ // Google Cloud Pub/Sub
+ MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
+ // Java Message Service
+ MessagingSystemJms = MessagingSystemKey.String("jms")
+ // Apache Kafka
+ MessagingSystemKafka = MessagingSystemKey.String("kafka")
+ // RabbitMQ
+ MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
+ // Apache RocketMQ
+ MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
+)
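+
+// A minimal sketch of how the messaging attributes above might be combined on
+// a Kafka producer span, assuming the go.opentelemetry.io/otel and
+// go.opentelemetry.io/otel/trace packages are imported; the tracer name,
+// topic, partition, and message ID are placeholder values.
+//
+//	tracer := otel.Tracer("example")
+//	ctx, span := tracer.Start(ctx, "MyTopic publish",
+//		trace.WithSpanKind(trace.SpanKindProducer),
+//		trace.WithAttributes(
+//			MessagingSystemKafka,
+//			MessagingOperationPublish,
+//			MessagingDestinationName("MyTopic"),
+//			MessagingKafkaDestinationPartition(2),
+//			MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
+//		))
+//	defer span.End()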
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client_id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low-cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationPublishAnonymous returns an attribute KeyValue
+// conforming to the "messaging.destination_publish.anonymous" semantic
+// conventions. It represents a boolean that is true if the publish message
+// destination is anonymous (could be unnamed or have auto-generated name).
+func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationPublishAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationPublishName returns an attribute KeyValue conforming
+// to the "messaging.destination_publish.name" semantic conventions. It
+// represents the name of the original destination the message was published to.
+func MessagingDestinationPublishName(val string) attribute.KeyValue {
+ return MessagingDestinationPublishNameKey.String(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message key in Kafka, used for grouping alike messages to ensure they're
+// processed on the same partition. It differs from `messaging.message.id` in
+// that it's not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size
+// of the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the conversation ID identifying the conversation to which the
+// message belongs, represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
+// the "messaging.message.envelope.size" semantic conventions. It represents
+// the size of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same
+// message group are always processed one by one within the same consumer
+// group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides its
+// message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// isolated from one another.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetworkCarrierIccKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier
+ // network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'DE'
+ NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMccKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '310'
+ NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMncKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '001'
+ NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'sprint'
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'LTE'
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the
+ // internet connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'wifi'
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkIoDirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network
+ // IO operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'transmit'
+ NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+ // NetworkLocalPortKey is the attribute Key conforming to the
+ // "network.local.port" semantic conventions. It represents the local port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkLocalPortKey = attribute.Key("network.local.port")
+
+ // NetworkPeerAddressKey is the attribute Key conforming to the
+ // "network.peer.address" semantic conventions. It represents the peer
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+ // NetworkPeerPortKey is the attribute Key conforming to the
+ // "network.peer.port" semantic conventions. It represents the peer port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the [OSI
+ // application layer](https://osi-model.com/application-layer/) or non-OSI
+ // equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the
+ // version of the protocol specified in `network.protocol.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `network.protocol.version` refers to the version of the protocol
+ // used and might be different from the protocol client's version. If the
+ // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`,
+ // this attribute should be set to `1.1`.
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the [OSI
+ // transport layer](https://osi-model.com/transport-layer/) or
+ // [inter-process communication
+ // method](https://wikipedia.org/wiki/Inter-process_communication).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tcp', 'udp'
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example
+ // different processes could be listening on TCP port 12345 and UDP port
+ // 12345.
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type"
+ // semantic conventions. It represents the [OSI network
+ // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ipv4', 'ipv6'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkTypeKey = attribute.Key("network.type")
+)
+
+var (
+ // GPRS
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+var (
+ // wifi
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+ // transmit
+ NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
+ // receive
+ NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
+)
+
+var (
+ // TCP
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+ // IPv4
+ NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
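+
+// A minimal sketch of describing a TCP/IPv4 connection with the network
+// attributes above; the address, port, and protocol values are placeholders,
+// and `span` is assumed to be an already-started trace.Span.
+//
+//	span.SetAttributes(
+//		NetworkTransportTCP,
+//		NetworkTypeIpv4,
+//		NetworkPeerAddress("10.1.2.80"),
+//		NetworkPeerPort(65123),
+//		NetworkProtocolName("http"),
+//		NetworkProtocolVersion("1.1"),
+//	)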
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+ return NetworkCarrierIccKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+ return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+ return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local
+// address of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+ return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+ return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+ return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+ return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the version
+// of the protocol specified in `network.protocol.name`.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
+
+// Attributes for remote procedure calls.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](/~https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of the request or response. Since the protocol allows the id to
+ // be an int, a string, `null`, or missing (for notifications), the value is
+ // expected to be cast to a string for simplicity. Use an empty string in
+ // case of a `null` value. Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
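+
+// A minimal sketch of annotating a gRPC client span with the RPC attributes
+// above, assuming the go.opentelemetry.io/otel/trace package is imported and
+// `tracer` is a trace.Tracer; the service and method names are placeholder
+// values.
+//
+//	ctx, span := tracer.Start(ctx, "myservice.EchoService/Echo",
+//		trace.WithSpanKind(trace.SpanKindClient),
+//		trace.WithAttributes(
+//			RPCSystemGRPC,
+//			RPCService("myservice.EchoService"),
+//			RPCMethod("Echo"),
+//		))
+//	// After the call completes, record the status code returned by the server.
+//	span.SetAttributes(RPCGRPCStatusCodeOk)
+//	span.End()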
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of the request or response. Since the protocol allows the id to be
+// an int, a string, `null`, or missing (for notifications), the value is
+// expected to be cast to a string for simplicity. Use an empty string in case
+// of a `null` value. Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.address` SHOULD represent the server address
+ // behind any intermediaries, for example proxies, if it's available.
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port"
+ // semantic conventions. It represents the server port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.port` SHOULD represent the server port behind
+ // any intermediaries, for example proxies, if it's available.
+ ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the server domain name
+// if available without reverse DNS lookup; otherwise, IP address or Unix
+// domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
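+
+// A minimal sketch of deriving the server attributes from a host:port string
+// with the standard library; "example.com:8080" is a placeholder value and
+// `span` is assumed to be an already-started trace.Span.
+//
+//	if host, portStr, err := net.SplitHostPort("example.com:8080"); err == nil {
+//		if port, err := strconv.Atoi(portStr); err == nil {
+//			span.SetAttributes(ServerAddress(host), ServerPort(port))
+//		}
+//	}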
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the destination side, and when communicating
+ // through an intermediary, `source.address` SHOULD represent the source
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port"
+ // semantic conventions. It represents the source port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
+// Semantic convention attributes in the TLS namespace.
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+ // semantic conventions. It represents the string indicating the
+ // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+ // used during the current connection.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for `tls.cipher` MUST be one of the
+ // `Descriptions` of the [registered TLS Cipher
+ // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the client. This is
+ // usually mutually exclusive with `client.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the client. This is usually mutually exclusive with
+ // `client.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of DER-encoded version of
+ // certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of DER-encoded version of
+ // certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of DER-encoded version
+ // of certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the
+ // "tls.client.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based
+ // on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/Time
+ // indicating when client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the
+ // date/Time indicating when client certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+ // "tls.client.server_name" semantic conventions. It represents the also
+ // called an SNI, this tells the server which hostname to which the client
+ // is attempting to connect to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions. It represents the boolean flag
+ // indicating if the TLS negotiation was successful and transitioned to an
+ // encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the
+ // "tls.next_protocol" semantic conventions. It represents the string
+ // indicating the protocol being tunneled. Per the values in the [IANA
+ // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+ // this string should be lower case.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'http/1.1'
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the
+ // "tls.protocol.name" semantic conventions. It represents the normalized
+ // lowercase protocol name parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions. It represents the numeric
+ // part of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2', '3'
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+ // semantic conventions. It represents the boolean flag indicating if this
+ // TLS connection was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the server. This is
+ // usually mutually-exclusive of `server.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the server. This is usually mutually-exclusive of
+ // `server.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of the DER-encoded version
+ // of the certificate offered by the server. For consistency with other
+ // hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of the DER-encoded version
+ // of the certificate offered by the server. For consistency with other
+ // hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of the DER-encoded
+ // version of the certificate offered by the server. For consistency with
+ // other hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the
+ // "tls.server.issuer" semantic conventions. It represents the
+ // distinguished name of the
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the
+ // "tls.server.ja3s" semantic conventions. It represents a hash that
+ // identifies servers based on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/Time
+ // indicating when server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the
+ // date/Time indicating when server certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+ // ssl
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of the
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the also called
+// an SNI, this tells the server which hostname to which the client is
+// attempting to connect to.
+func TLSClientServerName(val string) attribute.KeyValue {
+ return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of the
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
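+
+// Usage sketch (editorial, not part of the generated conventions): the
+// helpers above can describe a Go crypto/tls connection. The function below
+// is an illustrative assumption, not a mapping prescribed by this package.
+//
+//	import "crypto/tls"
+//
+//	func tlsConnAttrs(cs tls.ConnectionState) []attribute.KeyValue {
+//		return []attribute.KeyValue{
+//			TLSEstablished(cs.HandshakeComplete),
+//			TLSResumed(cs.DidResume),
+//			TLSCipher(tls.CipherSuiteName(cs.CipherSuite)),
+//			TLSNextProtocol(cs.NegotiatedProtocol),
+//			TLSClientServerName(cs.ServerName),
+//		}
+//	}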
+
+// Attributes describing URL.
+const (
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment"
+ // semantic conventions. It represents the [URI
+ // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'SemConv'
+ URLFragmentKey = attribute.Key("url.fragment")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network
+ // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // '//localhost'
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the
+ // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
+ // included nevertheless.
+ // `url.full` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case username and
+ // password SHOULD be redacted and attribute's value SHOULD be
+ // `https://REDACTED:REDACTED@www.example.com/`.
+ // `url.full` SHOULD capture the absolute URL when it is available (or can
+ // be reconstructed) and SHOULD NOT be validated or modified except for
+ // sanitizing purposes.
+ URLFullKey = attribute.Key("url.full")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI
+ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/search'
+ URLPathKey = attribute.Key("url.path")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI
+ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in query string SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+)
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
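+
+// Usage sketch (editorial): decomposing a raw URL with net/url and redacting
+// credentials as the `url.full` note requires. The helper below is an
+// illustrative assumption, not part of the generated API.
+//
+//	import "net/url"
+//
+//	func urlAttrs(raw string) ([]attribute.KeyValue, error) {
+//		u, err := url.Parse(raw)
+//		if err != nil {
+//			return nil, err
+//		}
+//		if u.User != nil {
+//			// Per the url.full note, credentials SHOULD be redacted.
+//			u.User = url.UserPassword("REDACTED", "REDACTED")
+//		}
+//		return []attribute.KeyValue{
+//			URLFull(u.String()),
+//			URLScheme(u.Scheme),
+//			URLPath(u.Path),
+//			URLQuery(u.RawQuery),
+//			URLFragment(u.Fragment),
+//		}, nil
+//	}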
+
+// Describes user-agent attributes.
+const (
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
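+
+// Usage sketch (editorial): for an incoming net/http request the header can
+// be captured directly; the helper name is an illustrative assumption.
+//
+//	import "net/http"
+//
+//	func userAgentAttr(r *http.Request) attribute.KeyValue {
+//		return UserAgentOriginal(r.UserAgent())
+//	}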
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id"
+ // semantic conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
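+
+// Usage sketch (editorial): when a session expires and a new one begins,
+// emitting both attributes lets backends link the two sessions. The helper
+// below is an illustrative assumption, not part of the generated API.
+//
+//	func sessionAttrs(current, previous string) []attribute.KeyValue {
+//		attrs := []attribute.KeyValue{SessionID(current)}
+//		if previous != "" {
+//			attrs = append(attrs, SessionPreviousID(previous))
+//		}
+//		return attrs
+//	}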
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
new file mode 100644
index 000000000000..d27e8a8f8b3f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.24.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
new file mode 100644
index 000000000000..6c019aafc3ef
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
@@ -0,0 +1,200 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This event represents an occurrence of a lifecycle transition on the iOS
+// platform.
+const (
+ // IosStateKey is the attribute Key conforming to the "ios.state" semantic
+ // conventions. It represents the state the application has transitioned
+ // into at the occurrence of the event.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate
+ // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902),
+ // from which the `OS terminology` column values are derived.
+ IosStateKey = attribute.Key("ios.state")
+)
+
+var (
+ // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive`
+ IosStateActive = IosStateKey.String("active")
+ // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive`
+ IosStateInactive = IosStateKey.String("inactive")
+ // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground`
+ IosStateBackground = IosStateKey.String("background")
+ // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground`
+ IosStateForeground = IosStateKey.String("foreground")
+ // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate`
+ IosStateTerminate = IosStateKey.String("terminate")
+)
+
+// This event represents an occurrence of a lifecycle transition on the Android
+// platform.
+const (
+ // AndroidStateKey is the attribute Key conforming to the "android.state"
+ // semantic conventions. It represents the state the application has
+ // transitioned into at the occurrence of the event.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Note: The Android lifecycle states are defined in [Activity lifecycle
+ // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+ // from which the `OS identifiers` are derived.
+ AndroidStateKey = attribute.Key("android.state")
+)
+
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
+ AndroidStateCreated = AndroidStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
+ AndroidStateBackground = AndroidStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
+ AndroidStateForeground = AndroidStateKey.String("foreground")
+)
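+
+// Usage sketch (editorial): lifecycle transitions are reported as events
+// carrying the required state attribute. The event name shown is an
+// assumption for illustration; consult the specification for the exact name.
+//
+//	import "go.opentelemetry.io/otel/trace"
+//
+//	func recordBackground(span trace.Span) {
+//		span.AddEvent("device.app.lifecycle",
+//			trace.WithAttributes(AndroidStateBackground))
+//	}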
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
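+
+// Usage sketch (editorial): a feature-flag evaluation emitted as a span
+// event, reusing the example values from the attribute docs above. Given a
+// trace.Span named span (an assumption for illustration):
+//
+//	span.AddEvent("feature_flag", trace.WithAttributes(
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagProviderName("Flag Manager"),
+//		FeatureFlagVariant("red"),
+//	))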
+
+// RPC received/sent message.
+const (
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or
+ // sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`, one for sent messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
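+
+// Usage sketch (editorial): per the message.id convention, sent and received
+// messages keep independent counters starting at 1. A minimal per-stream
+// tracker (illustrative, not part of the generated API):
+//
+//	type messageCounters struct{ sent, recv int }
+//
+//	func (c *messageCounters) nextSent() []attribute.KeyValue {
+//		c.sent++
+//		return []attribute.KeyValue{MessageTypeSent, MessageID(c.sent)}
+//	}
+//
+//	func (c *messageCounters) nextRecv() []attribute.KeyValue {
+//		c.recv++
+//		return []attribute.KeyValue{MessageTypeReceived, MessageID(c.recv)}
+//	}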
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
new file mode 100644
index 000000000000..7235bb51d9a4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
new file mode 100644
index 000000000000..a6b953f625e5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
@@ -0,0 +1,1071 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+const (
+
+ // DBClientConnectionsUsage is the metric conforming to the
+ // "db.client.connections.usage" semantic conventions. It represents the number
+ // of connections that are currently in state described by the `state`
+ // attribute.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsUsageName = "db.client.connections.usage"
+ DBClientConnectionsUsageUnit = "{connection}"
+ DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute"
+
+ // DBClientConnectionsIdleMax is the metric conforming to the
+ // "db.client.connections.idle.max" semantic conventions. It represents the
+ // maximum number of idle open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
+ DBClientConnectionsIdleMaxUnit = "{connection}"
+ DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed"
+
+ // DBClientConnectionsIdleMin is the metric conforming to the
+ // "db.client.connections.idle.min" semantic conventions. It represents the
+ // minimum number of idle open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
+ DBClientConnectionsIdleMinUnit = "{connection}"
+ DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed"
+
+ // DBClientConnectionsMax is the metric conforming to the
+ // "db.client.connections.max" semantic conventions. It represents the maximum
+ // number of open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsMaxName = "db.client.connections.max"
+ DBClientConnectionsMaxUnit = "{connection}"
+ DBClientConnectionsMaxDescription = "The maximum number of open connections allowed"
+
+ // DBClientConnectionsPendingRequests is the metric conforming to the
+ // "db.client.connections.pending_requests" semantic conventions. It represents
+ // the number of pending requests for an open connection, cumulative for the
+ // entire pool.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
+ DBClientConnectionsPendingRequestsUnit = "{request}"
+ DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
+
+ // DBClientConnectionsTimeouts is the metric conforming to the
+ // "db.client.connections.timeouts" semantic conventions. It represents the
+ // number of connection timeouts that have occurred trying to obtain a
+ // connection from the pool.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
+ DBClientConnectionsTimeoutsUnit = "{timeout}"
+ DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+ // DBClientConnectionsCreateTime is the metric conforming to the
+ // "db.client.connections.create_time" semantic conventions. It represents the
+ // time it took to create a new connection.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
+ DBClientConnectionsCreateTimeUnit = "ms"
+ DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection"
+
+ // DBClientConnectionsWaitTime is the metric conforming to the
+ // "db.client.connections.wait_time" semantic conventions. It represents the
+ // time it took to obtain an open connection from the pool.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
+ DBClientConnectionsWaitTimeUnit = "ms"
+ DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+ // DBClientConnectionsUseTime is the metric conforming to the
+ // "db.client.connections.use_time" semantic conventions. It represents the
+ // time between borrowing a connection and returning it to the pool.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsUseTimeName = "db.client.connections.use_time"
+ DBClientConnectionsUseTimeUnit = "ms"
+ DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
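+
+ // Usage sketch (editorial): each Name/Unit/Description triple is meant to
+ // be passed to an instrument constructor, for example with the
+ // go.opentelemetry.io/otel/metric API (meter acquisition and error
+ // handling elided):
+ //
+ //	usage, err := meter.Int64UpDownCounter(
+ //		DBClientConnectionsUsageName,
+ //		metric.WithUnit(DBClientConnectionsUsageUnit),
+ //		metric.WithDescription(DBClientConnectionsUsageDescription),
+ //	)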
+
+ // AspnetcoreRoutingMatchAttempts is the metric conforming to the
+ // "aspnetcore.routing.match_attempts" semantic conventions. It represents the
+ // number of requests that were attempted to be matched to an endpoint.
+ // Instrument: counter
+ // Unit: {match_attempt}
+ // Stability: Experimental
+ AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
+ AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
+ AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
+
+ // AspnetcoreDiagnosticsExceptions is the metric conforming to the
+ // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
+ // number of exceptions caught by exception handling middleware.
+ // Instrument: counter
+ // Unit: {exception}
+ // Stability: Experimental
+ AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
+ AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
+ AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
+
+ // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
+ // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
+ // represents the number of requests that are currently active on the server
+ // that hold a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
+ AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
+ AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
+ // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
+ // represents the duration of rate limiting lease held by requests on the
+ // server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
+ AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
+ AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
+
+ // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
+ // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
+ // represents the time the request spent in a queue waiting to acquire a rate
+ // limiting lease.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
+ AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
+ AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
+ // represents the number of requests that are currently queued, waiting to
+ // acquire a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
+ AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
+ AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
+ // number of requests that tried to acquire a rate limiting lease.
+ // Instrument: counter
+ // Unit: {request}
+ // Stability: Experimental
+ AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
+ AspnetcoreRateLimitingRequestsUnit = "{request}"
+ AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
+
+ // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+ // semantic conventions. It measures the time taken to perform a
+ // DNS lookup.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DNSLookupDurationName = "dns.lookup.duration"
+ DNSLookupDurationUnit = "s"
+ DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
+
+ // HTTPClientOpenConnections is the metric conforming to the
+ // "http.client.open_connections" semantic conventions. It represents the
+ // number of outbound HTTP connections that are currently active or idle on the
+ // client.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ HTTPClientOpenConnectionsName = "http.client.open_connections"
+ HTTPClientOpenConnectionsUnit = "{connection}"
+ HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
+
+ // HTTPClientConnectionDuration is the metric conforming to the
+ // "http.client.connection.duration" semantic conventions. It represents the
+ // duration of the successfully established outbound HTTP connections.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ HTTPClientConnectionDurationName = "http.client.connection.duration"
+ HTTPClientConnectionDurationUnit = "s"
+ HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
+
+ // HTTPClientActiveRequests is the metric conforming to the
+ // "http.client.active_requests" semantic conventions. It represents the number
+ // of active HTTP requests.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ HTTPClientActiveRequestsName = "http.client.active_requests"
+ HTTPClientActiveRequestsUnit = "{request}"
+ HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
+
+ // HTTPClientRequestTimeInQueue is the metric conforming to the
+ // "http.client.request.time_in_queue" semantic conventions. It represents the
+ // amount of time requests spent on a queue waiting for an available
+ // connection.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue"
+ HTTPClientRequestTimeInQueueUnit = "s"
+ HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection."
+
+ // KestrelActiveConnections is the metric conforming to the
+ // "kestrel.active_connections" semantic conventions. It represents the number
+ // of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ KestrelActiveConnectionsName = "kestrel.active_connections"
+ KestrelActiveConnectionsUnit = "{connection}"
+ KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // KestrelConnectionDuration is the metric conforming to the
+ // "kestrel.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ KestrelConnectionDurationName = "kestrel.connection.duration"
+ KestrelConnectionDurationUnit = "s"
+ KestrelConnectionDurationDescription = "The duration of connections on the server."
+
+ // KestrelRejectedConnections is the metric conforming to the
+ // "kestrel.rejected_connections" semantic conventions. It represents the
+ // number of connections rejected by the server.
+ // Instrument: counter
+ // Unit: {connection}
+ // Stability: Experimental
+ KestrelRejectedConnectionsName = "kestrel.rejected_connections"
+ KestrelRejectedConnectionsUnit = "{connection}"
+ KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
+
+ // KestrelQueuedConnections is the metric conforming to the
+ // "kestrel.queued_connections" semantic conventions. It represents the number
+ // of connections that are currently queued and are waiting to start.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ KestrelQueuedConnectionsName = "kestrel.queued_connections"
+ KestrelQueuedConnectionsUnit = "{connection}"
+ KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
+
+ // KestrelQueuedRequests is the metric conforming to the
+ // "kestrel.queued_requests" semantic conventions. It represents the number of
+ // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
+ // currently queued and are waiting to start.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ KestrelQueuedRequestsName = "kestrel.queued_requests"
+ KestrelQueuedRequestsUnit = "{request}"
+ KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
+
+ // KestrelUpgradedConnections is the metric conforming to the
+ // "kestrel.upgraded_connections" semantic conventions. It represents the
+ // number of connections that are currently upgraded (WebSockets).
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+ KestrelUpgradedConnectionsUnit = "{connection}"
+ KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+ // KestrelTLSHandshakeDuration is the metric conforming to the
+ // "kestrel.tls_handshake.duration" semantic conventions. It represents the
+ // duration of TLS handshakes on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+ KestrelTLSHandshakeDurationUnit = "s"
+ KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+ // KestrelActiveTLSHandshakes is the metric conforming to the
+ // "kestrel.active_tls_handshakes" semantic conventions. It represents the
+ // number of TLS handshakes that are currently in progress on the server.
+ // Instrument: updowncounter
+ // Unit: {handshake}
+ // Stability: Experimental
+ KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+ KestrelActiveTLSHandshakesUnit = "{handshake}"
+ KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+ // SignalrServerConnectionDuration is the metric conforming to the
+ // "signalr.server.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+ SignalrServerConnectionDurationUnit = "s"
+ SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+ // SignalrServerActiveConnections is the metric conforming to the
+ // "signalr.server.active_connections" semantic conventions. It represents the
+ // number of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+ SignalrServerActiveConnectionsUnit = "{connection}"
+ SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+ // semantic conventions. It measures the duration of the
+ // function's logic execution.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInvokeDurationName = "faas.invoke_duration"
+ FaaSInvokeDurationUnit = "s"
+ FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+ // FaaSInitDuration is the metric conforming to the "faas.init_duration"
+ // semantic conventions. It measures the duration of the
+ // function's initialization, such as a cold start.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInitDurationName = "faas.init_duration"
+ FaaSInitDurationUnit = "s"
+ FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+ // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+ // conventions. It represents the number of invocation cold starts.
+ // Instrument: counter
+ // Unit: {coldstart}
+ // Stability: Experimental
+ FaaSColdstartsName = "faas.coldstarts"
+ FaaSColdstartsUnit = "{coldstart}"
+ FaaSColdstartsDescription = "Number of invocation cold starts"
+
+ // FaaSErrors is the metric conforming to the "faas.errors" semantic
+ // conventions. It represents the number of invocation errors.
+ // Instrument: counter
+ // Unit: {error}
+ // Stability: Experimental
+ FaaSErrorsName = "faas.errors"
+ FaaSErrorsUnit = "{error}"
+ FaaSErrorsDescription = "Number of invocation errors"
+
+ // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
+ // conventions. It represents the number of successful invocations.
+ // Instrument: counter
+ // Unit: {invocation}
+ // Stability: Experimental
+ FaaSInvocationsName = "faas.invocations"
+ FaaSInvocationsUnit = "{invocation}"
+ FaaSInvocationsDescription = "Number of successful invocations"
+
+ // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
+ // conventions. It represents the number of invocation timeouts.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ FaaSTimeoutsName = "faas.timeouts"
+ FaaSTimeoutsUnit = "{timeout}"
+ FaaSTimeoutsDescription = "Number of invocation timeouts"
+
+ // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
+ // conventions. It represents the distribution of max memory usage per
+ // invocation.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ FaaSMemUsageName = "faas.mem_usage"
+ FaaSMemUsageUnit = "By"
+ FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
+
+ // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
+ // conventions. It represents the distribution of CPU usage per invocation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSCPUUsageName = "faas.cpu_usage"
+ FaaSCPUUsageUnit = "s"
+ FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
+
+ // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
+ // conventions. It represents the distribution of net I/O usage per invocation.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ FaaSNetIoName = "faas.net_io"
+ FaaSNetIoUnit = "By"
+ FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
+
+ // HTTPServerRequestDuration is the metric conforming to the
+ // "http.server.request.duration" semantic conventions. It represents the
+ // duration of HTTP server requests.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ HTTPServerRequestDurationName = "http.server.request.duration"
+ HTTPServerRequestDurationUnit = "s"
+ HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
+
+ // HTTPServerActiveRequests is the metric conforming to the
+ // "http.server.active_requests" semantic conventions. It represents the number
+ // of active HTTP server requests.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ HTTPServerActiveRequestsName = "http.server.active_requests"
+ HTTPServerActiveRequestsUnit = "{request}"
+ HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
+
+ // HTTPServerRequestBodySize is the metric conforming to the
+ // "http.server.request.body.size" semantic conventions. It represents the size
+ // of HTTP server request bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPServerRequestBodySizeName = "http.server.request.body.size"
+ HTTPServerRequestBodySizeUnit = "By"
+ HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
+
+ // HTTPServerResponseBodySize is the metric conforming to the
+ // "http.server.response.body.size" semantic conventions. It represents the
+ // size of HTTP server response bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPServerResponseBodySizeName = "http.server.response.body.size"
+ HTTPServerResponseBodySizeUnit = "By"
+ HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
+
+ // HTTPClientRequestDuration is the metric conforming to the
+ // "http.client.request.duration" semantic conventions. It represents the
+ // duration of HTTP client requests.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ HTTPClientRequestDurationName = "http.client.request.duration"
+ HTTPClientRequestDurationUnit = "s"
+ HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
+
+ // HTTPClientRequestBodySize is the metric conforming to the
+ // "http.client.request.body.size" semantic conventions. It represents the size
+ // of HTTP client request bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPClientRequestBodySizeName = "http.client.request.body.size"
+ HTTPClientRequestBodySizeUnit = "By"
+ HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
+
+ // HTTPClientResponseBodySize is the metric conforming to the
+ // "http.client.response.body.size" semantic conventions. It represents the
+ // size of HTTP client response bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPClientResponseBodySizeName = "http.client.response.body.size"
+ HTTPClientResponseBodySizeUnit = "By"
+ HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
+
+ // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
+ // conventions. It represents the measure of initial memory requested.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmMemoryInitName = "jvm.memory.init"
+ JvmMemoryInitUnit = "By"
+ JvmMemoryInitDescription = "Measure of initial memory requested."
+
+ // JvmSystemCPUUtilization is the metric conforming to the
+ // "jvm.system.cpu.utilization" semantic conventions. It represents the recent
+ // CPU utilization for the whole system as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
+ JvmSystemCPUUtilizationUnit = "1"
+ JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
+
+ // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
+ // semantic conventions. It represents the average CPU load of the whole system
+ // for the last minute as reported by the JVM.
+ // Instrument: gauge
+ // Unit: {run_queue_item}
+ // Stability: Experimental
+ JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
+ JvmSystemCPULoad1mUnit = "{run_queue_item}"
+ JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
+
+ // JvmBufferMemoryUsage is the metric conforming to the
+ // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
+ // memory used by buffers.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
+ JvmBufferMemoryUsageUnit = "By"
+ JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
+
+ // JvmBufferMemoryLimit is the metric conforming to the
+ // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
+ // total memory capacity of buffers.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
+ JvmBufferMemoryLimitUnit = "By"
+ JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
+
+ // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
+ // conventions. It represents the number of buffers in the pool.
+ // Instrument: updowncounter
+ // Unit: {buffer}
+ // Stability: Experimental
+ JvmBufferCountName = "jvm.buffer.count"
+ JvmBufferCountUnit = "{buffer}"
+ JvmBufferCountDescription = "Number of buffers in the pool."
+
+ // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
+ // conventions. It represents the measure of memory used.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedName = "jvm.memory.used"
+ JvmMemoryUsedUnit = "By"
+ JvmMemoryUsedDescription = "Measure of memory used."
+
+ // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
+ // semantic conventions. It represents the measure of memory committed.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryCommittedName = "jvm.memory.committed"
+ JvmMemoryCommittedUnit = "By"
+ JvmMemoryCommittedDescription = "Measure of memory committed."
+
+ // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
+ // conventions. It represents the measure of max obtainable memory.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryLimitName = "jvm.memory.limit"
+ JvmMemoryLimitUnit = "By"
+ JvmMemoryLimitDescription = "Measure of max obtainable memory."
+
+ // JvmMemoryUsedAfterLastGc is the metric conforming to the
+ // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+ // measure of memory used, as measured after the most recent garbage collection
+ // event on this pool.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+ JvmMemoryUsedAfterLastGcUnit = "By"
+ JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+ // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+ // conventions. It represents the duration of JVM garbage collection actions.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ JvmGcDurationName = "jvm.gc.duration"
+ JvmGcDurationUnit = "s"
+ JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+ // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+ // conventions. It represents the number of executing platform threads.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Stable
+ JvmThreadCountName = "jvm.thread.count"
+ JvmThreadCountUnit = "{thread}"
+ JvmThreadCountDescription = "Number of executing platform threads."
+
+ // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+ // conventions. It represents the number of classes loaded since JVM start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassLoadedName = "jvm.class.loaded"
+ JvmClassLoadedUnit = "{class}"
+ JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+ // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+ // semantic conventions. It represents the number of classes unloaded since JVM
+ // start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassUnloadedName = "jvm.class.unloaded"
+ JvmClassUnloadedUnit = "{class}"
+ JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+ // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+ // conventions. It represents the number of classes currently loaded.
+ // Instrument: updowncounter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassCountName = "jvm.class.count"
+ JvmClassCountUnit = "{class}"
+ JvmClassCountDescription = "Number of classes currently loaded."
+
+ // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+ // conventions. It represents the number of processors available to the Java
+ // virtual machine.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Stable
+ JvmCPUCountName = "jvm.cpu.count"
+ JvmCPUCountUnit = "{cpu}"
+ JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+ // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+ // conventions. It represents the CPU time used by the process as reported by
+ // the JVM.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Stable
+ JvmCPUTimeName = "jvm.cpu.time"
+ JvmCPUTimeUnit = "s"
+ JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+ // JvmCPURecentUtilization is the metric conforming to the
+ // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
+ // CPU utilization for the process as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Stable
+ JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
+ JvmCPURecentUtilizationUnit = "1"
+ JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
+
+ // MessagingPublishDuration is the metric conforming to the
+ // "messaging.publish.duration" semantic conventions. It represents the
+ // measures the duration of publish operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingPublishDurationName = "messaging.publish.duration"
+ MessagingPublishDurationUnit = "s"
+ MessagingPublishDurationDescription = "Measures the duration of publish operation."
+
+ // MessagingReceiveDuration is the metric conforming to the
+ // "messaging.receive.duration" semantic conventions. It represents the
+ // measures the duration of receive operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingReceiveDurationName = "messaging.receive.duration"
+ MessagingReceiveDurationUnit = "s"
+ MessagingReceiveDurationDescription = "Measures the duration of receive operation."
+
+ // MessagingDeliverDuration is the metric conforming to the
+ // "messaging.deliver.duration" semantic conventions. It represents the
+ // measures the duration of deliver operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingDeliverDurationName = "messaging.deliver.duration"
+ MessagingDeliverDurationUnit = "s"
+ MessagingDeliverDurationDescription = "Measures the duration of deliver operation."
+
+ // MessagingPublishMessages is the metric conforming to the
+ // "messaging.publish.messages" semantic conventions. It represents the
+ // measures the number of published messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingPublishMessagesName = "messaging.publish.messages"
+ MessagingPublishMessagesUnit = "{message}"
+ MessagingPublishMessagesDescription = "Measures the number of published messages."
+
+ // MessagingReceiveMessages is the metric conforming to the
+ // "messaging.receive.messages" semantic conventions. It represents the
+ // measures the number of received messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingReceiveMessagesName = "messaging.receive.messages"
+ MessagingReceiveMessagesUnit = "{message}"
+ MessagingReceiveMessagesDescription = "Measures the number of received messages."
+
+ // MessagingDeliverMessages is the metric conforming to the
+ // "messaging.deliver.messages" semantic conventions. It represents the
+ // measures the number of delivered messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingDeliverMessagesName = "messaging.deliver.messages"
+ MessagingDeliverMessagesUnit = "{message}"
+ MessagingDeliverMessagesDescription = "Measures the number of delivered messages."
+
+ // RPCServerDuration is the metric conforming to the "rpc.server.duration"
+ // semantic conventions. It measures the duration of inbound RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCServerDurationName = "rpc.server.duration"
+ RPCServerDurationUnit = "ms"
+ RPCServerDurationDescription = "Measures the duration of inbound RPC."
+
+ // RPCServerRequestSize is the metric conforming to the
+ // "rpc.server.request.size" semantic conventions. It represents the measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerRequestSizeName = "rpc.server.request.size"
+ RPCServerRequestSizeUnit = "By"
+ RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCServerResponseSize is the metric conforming to the
+ // "rpc.server.response.size" semantic conventions. It represents the measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerResponseSizeName = "rpc.server.response.size"
+ RPCServerResponseSizeUnit = "By"
+ RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCServerRequestsPerRPC is the metric conforming to the
+ // "rpc.server.requests_per_rpc" semantic conventions. It represents the
+ // measures the number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
+ RPCServerRequestsPerRPCUnit = "{count}"
+ RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCServerResponsesPerRPC is the metric conforming to the
+ // "rpc.server.responses_per_rpc" semantic conventions. It represents the
+ // measures the number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
+ RPCServerResponsesPerRPCUnit = "{count}"
+ RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // RPCClientDuration is the metric conforming to the "rpc.client.duration"
+ // semantic conventions. It measures the duration of outbound RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCClientDurationName = "rpc.client.duration"
+ RPCClientDurationUnit = "ms"
+ RPCClientDurationDescription = "Measures the duration of outbound RPC."
+
+ // RPCClientRequestSize is the metric conforming to the
+ // "rpc.client.request.size" semantic conventions. It represents the measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientRequestSizeName = "rpc.client.request.size"
+ RPCClientRequestSizeUnit = "By"
+ RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCClientResponseSize is the metric conforming to the
+ // "rpc.client.response.size" semantic conventions. It represents the measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientResponseSizeName = "rpc.client.response.size"
+ RPCClientResponseSizeUnit = "By"
+ RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCClientRequestsPerRPC is the metric conforming to the
+ // "rpc.client.requests_per_rpc" semantic conventions. It represents the
+ // measures the number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
+ RPCClientRequestsPerRPCUnit = "{count}"
+ RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCClientResponsesPerRPC is the metric conforming to the
+ // "rpc.client.responses_per_rpc" semantic conventions. It represents the
+ // measures the number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
+ RPCClientResponsesPerRPCUnit = "{count}"
+ RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
+ // conventions. It represents the seconds each logical CPU spent on each mode.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemCPUTimeName = "system.cpu.time"
+ SystemCPUTimeUnit = "s"
+ SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
+
+ // SystemCPUUtilization is the metric conforming to the
+ // "system.cpu.utilization" semantic conventions. It represents the difference
+ // in system.cpu.time since the last measurement, divided by the elapsed time
+ // and number of logical CPUs.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ SystemCPUUtilizationName = "system.cpu.utilization"
+ SystemCPUUtilizationUnit = "1"
+ SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
+
+ // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
+ // semantic conventions. It reports the current frequency of the CPU in Hz.
+ // Instrument: gauge
+ // Unit: {Hz}
+ // Stability: Experimental
+ SystemCPUFrequencyName = "system.cpu.frequency"
+ SystemCPUFrequencyUnit = "{Hz}"
+ SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
+
+ // SystemCPUPhysicalCount is the metric conforming to the
+ // "system.cpu.physical.count" semantic conventions. It represents the reports
+ // the number of actual physical processor cores on the hardware.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPUPhysicalCountName = "system.cpu.physical.count"
+ SystemCPUPhysicalCountUnit = "{cpu}"
+ SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
+
+ // SystemCPULogicalCount is the metric conforming to the
+ // "system.cpu.logical.count" semantic conventions. It represents the reports
+ // the number of logical (virtual) processor cores created by the operating
+ // system to manage multitasking.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPULogicalCountName = "system.cpu.logical.count"
+ SystemCPULogicalCountUnit = "{cpu}"
+ SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
+
+ // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
+ // semantic conventions. It reports memory in use by state.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryUsageName = "system.memory.usage"
+ SystemMemoryUsageUnit = "By"
+ SystemMemoryUsageDescription = "Reports memory in use by state."
+
+ // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
+ // semantic conventions. It represents the total memory available in the
+ // system.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryLimitName = "system.memory.limit"
+ SystemMemoryLimitUnit = "By"
+ SystemMemoryLimitDescription = "Total memory available in the system."
+
+ // SystemMemoryUtilization is the metric conforming to the
+ // "system.memory.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemMemoryUtilizationName = "system.memory.utilization"
+ SystemMemoryUtilizationUnit = "1"
+
+ // SystemPagingUsage is the metric conforming to the "system.paging.usage"
+ // semantic conventions. It represents the Unix swap or Windows pagefile usage.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemPagingUsageName = "system.paging.usage"
+ SystemPagingUsageUnit = "By"
+ SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
+
+ // SystemPagingUtilization is the metric conforming to the
+ // "system.paging.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingUtilizationName = "system.paging.utilization"
+ SystemPagingUtilizationUnit = "1"
+
+ // SystemPagingFaults is the metric conforming to the "system.paging.faults"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingFaultsName = "system.paging.faults"
+ SystemPagingFaultsUnit = "{fault}"
+
+ // SystemPagingOperations is the metric conforming to the
+ // "system.paging.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingOperationsName = "system.paging.operations"
+ SystemPagingOperationsUnit = "{operation}"
+
+ // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskIoName = "system.disk.io"
+ SystemDiskIoUnit = "By"
+
+ // SystemDiskOperations is the metric conforming to the
+ // "system.disk.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskOperationsName = "system.disk.operations"
+ SystemDiskOperationsUnit = "{operation}"
+
+ // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
+ // semantic conventions. It represents the time disk spent activated.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskIoTimeName = "system.disk.io_time"
+ SystemDiskIoTimeUnit = "s"
+ SystemDiskIoTimeDescription = "Time disk spent activated"
+
+ // SystemDiskOperationTime is the metric conforming to the
+ // "system.disk.operation_time" semantic conventions. It represents the sum of
+ // the time each operation took to complete.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskOperationTimeName = "system.disk.operation_time"
+ SystemDiskOperationTimeUnit = "s"
+ SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
+
+ // SystemDiskMerged is the metric conforming to the "system.disk.merged"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskMergedName = "system.disk.merged"
+ SystemDiskMergedUnit = "{operation}"
+
+ // SystemFilesystemUsage is the metric conforming to the
+ // "system.filesystem.usage" semantic conventions.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemFilesystemUsageName = "system.filesystem.usage"
+ SystemFilesystemUsageUnit = "By"
+
+ // SystemFilesystemUtilization is the metric conforming to the
+ // "system.filesystem.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemFilesystemUtilizationName = "system.filesystem.utilization"
+ SystemFilesystemUtilizationUnit = "1"
+
+ // SystemNetworkDropped is the metric conforming to the
+ // "system.network.dropped" semantic conventions. It represents the count of
+ // packets that are dropped or discarded even though there was no error.
+ // Instrument: counter
+ // Unit: {packet}
+ // Stability: Experimental
+ SystemNetworkDroppedName = "system.network.dropped"
+ SystemNetworkDroppedUnit = "{packet}"
+ SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
+
+ // SystemNetworkPackets is the metric conforming to the
+ // "system.network.packets" semantic conventions.
+ // Instrument: counter
+ // Unit: {packet}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkPacketsName = "system.network.packets"
+ SystemNetworkPacketsUnit = "{packet}"
+
+ // SystemNetworkErrors is the metric conforming to the "system.network.errors"
+ // semantic conventions. It represents the count of network errors detected.
+ // Instrument: counter
+ // Unit: {error}
+ // Stability: Experimental
+ SystemNetworkErrorsName = "system.network.errors"
+ SystemNetworkErrorsUnit = "{error}"
+ SystemNetworkErrorsDescription = "Count of network errors detected"
+
+ // SystemNetworkIo is the metric conforming to the "system.network.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkIoName = "system.network.io"
+ SystemNetworkIoUnit = "By"
+
+ // SystemNetworkConnections is the metric conforming to the
+ // "system.network.connections" semantic conventions.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkConnectionsName = "system.network.connections"
+ SystemNetworkConnectionsUnit = "{connection}"
+
+ // SystemProcessesCount is the metric conforming to the
+ // "system.processes.count" semantic conventions. It represents the total
+ // number of processes in each state.
+ // Instrument: updowncounter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessesCountName = "system.processes.count"
+ SystemProcessesCountUnit = "{process}"
+ SystemProcessesCountDescription = "Total number of processes in each state"
+
+ // SystemProcessesCreated is the metric conforming to the
+ // "system.processes.created" semantic conventions. It represents the total
+ // number of processes created over uptime of the host.
+ // Instrument: counter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessesCreatedName = "system.processes.created"
+ SystemProcessesCreatedUnit = "{process}"
+ SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host"
+
+ // SystemLinuxMemoryAvailable is the metric conforming to the
+ // "system.linux.memory.available" semantic conventions. It represents an
+ // estimate of how much memory is available for starting new applications,
+ // without causing swapping.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemLinuxMemoryAvailableName = "system.linux.memory.available"
+ SystemLinuxMemoryAvailableUnit = "By"
+ SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
+)
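+
+// A minimal sketch of how the Name, Unit, and Description constants above can
+// feed instrument creation with the go.opentelemetry.io/otel/metric API,
+// assuming the otel and metric packages are imported and a context and
+// measured duration are in scope; the meter name is a placeholder:
+//
+//	meter := otel.Meter("example/http")
+//	reqDur, err := meter.Float64Histogram(
+//		HTTPServerRequestDurationName,
+//		metric.WithUnit(HTTPServerRequestDurationUnit),
+//		metric.WithDescription(HTTPServerRequestDurationDescription),
+//	)
+//	if err != nil {
+//		panic(err) // placeholder error handling
+//	}
+//	reqDur.Record(ctx, elapsed.Seconds())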
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
new file mode 100644
index 000000000000..d66bbe9c23df
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
@@ -0,0 +1,2545 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Bare Metal Solution (BMS)
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
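+
+// A minimal sketch of attaching the cloud attributes above to an SDK
+// resource, assuming go.opentelemetry.io/otel/sdk/resource is imported and
+// using the package-level SchemaURL constant; region and account values are
+// placeholders taken from the examples above:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		CloudProviderAWS,
+//		CloudPlatformAWSLambda,
+//		CloudRegion("us-east-1"),
+//		CloudAccountID("111111111111"),
+//	)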
+
+// A container instance.
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used
+ // to run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) run by the
+ // container. [2]
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string. [2]
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime
+ // specific image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io
+ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the
+ // repo digests of the container image as provided by the container
+ // runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+ // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+ // Note:
+ // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+ // and
+ // [CRI](/~https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+ // report those under the `RepoDigests` field.
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image
+ // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+ // Should be only the `<tag>` section of the full name, for example
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'v1.27.1', '3.5.7-0'
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name, for example
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
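+
+// A minimal sketch combining the container helpers above into an attribute
+// set, e.g. for a containerd-managed container; every value is a placeholder
+// drawn from the examples in the comments above:
+//
+//	attrs := []attribute.KeyValue{
+//		ContainerID("a3bf90e006b2"),
+//		ContainerName("opentelemetry-autoconf"),
+//		ContainerImageName("gcr.io/opentelemetry/operator"),
+//		ContainerImageTags("v1.27.1"),
+//		ContainerRuntime("containerd"),
+//	}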
+
+// Describes device attributes.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine-readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human-readable version of
+ // the device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+ // of level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the
+ // "host.cpu.family" semantic conventions. It represents the family or
+ // generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', 'PA-RISC 1.1e'
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the
+ // "host.cpu.model.id" semantic conventions. It represents the model
+ // identifier. It provides more granular information about the CPU,
+ // distinguishing it from other CPUs within the same family.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the
+ // "host.cpu.stepping" semantic conventions. It represents the stepping or
+ // core revisions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+ // ID string in EBX, EDX and ECX registers. Writing these to memory in this
+ // order results in a 12-character string.
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
+ // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+ // addresses MUST be specified in the [RFC
+ // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
+ HostIPKey = attribute.Key("host.ip")
+
+ // HostMacKey is the attribute Key conforming to the "host.mac" semantic
+ // conventions. It represents the available MAC addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
+ // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
+ // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
+ // as hyphen-separated octets in uppercase hexadecimal form from most to
+ // least significant.
+ HostMacKey = attribute.Key("host.mac")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+ return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or
+// generation of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+ return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model
+// identifier. It provides more granular information about the CPU,
+// distinguishing it from other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+ return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val int) attribute.KeyValue {
+ return HostCPUSteppingKey.Int(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
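+
+// Usage sketch (illustrative only, not generated from the conventions): from
+// application code this package is typically imported as `semconv`, and its
+// helpers are passed to an SDK resource, e.g.
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.HostName("opentelemetry-test"),
+//		semconv.HostArchAMD64,
+//		semconv.HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"),
+//	)
+//
+// where `resource` is go.opentelemetry.io/otel/sdk/resource and the values are
+// the Examples listed above.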
+
+// Kubernetes resource attributes.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S doesn't have support for obtaining a cluster ID. If this is
+ // ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
+ // Which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod. The
+ // container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
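+
+// Usage sketch (illustrative only): identifying the Pod a workload runs in,
+// using the example values listed above. The downward-API wiring that would
+// supply these values in a real deployment is out of scope here.
+//
+//	podRes := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.K8SNamespaceName("default"),
+//		semconv.K8SPodName("opentelemetry-pod-autoconf"),
+//		semconv.K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
+//	)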
+
+// An OCI image manifest.
+const (
+ // OciManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of
+ // the OCI image manifest. For container images specifically, this is the
+ // digest by which the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+ // Note: Follows [OCI Image Manifest
+ // Specification](/~https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+ // and specifically the [Digest
+ // property](/~https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+ // An example can be found in [Example Image
+ // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+ OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+ return OciManifestDigestKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+ // semantic conventions. It represents the unique identifier for a
+ // particular build or compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human-readable (not intended to
+ // be parsed) OS version information, e.g. as reported by the `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human-readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, like e.g. reported by `ver`
+// or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human-readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
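+
+// Usage sketch (illustrative only): "os.type" is an Enum, so it has no
+// constructor function; one of the predefined vars above is used directly
+// alongside the string helpers.
+//
+//	osAttrs := []attribute.KeyValue{
+//		semconv.OSTypeLinux,
+//		semconv.OSName("Ubuntu"),
+//		semconv.OSVersion("18.04.1"),
+//	}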
+
+// An operating system process.
+const (
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PPID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process, as a single string. On Windows, can be set to
+// the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
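+
+// Usage sketch (illustrative only, using only the standard library `os`
+// package): populating the PID, parent PID, and executable path from the
+// running process.
+//
+//	exe, err := os.Executable()
+//	if err != nil {
+//		exe = "" // leave the path empty on error
+//	}
+//	procAttrs := []attribute.KeyValue{
+//		semconv.ProcessPID(os.Getpid()),
+//		semconv.ProcessParentPID(os.Getppid()),
+//		semconv.ProcessExecutablePath(exe),
+//	}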
+
+// The Android platform on which the Android application is running.
+const (
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It represents the uniquely
+ // identifies the framework API revision offered by a version
+ // (`os.version`) of the android operating system. More information can be
+ // found
+ // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '33', '32'
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
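+
+// Usage sketch (illustrative only): these attributes are normally reported by
+// a browser-based client rather than a Go service, but the helpers compose
+// like any other, e.g. with the example values above:
+//
+//	browserAttrs := []attribute.KeyValue{
+//		semconv.BrowserBrands(" Not A;Brand 99", "Chromium 99", "Chrome 99"),
+//		semconv.BrowserLanguage("en-US"),
+//		semconv.BrowserMobile(false),
+//	}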
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
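+
+// Usage sketch (illustrative only, with the example values from above): the
+// launch type is an Enum, so one of its predefined vars is used rather than a
+// constructor.
+//
+//	ecsAttrs := []attribute.KeyValue{
+//		semconv.AWSECSLaunchtypeFargate,
+//		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+//		semconv.AWSECSTaskFamily("opentelemetry-family"),
+//		semconv.AWSECSTaskRevision("8"),
+//	}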
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
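+
+// Usage sketch (illustrative only): the log group and log stream helpers are
+// variadic and produce string-slice attributes, so several values can be
+// passed in one call.
+//
+//	logAttrs := []attribute.KeyValue{
+//		semconv.AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"),
+//		semconv.AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+//	}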
+
+// Resource used by Google Cloud Run.
+const (
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the
+ // name of the Cloud Run
+ // [execution](https://cloud.google.com/run/docs/managing/job-executions)
+ // being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'job-name-xxxx', 'sample-job-mdw84'
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
+ // index for a task within an execution as provided by the
+ // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+)
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
+// of the Cloud Run
+// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
+// run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the
+// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
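+
+// Usage sketch (illustrative only): both values are defined to come from the
+// CLOUD_RUN_EXECUTION and CLOUD_RUN_TASK_INDEX environment variables
+// documented above, so a detector might read them as follows (error handling
+// elided).
+//
+//	execName := os.Getenv("CLOUD_RUN_EXECUTION")
+//	idx, _ := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX"))
+//	runAttrs := []attribute.KeyValue{
+//		semconv.GCPCloudRunJobExecution(execName),
+//		semconv.GCPCloudRunJobTaskIndex(idx),
+//	}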
+
+// Resources used by Google Compute Engine (GCE).
+const (
+ // GCPGceInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the
+ // hostname of a GCE instance. This is the full value of the default or
+ // [custom
+ // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-host1234.example.com',
+ // 'sample-vm.us-west1-b.c.my-project.internal'
+ GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGceInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance
+ // name of a GCE instance. This is the value provided by `host.name`, the
+ // visible name of the instance in the Cloud Console UI, and the prefix for
+ // the default hostname of the instance as defined by the [default internal
+ // DNS
+ // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-1', 'my-vm-name'
+ GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom
+// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+func GCPGceInstanceHostname(val string) attribute.KeyValue {
+ return GCPGceInstanceHostnameKey.String(val)
+}
+
+// GCPGceInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance
+// name of a GCE instance. This is the value provided by `host.name`, the
+// visible name of the instance in the Cloud Console UI, and the prefix for the
+// default hostname of the instance as defined by the [default internal DNS
+// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func GCPGceInstanceName(val string) attribute.KeyValue {
+ return GCPGceInstanceNameKey.String(val)
+}
+
+// Heroku dyno metadata
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'staging', 'production'
+ // Note: `deployment.environment` does not affect the uniqueness
+ // constraints defined through
+ // the `service.namespace`, `service.name` and `service.instance.id`
+ // resource attributes.
+ // This implies that resources carrying the following attribute
+ // combinations MUST be
+ // considered to be identifying the same service:
+ //
+ // * `service.name=frontend`, `deployment.environment=production`
+ // * `service.name=frontend`, `deployment.environment=staging`.
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
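+
+// Usage sketch (illustrative, not part of the generated conventions): per the
+// note on DeploymentEnvironmentKey, the two attribute sets below identify the
+// same service, observed in different deployment tiers.
+//
+//	prod := []attribute.KeyValue{
+//		ServiceName("frontend"),
+//		DeploymentEnvironment("production"),
+//	}
+//	staging := []attribute.KeyValue{
+//		ServiceName("frontend"),
+//		DeploymentEnvironment("staging"),
+//	}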
+
+// A serverless instance.
+const (
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, that will be potentially reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run (Services):** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+)
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
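+
+// Usage sketch (illustrative): on AWS Lambda the environment variable
+// AWS_LAMBDA_FUNCTION_MEMORY_SIZE reports MiB, so per the note on
+// FaaSMaxMemoryKey it must be converted to bytes first. Error handling is
+// elided.
+//
+//	mib, _ := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
+//	kv := FaaSMaxMemory(mib * 1_048_576) // e.g. 128 MiB -> 134217728 bytes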
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation. The format is not defined by these
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2.0.0', 'a01dbef8a'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
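+
+// Usage sketch (illustrative): the fallback rule from the ServiceNameKey note,
+// assuming a hypothetical executableName helper that may return "".
+//
+//	name := "unknown_service"
+//	if exe := executableName(); exe != "" {
+//		name = "unknown_service:" + exe // e.g. "unknown_service:bash"
+//	}
+//	kv := ServiceName(name)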
+
+// Additional attributes of a service instance.
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-k8s-pod-deployment-1',
+ // '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance, however it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
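+
+// Usage sketch (illustrative): per the ServiceInstanceIDKey note, the
+// namespace/name/instance triplet below is meant to be globally unique; the
+// UUID shown is an example value, not generated here.
+//
+//	attrs := []attribute.KeyValue{
+//		ServiceNamespace("Shop"),
+//		ServiceName("shoppingcart"),
+//		ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+//	}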
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is
+ // used, this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module
+ // name of this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this
+ // case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
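+
+// Usage sketch (illustrative): an SDK describing itself. TelemetrySDKLanguage
+// is an enum, so one of the predefined values is used rather than a free-form
+// string; the name `opentelemetry` is reserved for the official SDKs.
+//
+//	attrs := []attribute.KeyValue{
+//		TelemetrySDKLanguageGo,
+//		TelemetrySDKName("opentelemetry"),
+//		TelemetrySDKVersion("1.2.3"),
+//	}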
+
+// The telemetry distribution (distro) used to capture data recorded by the
+// instrumentation libraries.
+const (
+ // TelemetryDistroNameKey is the attribute Key conforming to the
+ // "telemetry.distro.name" semantic conventions. It represents the name of
+ // the auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'parts-unlimited-java'
+ // Note: Official auto instrumentation agents and distributions SHOULD set
+ // the `telemetry.distro.name` attribute to
+ // a string starting with `opentelemetry-`, e.g.
+ // `opentelemetry-java-instrumentation`.
+ TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+ // TelemetryDistroVersionKey is the attribute Key conforming to the
+ // "telemetry.distro.version" semantic conventions. It represents the
+ // version string of the auto instrumentation agent or distribution, if
+ // used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2.3'
+ TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+)
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OTelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ // Deprecated: use the `otel.scope.name` attribute.
+ OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OTelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ // Deprecated: use the `otel.scope.version` attribute.
+ OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions.
+//
+// Deprecated: use the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+ return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions.
+//
+// Deprecated: use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+ return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
new file mode 100644
index 000000000000..fe80b1731d02
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.24.0"
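+
+// Usage sketch (illustrative, assuming the go.opentelemetry.io/otel/sdk/resource
+// package): SchemaURL is typically passed alongside the attributes it governs.
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.ServiceName("shoppingcart"),
+//	)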
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
new file mode 100644
index 000000000000..c1718234e522
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
@@ -0,0 +1,1323 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](/docs/resource/README.md#service) of the remote
+ // service. SHOULD be equal to the actual `service.name` resource attribute
+ // of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
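+
+// Usage sketch (illustrative, assuming a trace.Span named span): a client span
+// calling a remote auth service whose own resource sets service.name to the
+// same value.
+//
+//	span.SetAttributes(PeerService("AuthTokenCache"))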
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeStacktraceKey is the attribute Key conforming to the
+ // "code.stacktrace" semantic conventions. It represents a stacktrace as a
+ // string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n
+ // at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n
+ // at com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
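+
+// Usage sketch (illustrative, assuming a trace.Span named span): annotating a
+// span with the code location that best represents the operation.
+//
+//	span.SetAttributes(
+//		CodeNamespace("com.example.MyHTTPService"),
+//		CodeFunction("serveRequest"),
+//		CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+//		CodeLineNumber(42),
+//	)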
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` request, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` request, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+ // which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+ // which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: '/~https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+ // which contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
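+
+// Usage sketch (illustrative, assuming a trace.Span named span): the two
+// Required CloudEvents attributes plus the event type, attached while
+// processing an event.
+//
+//	span.SetAttributes(
+//		CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		CloudeventsEventSource("/cloudevents/spec/pull/123"),
+//		CloudeventsEventType("com.github.pull_request.opened"),
+//	)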
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
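+
+// Usage sketch (illustrative, assuming a trace.Span named span): per the
+// OTelStatusCodeKey note, the code is set only for OK or ERROR, and the
+// description accompanies an error.
+//
+//	span.SetAttributes(
+//		OTelStatusCodeError,
+//		OTelStatusDescription("resource not found"),
+//	)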
+
+// This semantic convention uses spans to describe an instance of a function
+// that runs without provisioning or managing of servers (also known as a
+// serverless function or Function as a Service, FaaS).
+const (
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 this corresponds to the bucket name,
+ // and in Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+ // describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB
+// to the database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
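+
+// Usage sketch (illustrative, assuming a trace.Span named span): a function
+// triggered by an S3 object creation, combining the datasource attributes
+// above.
+//
+//	span.SetAttributes(
+//		FaaSDocumentCollection("myBucketName"),
+//		FaaSDocumentName("myFile.txt"),
+//		FaaSDocumentOperationInsert,
+//		FaaSDocumentTime("2020-01-23T13:47:06Z"),
+//	)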
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+)
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+)
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
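+// Example (illustrative sketch, not part of the generated specification): the
+// helpers above return attribute.KeyValue pairs that can be attached to a
+// client span around a DynamoDB call. The tracer name, span name, and values
+// below are assumptions for the example.
+//
+//	import (
+//		"context"
+//
+//		"go.opentelemetry.io/otel"
+//	)
+//
+//	func traceQuery(ctx context.Context) {
+//		_, span := otel.Tracer("example").Start(ctx, "DynamoDB.Query")
+//		defer span.End()
+//		// Record the request parameters with the typed helpers.
+//		span.SetAttributes(
+//			AWSDynamoDBTableNames("Users"),
+//			AWSDynamoDBConsistentRead(true),
+//			AWSDynamoDBLimit(25),
+//			AWSDynamoDBProjection("Title, Price"),
+//		)
+//	}
+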
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
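+// Example (illustrative sketch): both keys above carry the JSON-serialized
+// form of each index item, so request index definitions are typically
+// marshaled one by one into a []string. The map-based index value below is a
+// stand-in for the SDK's own request type.
+//
+//	import "encoding/json"
+//
+//	func gsiAttribute(indexes []map[string]any) (attribute.KeyValue, error) {
+//		vals := make([]string, 0, len(indexes))
+//		for _, idx := range indexes {
+//			b, err := json.Marshal(idx)
+//			if err != nil {
+//				return attribute.KeyValue{}, err
+//			}
+//			vals = append(vals, string(b))
+//		}
+//		return AWSDynamoDBGlobalSecondaryIndexes(vals...), nil
+//	}
+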
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
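+// Example (illustrative sketch): for a parallel Scan, the request-side
+// segment attributes and the response-side counts pair naturally on the same
+// span. The literal values are placeholders.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBSegment(3),       // this worker's segment
+//		AWSDynamoDBTotalSegments(8), // total parallel workers
+//		AWSDynamoDBCount(10),        // items returned after filtering
+//		AWSDynamoDBScannedCount(50), // items examined before filtering
+//	)
+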
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in
+ // the `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
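+// Example (illustrative sketch): a span for an S3 UploadPart call can combine
+// the bucket/key pair with the multipart-specific attributes. The literal
+// values are placeholders taken from the examples above.
+//
+//	span.SetAttributes(
+//		AWSS3Bucket("some-bucket-name"),
+//		AWSS3Key("someFile.yml"),
+//		AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//		AWSS3PartNumber(3456),
+//	)
+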
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
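+
+// Example (illustrative sketch): a GraphQL server span can combine the
+// document, the operation name, and one of the typed operation-type values
+// defined above. The names are assumptions for the example.
+//
+//	span.SetAttributes(
+//		GraphqlOperationTypeQuery,
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)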
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
new file mode 100644
index 000000000000..2de1fc3c6beb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.26.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
new file mode 100644
index 000000000000..d8dc822b2633
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
@@ -0,0 +1,8996 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The Android platform on which the Android application is running.
+const (
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It represents the uniquely
+ // identifies the framework API revision offered by a version
+ // (`os.version`) of the android operating system. More information can be
+ // found
+ // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '33', '32'
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// ASP.NET Core attributes
+const (
+ // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.result" semantic conventions. It represents
+ // the rate-limiting result: whether the lease was acquired or the
+ // reason the lease request was rejected.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'acquired', 'request_canceled'
+ AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+ // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+ // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+ // represents the full type name of the
+ // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+ // implementation that handled the exception.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if and only if the exception
+ // was handled by this handler.)
+ // Stability: stable
+ // Examples: 'Contoso.MyHandler'
+ AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+ // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
+ // to the "aspnetcore.diagnostics.exception.result" semantic conventions.
+ // It represents the ASP.NET Core exception middleware handling result.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'handled', 'unhandled'
+ AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
+
+ // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+ // the rate limiting policy name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fixed', 'sliding', 'token'
+ AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+ // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+ // "aspnetcore.request.is_unhandled" semantic conventions. It represents
+ // the flag indicating if request was handled by the application pipeline.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: True
+ AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+ // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+ // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+ // value that indicates whether the matched route is a fallback route.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: True
+ AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
+
+ // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the
+ // "aspnetcore.routing.match_status" semantic conventions. It represents
+ // the match result: success or failure.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'success', 'failure'
+ AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status")
+)
+
+var (
+ // Lease was acquired
+ AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
+ // Lease request was rejected by the endpoint limiter
+ AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
+ // Lease request was rejected by the global limiter
+ AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
+ // Lease request was canceled
+ AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
+)
+
+var (
+ // Exception was handled by the exception handling middleware
+ AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled")
+ // Exception was not handled by the exception handling middleware
+ AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled")
+ // Exception handling was skipped because the response had started
+ AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped")
+ // Exception handling didn't run because the request was aborted
+ AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted")
+)
+
+var (
+ // Match succeeded
+ AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success")
+ // Match failed
+ AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure")
+)
+
+// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
+// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+// represents the full type name of the
+// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+// implementation that handled the exception.
+func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
+ return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
+}
+
+// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
+// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+// the rate limiting policy name.
+func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
+ return AspnetcoreRateLimitingPolicyKey.String(val)
+}
+
+// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
+// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
+// the flag indicating if request was handled by the application pipeline.
+func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
+ return AspnetcoreRequestIsUnhandledKey.Bool(val)
+}
+
+// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
+// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+// value that indicates whether the matched route is a fallback route.
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
+ return AspnetcoreRoutingIsFallbackKey.Bool(val)
+}
+
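+// Example (illustrative sketch): the enum members above are predefined
+// attribute.KeyValue values, so instrumentation selects one instead of
+// formatting a string by hand. The rejected flag is an assumed condition for
+// the sketch.
+//
+//	result := AspnetcoreRateLimitingResultAcquired
+//	if rejectedByGlobalLimiter {
+//		result = AspnetcoreRateLimitingResultGlobalLimiter
+//	}
+//	span.SetAttributes(result, AspnetcoreRateLimitingPolicy("fixed"))
+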
+// Generic attributes for AWS services.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
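+// Example (illustrative sketch): the request ID usually comes from the
+// response headers of an AWS SDK call; either header spelling maps to the
+// same attribute. The resp variable (an *http.Response) is an assumption
+// for the example.
+//
+//	id := resp.Header.Get("x-amz-request-id")
+//	if id == "" {
+//		id = resp.Header.Get("x-amz-requestid")
+//	}
+//	span.SetAttributes(AWSRequestID(id))
+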
+// Attributes for AWS DynamoDB.
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the number of
+// items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// Attributes for AWS Elastic Container Service (ECS).
+const (
+ // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
+ // semantic conventions. It represents the ID of a running ECS task. The ID
+ // MUST be extracted from `task.arn`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is
+ // populated.)
+ // Stability: experimental
+ // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b',
+ // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
+ AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a
+ // running [ECS
+ // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b',
+ // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the family
+ // name of the [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
+ // used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for the task definition used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSTaskID returns an attribute KeyValue conforming to the
+// "aws.ecs.task.id" semantic conventions. It represents the ID of a running
+// ECS task. The ID MUST be extracted from `task.arn`.
+func AWSECSTaskID(val string) attribute.KeyValue {
+ return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
+// [ECS
+// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the family name of
+// the [ECS task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
+// used to create the ECS task.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
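+// Example (illustrative sketch): ECS attributes describe the telemetry
+// source, so they are usually attached to the SDK resource rather than to
+// individual spans. The ARN is a placeholder from the examples above.
+//
+//	import (
+//		"go.opentelemetry.io/otel/sdk/resource"
+//		semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+//	)
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+//		semconv.AWSECSLaunchtypeFargate,
+//		semconv.AWSECSTaskFamily("opentelemetry-family"),
+//		semconv.AWSECSTaskRevision("8"),
+//	)
+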
+// Attributes for AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Attributes for AWS Logs.
+const (
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers that each write to their own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
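+
+// Editorial sketch (not generated code): the log helpers above are variadic
+// and produce string-slice attributes, e.g.:
+//
+//    attrs := []attribute.KeyValue{
+//        AWSLogGroupNames("/aws/lambda/my-function"),
+//        AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+//    }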
+
+// Attributes for AWS Lambda.
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` request, as applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` request, as applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
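+
+// Editorial sketch (not generated code): on the span recording a Lambda
+// invocation, assuming a trace.Span from go.opentelemetry.io/otel/trace:
+//
+//    span.SetAttributes(
+//        AWSLambdaInvokedARN("arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"),
+//    )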
+
+// Attributes for AWS S3.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
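+
+// Editorial sketch (not generated code): a span for an S3 upload-part call
+// could carry the attributes above like so, assuming a trace.Span `span`:
+//
+//    span.SetAttributes(
+//        AWSS3Bucket("some-bucket-name"),
+//        AWSS3Key("someFile.yml"),
+//        AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//        AWSS3PartNumber(3456),
+//    )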
+
+// The web browser attributes
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space.
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser.
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device.
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running.
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
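+
+// Editorial sketch (not generated code): browser attributes are usually
+// collected client-side from the UA client hints API and attached to a
+// resource or span, e.g.:
+//
+//    attrs := []attribute.KeyValue{
+//        BrowserBrands("Chromium 99", "Chrome 99"),
+//        BrowserLanguage("en-US"),
+//        BrowserMobile(false),
+//        BrowserPlatform("macOS"),
+//    }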
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.address` SHOULD represent the client address
+ // behind any intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port"
+ // semantic conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.port` SHOULD represent the client port behind
+ // any intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
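+
+// Editorial sketch (not generated code): on a server span, the client side of
+// the connection could be recorded as:
+//
+//    span.SetAttributes(
+//        ClientAddress("10.1.2.80"),
+//        ClientPort(65123),
+//    )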
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. Cloud regions often
+ // have multiple, isolated locations known as zones; this attribute
+ // represents the zone where the resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names).
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Apps
+ CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Bare Metal Solution (BMS)
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. Cloud regions often have
+// multiple, isolated locations known as zones; this attribute represents the
+// zone where the resource is running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP).
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
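+
+// Editorial sketch (not generated code): cloud attributes combine the helper
+// functions with the enum values below, typically on a resource, assuming
+// go.opentelemetry.io/otel/sdk/resource and this package's SchemaURL:
+//
+//    res := resource.NewWithAttributes(
+//        SchemaURL,
+//        CloudProviderAWS,
+//        CloudPlatformAWSLambda,
+//        CloudRegion("us-east-1"),
+//        CloudAccountID("111111111111"),
+//    )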
+
+// Attributes for CloudEvents.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+ // which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+ // which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/~https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+ // which contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](/~https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
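+
+// Editorial sketch (not generated code): a span processing a CloudEvent could
+// record its envelope fields as:
+//
+//    span.SetAttributes(
+//        CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//        CloudeventsEventSource("/~https://github.com/cloudevents"),
+//        CloudeventsEventSpecVersion("1.0"),
+//        CloudeventsEventType("com.github.pull_request.opened"),
+//    )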
+
+// These attributes allow reporting the unit of code that produced a span and
+// therefore provide more context about it.
+const (
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeStacktraceKey is the attribute Key conforming to the
+ // "code.stacktrace" semantic conventions. It represents a stacktrace as a
+ // string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n
+ // at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n
+ // at com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
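+
+// Editorial sketch (not generated code): code attributes locate the unit of
+// code that produced a span, e.g.:
+//
+//    span.SetAttributes(
+//        CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+//        CodeNamespace("com.example.MyHTTPService"),
+//        CodeFunction("serveRequest"),
+//        CodeLineNumber(42),
+//    )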
+
+// A container instance.
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used
+ // to run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) run by the
+ // container.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCPUStateKey is the attribute Key conforming to the
+ // "container.cpu.state" semantic conventions. It represents the CPU state
+ // for this data point.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'user', 'kernel'
+ ContainerCPUStateKey = attribute.Key("container.cpu.state")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime
+ // specific image identifier. Usually a hash algorithm followed by a digest.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io
+ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the
+ // repo digests of the container image as provided by the container
+ // runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+ // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+ // Note:
+ // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+ // and
+ // [CRI](/~https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+ // report those under the `RepoDigests` field.
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image
+ // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+ // Should be only the `<tag>` section of the full name for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'v1.27.1', '3.5.7-0'
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+var (
+ // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
+ ContainerCPUStateUser = ContainerCPUStateKey.String("user")
+ // When CPU is used by the system (host OS)
+ ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
+ // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
+ ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a digest.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
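+
+// Editorial sketch (not generated code): container attributes usually
+// describe the resource rather than an individual span, assuming
+// go.opentelemetry.io/otel/sdk/resource and this package's SchemaURL:
+//
+//    res := resource.NewWithAttributes(
+//        SchemaURL,
+//        ContainerID("a3bf90e006b2"),
+//        ContainerName("opentelemetry-autoconf"),
+//        ContainerImageName("gcr.io/opentelemetry/operator"),
+//        ContainerRuntime("containerd"),
+//    )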
+
+// This group defines the attributes used to describe telemetry in the context
+// of databases.
+const (
+ // DBClientConnectionsPoolNameKey is the attribute Key conforming to the
+ // "db.client.connections.pool.name" semantic conventions. It represents
+ // the name of the connection pool; unique within the instrumented
+ // application. In case the connection pool implementation doesn't provide
+ // a name, instrumentation should use a combination of `server.address` and
+ // `server.port` attributes formatted as `server.address:server.port`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myDataSource'
+ DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
+
+ // DBClientConnectionsStateKey is the attribute Key conforming to the
+ // "db.client.connections.state" semantic conventions. It represents the
+ // state of a connection in the pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle'
+ DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
+
+ // DBCollectionNameKey is the attribute Key conforming to the
+ // "db.collection.name" semantic conventions. It represents the name of a
+ // collection (table, container) within the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'public.users', 'customers'
+ // Note: If the collection name is parsed from the query, it SHOULD match
+ // the value provided in the query and may be qualified with the schema and
+ // database name.
+ // It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ DBCollectionNameKey = attribute.Key("db.collection.name")
+
+ // DBNamespaceKey is the attribute Key conforming to the "db.namespace"
+ // semantic conventions. It represents the name of the database, fully
+ // qualified within the server address and port.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'customers', 'test.users'
+ // Note: If a database system has multiple namespace components, they
+ // SHOULD be concatenated (potentially using database system specific
+ // conventions) from most general to most specific namespace component, and
+ // more specific namespaces SHOULD NOT be captured without the more general
+ // namespaces, to ensure that "startswith" queries for the more general
+ // namespaces will be valid.
+ // Semantic conventions for individual database systems SHOULD document
+ // what `db.namespace` means in the context of that system.
+ // It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ DBNamespaceKey = attribute.Key("db.namespace")
+
+ // DBOperationNameKey is the attribute Key conforming to the
+ // "db.operation.name" semantic conventions. It represents the name of the
+ // operation or command being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: It is RECOMMENDED to capture the value as provided by the
+ // application without attempting to do any case normalization.
+ DBOperationNameKey = attribute.Key("db.operation.name")
+
+ // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
+ // semantic conventions. It represents the database query being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey
+ // "WuValue"'
+ DBQueryTextKey = attribute.Key("db.query.text")
+
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents the database management system (DBMS) product
+ // as identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The actual DBMS may differ from the one identified by the client.
+ // For example, when using PostgreSQL client libraries to connect to a
+ // CockroachDB, the `db.system` is set to `postgresql` based on the
+ // instrumentation's best knowledge.
+ DBSystemKey = attribute.Key("db.system")
+)
+
+var (
+ // idle
+ DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle")
+ // used
+ DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBClientConnectionsPoolName returns an attribute KeyValue conforming to
+// the "db.client.connections.pool.name" semantic conventions. It represents
+// the name of the connection pool; unique within the instrumented application.
+// In case the connection pool implementation doesn't provide a name,
+// instrumentation should use a combination of `server.address` and
+// `server.port` attributes formatted as `server.address:server.port`.
+func DBClientConnectionsPoolName(val string) attribute.KeyValue {
+ return DBClientConnectionsPoolNameKey.String(val)
+}
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+ return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the
+// "db.namespace" semantic conventions. It represents the name of the database,
+// fully qualified within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+ return DBNamespaceKey.String(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+ return DBOperationNameKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the
+// "db.query.text" semantic conventions. It represents the database query being
+// executed.
+func DBQueryText(val string) attribute.KeyValue {
+ return DBQueryTextKey.String(val)
+}
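+
+// Example (a minimal sketch, assuming an existing trace.Span named span; the
+// query text is illustrative): the helpers above return plain
+// attribute.KeyValue values, so they compose directly with
+// trace.Span.SetAttributes:
+//
+//	span.SetAttributes(
+//		DBSystemPostgreSQL,
+//		DBNamespace("shop"),
+//		DBCollectionName("products"),
+//		DBOperationName("SELECT"),
+//		DBQueryText("SELECT * FROM products WHERE id = $1"),
+//	)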
+
+// This group defines attributes for Cassandra.
+const (
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
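+
+// Example (a minimal sketch, assuming an existing trace.Span named span):
+// combining the Cassandra enum values and helper constructors on one span:
+//
+//	span.SetAttributes(
+//		DBSystemCassandra,
+//		DBCassandraConsistencyLevelLocalQuorum,
+//		DBCassandraCoordinatorDC("us-west-2"),
+//		DBCassandraIdempotence(true),
+//		DBCassandraPageSize(5000),
+//	)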
+
+// This group defines attributes for Azure Cosmos DB.
+const (
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // Cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // Cosmos DB operation type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+ // consumed for that operation
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // Cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
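+
+// Example (a minimal sketch, assuming an existing trace.Span named span):
+// recording a Cosmos DB point read with its request charge and status codes:
+//
+//	span.SetAttributes(
+//		DBSystemCosmosDB,
+//		DBCosmosDBConnectionModeDirect,
+//		DBCosmosDBOperationTypeRead,
+//		DBCosmosDBRequestCharge(1.0),
+//		DBCosmosDBStatusCode(200),
+//	)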
+
+// This group defines attributes for Elasticsearch.
+const (
+ // DBElasticsearchClusterNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.cluster.name" semantic conventions. It represents the
+ // identifier of an Elasticsearch cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+ DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+ // DBElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.node.name" semantic conventions. It represents the
+ // human-readable identifier of the node/instance to which a
+ // request was routed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-0000000001'
+ DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+)
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+ return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+ return DBElasticsearchNodeNameKey.String(val)
+}
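+
+// Example (a minimal sketch, assuming an existing trace.Span named span):
+//
+//	span.SetAttributes(
+//		DBSystemElasticsearch,
+//		DBElasticsearchClusterName("e9106fc68e3044f0b1475b04bf4ffd5f"),
+//		DBElasticsearchNodeName("instance-0000000001"),
+//	)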
+
+// Attributes for software deployments.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'staging', 'production'
+ // Note: `deployment.environment` does not affect the uniqueness
+ // constraints defined through
+ // the `service.namespace`, `service.name` and `service.instance.id`
+ // resource attributes.
+ // This implies that resources carrying the following attribute
+ // combinations MUST be
+ // considered to be identifying the same service:
+ //
+ // * `service.name=frontend`, `deployment.environment=production`
+ // * `service.name=frontend`, `deployment.environment=staging`.
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
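+
+// Example (a minimal sketch, assuming go.opentelemetry.io/otel/sdk/resource is
+// imported as resource): deployment.environment is typically attached as a
+// resource attribute rather than per span:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL, // this package's schema URL
+//		DeploymentEnvironment("staging"),
+//	)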
+
+// Attributes that represents an occurrence of a lifecycle transition on the
+// Android platform.
+const (
+ // AndroidStateKey is the attribute Key conforming to the "android.state"
+ // semantic conventions. It is deprecated; use the
+ // `device.app.lifecycle` event definition, including `android.state` as a
+ // payload field, instead.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The Android lifecycle states are defined in [Activity lifecycle
+ // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+ // from which the `OS identifiers` are derived.
+ AndroidStateKey = attribute.Key("android.state")
+)
+
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
+ AndroidStateCreated = AndroidStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
+ AndroidStateBackground = AndroidStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
+ AndroidStateForeground = AndroidStateKey.String("foreground")
+)
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the
+ // destination address - domain name if available without reverse DNS
+ // lookup; otherwise, IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the source side, and when communicating through
+ // an intermediary, `destination.address` SHOULD represent the destination
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the
+ // "destination.port" semantic conventions. It represents the destination
+ // port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
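+
+// Example (a minimal sketch, assuming an existing trace.Span named span for a
+// packet-level or peer-to-peer exchange):
+//
+//	span.SetAttributes(
+//		DestinationAddress("destination.example.com"),
+//		DestinationPort(3389),
+//	)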
+
+// Describes device attributes.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine-readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human-readable version of
+ // the device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
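+
+// Example (a minimal sketch, assuming go.opentelemetry.io/otel/sdk/resource is
+// imported as resource): device attributes describe the resource of a mobile
+// app rather than individual spans:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL, // this package's schema URL
+//		DeviceManufacturer("Samsung"),
+//		DeviceModelIdentifier("SM-G920F"),
+//		DeviceModelName("Samsung Galaxy S6"),
+//	)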
+
+// These attributes may be used for any disk related operation.
+const (
+ // DiskIoDirectionKey is the attribute Key conforming to the
+ // "disk.io.direction" semantic conventions. It represents the disk IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read'
+ DiskIoDirectionKey = attribute.Key("disk.io.direction")
+)
+
+var (
+ // read
+ DiskIoDirectionRead = DiskIoDirectionKey.String("read")
+ // write
+ DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
+)
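+
+// Example (a minimal sketch, assuming go.opentelemetry.io/otel/metric is
+// imported as metric, with an existing metric.Int64Counter named bytesRead, a
+// context.Context named ctx, and a byte count n): disk.io.direction is
+// primarily used as a metric attribute:
+//
+//	bytesRead.Add(ctx, n, metric.WithAttributes(DiskIoDirectionRead))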
+
+// The shared attributes used to report a DNS query.
+const (
+ // DNSQuestionNameKey is the attribute Key conforming to the
+ // "dns.question.name" semantic conventions. It represents the name being
+ // queried.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'www.example.com', 'opentelemetry.io'
+ // Note: If the name field contains non-printable characters (below 32 or
+ // above 126), those characters should be represented as escaped base 10
+ // integers (\DDD). Back slashes and quotes should be escaped. Tabs,
+ // carriage returns, and line feeds should be converted to \t, \r, and \n
+ // respectively.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
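+
+// Example (a minimal sketch, assuming an existing trace.Span named span):
+//
+//	span.SetAttributes(DNSQuestionName("www.example.com"))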
+
+// Attributes for operations with an authenticated and/or authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
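+
+// Example (a minimal sketch, assuming an existing trace.Span named span and
+// values already extracted from a verified access token):
+//
+//	span.SetAttributes(
+//		EnduserID("username"),
+//		EnduserRole("admin"),
+//		EnduserScope("read:message, write:files"),
+//	)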
+
+// The shared attributes used to report an error.
+const (
+ // ErrorTypeKey is the attribute Key conforming to the "error.type"
+ // semantic conventions. It describes a class of error the
+ // operation ended with.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'timeout', 'java.net.UnknownHostException',
+ // 'server_certificate_invalid', '500'
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be
+ // used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library
+ // SHOULD be low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query
+ // time when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT
+ // set `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as
+ // HTTP or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // * Use a domain-specific attribute
+ // * Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a custom value
+ ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
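+
+// Example (a minimal sketch, assuming an existing trace.Span named span and an
+// error err from the instrumented operation): when no domain-specific error
+// identifier applies, the Go type name keeps cardinality low; ErrorTypeOther
+// is the fallback value:
+//
+//	if err != nil {
+//		span.SetAttributes(ErrorTypeKey.String(fmt.Sprintf("%T", err)))
+//	}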
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It identifies the class / type of an
+ // event.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'browser.mouse.click', 'device.app.lifecycle'
+ // Note: Event names are subject to the same rules as [attribute
+ // names](/~https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
+ // Notably, event names are namespaced to avoid collisions and provide a
+ // clean separation of semantics for events in separate domains like
+ // browser, mobile, and kubernetes.
+ EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of an
+// event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example for recording span
+ // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
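+
+// Example (a minimal sketch, assuming an existing trace.Span named span and
+// go.opentelemetry.io/otel/trace imported as trace): recording an exception
+// event by hand with these attributes (the SDK's trace.Span.RecordError
+// helper typically records the same exception.* attributes for you):
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		ExceptionType("OSError"),
+//		ExceptionMessage("Division by zero"),
+//		ExceptionEscaped(true),
+//	))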
+
+// FaaS attributes
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+ // describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, which will potentially be reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run (Services):** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, which will potentially be reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
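+
+// Example (a minimal sketch, assuming an existing trace.Span named span for an
+// AWS Lambda invocation):
+//
+//	span.SetAttributes(
+//		FaaSTriggerHTTP,
+//		FaaSColdstart(true),
+//		FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//		FaaSMaxMemory(134217728),
+//	)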
+
+// Attributes for Feature Flags.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
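+
+// Example (a minimal sketch, assuming an existing trace.Span named span and
+// go.opentelemetry.io/otel/trace imported as trace): a flag evaluation can be
+// recorded as an event carrying all three feature-flag attributes:
+//
+//	span.AddEvent("feature_flag", trace.WithAttributes(
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagProviderName("Flag Manager"),
+//		FeatureFlagVariant("red"),
+//	))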
+
+// Describes file attributes.
+const (
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is
+ // located. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/user', 'C:\\Program Files\\MyApp'
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the
+ // leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: When the file name has multiple extensions (example.tar.gz), only
+ // the last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.png'
+ FileNameKey = attribute.Key("file.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/alice/example.png', 'C:\\Program
+ // Files\\MyApp\\myapp.exe'
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FileSizeKey = attribute.Key("file.size")
+)
+
+// FileDirectory returns an attribute KeyValue conforming to the
+// "file.directory" semantic conventions. It represents the directory where the
+// file is located. It should include the drive letter, when appropriate.
+func FileDirectory(val string) attribute.KeyValue {
+ return FileDirectoryKey.String(val)
+}
+
+// FileExtension returns an attribute KeyValue conforming to the
+// "file.extension" semantic conventions. It represents the file extension,
+// excluding the leading dot.
+func FileExtension(val string) attribute.KeyValue {
+ return FileExtensionKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name"
+// semantic conventions. It represents the name of the file including the
+// extension, without the directory.
+func FileName(val string) attribute.KeyValue {
+ return FileNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path"
+// semantic conventions. It represents the full path to the file, including the
+// file name. It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+ return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size"
+// semantic conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+ return FileSizeKey.Int(val)
+}
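+
+// Example (a minimal sketch, assuming an existing trace.Span named span; the
+// size is illustrative):
+//
+//	span.SetAttributes(
+//		FileDirectory("/home/alice"),
+//		FileName("example.png"),
+//		FileExtension("png"),
+//		FileSize(77835),
+//	)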
+
+// Attributes for Google Cloud Run.
+const (
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the
+ // name of the Cloud Run
+ // [execution](https://cloud.google.com/run/docs/managing/job-executions)
+ // being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'job-name-xxxx', 'sample-job-mdw84'
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
+ // index for a task within an execution as provided by the
+ // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+)
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
+// of the Cloud Run
+// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
+// run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the
+// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
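+
+// Usage (illustrative sketch): Cloud Run populates the environment variables
+// named in the docs above, so the attributes are typically derived as
+// follows (assuming the standard library os and strconv packages):
+//
+//	idx, _ := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX"))
+//	attrs := []attribute.KeyValue{
+//		GCPCloudRunJobExecution(os.Getenv("CLOUD_RUN_EXECUTION")),
+//		GCPCloudRunJobTaskIndex(idx),
+//	}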
+
+// Attributes for Google Compute Engine (GCE).
+const (
+ // GCPGceInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the
+ // hostname of a GCE instance. This is the full value of the default or
+ // [custom
+ // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-host1234.example.com',
+ // 'sample-vm.us-west1-b.c.my-project.internal'
+ GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGceInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance
+ // name of a GCE instance. This is the value provided by `host.name`, the
+ // visible name of the instance in the Cloud Console UI, and the prefix for
+ // the default hostname of the instance as defined by the [default internal
+ // DNS
+ // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-1', 'my-vm-name'
+ GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom
+// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+func GCPGceInstanceHostname(val string) attribute.KeyValue {
+ return GCPGceInstanceHostnameKey.String(val)
+}
+
+// GCPGceInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance
+// name of a GCE instance. This is the value provided by `host.name`, the
+// visible name of the instance in the Cloud Console UI, and the prefix for the
+// default hostname of the instance as defined by the [default internal DNS
+// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func GCPGceInstanceName(val string) attribute.KeyValue {
+ return GCPGceInstanceNameKey.String(val)
+}
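+
+// Usage (illustrative sketch): on GCE both values are typically read from
+// the instance metadata server (computeMetadata/v1/instance/hostname and
+// computeMetadata/v1/instance/name, with the "Metadata-Flavor: Google"
+// request header). With values already fetched:
+//
+//	attrs := []attribute.KeyValue{
+//		GCPGceInstanceHostname("my-host1234.example.com"),
+//		GCPGceInstanceName("instance-1"),
+//	}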
+
+// The attributes used to describe telemetry in the context of LLM (Large
+// Language Models) requests and responses.
+const (
+ // GenAiCompletionKey is the attribute Key conforming to the
+ // "gen_ai.completion" semantic conventions. It represents the full
+ // response received from the LLM.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: "[{'role': 'assistant', 'content': 'The capital of France is
+ // Paris.'}]"
+ // Note: It's RECOMMENDED to format completions as a JSON string matching
+ // the [OpenAI messages
+ // format](https://platform.openai.com/docs/guides/text-generation).
+ GenAiCompletionKey = attribute.Key("gen_ai.completion")
+
+ // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt"
+ // semantic conventions. It represents the full prompt sent to an LLM.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: "[{'role': 'user', 'content': 'What is the capital of
+ // France?'}]"
+ // Note: It's RECOMMENDED to format prompts as a JSON string matching the
+ // [OpenAI messages
+ // format](https://platform.openai.com/docs/guides/text-generation).
+ GenAiPromptKey = attribute.Key("gen_ai.prompt")
+
+ // GenAiRequestMaxTokensKey is the attribute Key conforming to the
+ // "gen_ai.request.max_tokens" semantic conventions. It represents the
+ // maximum number of tokens the LLM generates for a request.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+ // GenAiRequestModelKey is the attribute Key conforming to the
+ // "gen_ai.request.model" semantic conventions. It represents the name of
+ // the LLM a request is being made to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gpt-4'
+ GenAiRequestModelKey = attribute.Key("gen_ai.request.model")
+
+ // GenAiRequestTemperatureKey is the attribute Key conforming to the
+ // "gen_ai.request.temperature" semantic conventions. It represents the
+ // temperature setting for the LLM request.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0.0
+ GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+ // GenAiRequestTopPKey is the attribute Key conforming to the
+ // "gen_ai.request.top_p" semantic conventions. It represents the top_p
+ // sampling setting for the LLM request.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0
+ GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+ // GenAiResponseFinishReasonsKey is the attribute Key conforming to the
+ // "gen_ai.response.finish_reasons" semantic conventions. It represents the
+ // array of reasons the model stopped generating tokens, corresponding to
+ // each generation received.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'stop'
+ GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+ // GenAiResponseIDKey is the attribute Key conforming to the
+ // "gen_ai.response.id" semantic conventions. It represents the unique
+ // identifier for the completion.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'chatcmpl-123'
+ GenAiResponseIDKey = attribute.Key("gen_ai.response.id")
+
+ // GenAiResponseModelKey is the attribute Key conforming to the
+ // "gen_ai.response.model" semantic conventions. It represents the name of
+ // the LLM a response was generated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gpt-4-0613'
+ GenAiResponseModelKey = attribute.Key("gen_ai.response.model")
+
+ // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system"
+ // semantic conventions. It represents the Generative AI product as
+ // identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'openai'
+ // Note: The actual GenAI product may differ from the one identified by the
+ // client. For example, when using OpenAI client libraries to communicate
+ // with Mistral, the `gen_ai.system` is set to `openai` based on the
+ // instrumentation's best knowledge.
+ GenAiSystemKey = attribute.Key("gen_ai.system")
+
+ // GenAiUsageCompletionTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+ // number of tokens used in the LLM response (completion).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 180
+ GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens")
+
+ // GenAiUsagePromptTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the
+ // number of tokens used in the LLM prompt.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens")
+)
+
+var (
+ // OpenAI
+ GenAiSystemOpenai = GenAiSystemKey.String("openai")
+)
+
+// GenAiCompletion returns an attribute KeyValue conforming to the
+// "gen_ai.completion" semantic conventions. It represents the full response
+// received from the LLM.
+func GenAiCompletion(val string) attribute.KeyValue {
+ return GenAiCompletionKey.String(val)
+}
+
+// GenAiPrompt returns an attribute KeyValue conforming to the
+// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to
+// an LLM.
+func GenAiPrompt(val string) attribute.KeyValue {
+ return GenAiPromptKey.String(val)
+}
+
+// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the LLM generates for a request.
+func GenAiRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAiRequestMaxTokensKey.Int(val)
+}
+
+// GenAiRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// LLM a request is being made to.
+func GenAiRequestModel(val string) attribute.KeyValue {
+ return GenAiRequestModelKey.String(val)
+}
+
+// GenAiRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the LLM request.
+func GenAiRequestTemperature(val float64) attribute.KeyValue {
+ return GenAiRequestTemperatureKey.Float64(val)
+}
+
+// GenAiRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p
+// sampling setting for the LLM request.
+func GenAiRequestTopP(val float64) attribute.KeyValue {
+ return GenAiRequestTopPKey.Float64(val)
+}
+
+// GenAiResponseFinishReasons returns an attribute KeyValue conforming to
+// the "gen_ai.response.finish_reasons" semantic conventions. It represents the
+// array of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAiResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAiResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAiResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique
+// identifier for the completion.
+func GenAiResponseID(val string) attribute.KeyValue {
+ return GenAiResponseIDKey.String(val)
+}
+
+// GenAiResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// LLM a response was generated from.
+func GenAiResponseModel(val string) attribute.KeyValue {
+ return GenAiResponseModelKey.String(val)
+}
+
+// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
+// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+// number of tokens used in the LLM response (completion).
+func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
+ return GenAiUsageCompletionTokensKey.Int(val)
+}
+
+// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
+// of tokens used in the LLM prompt.
+func GenAiUsagePromptTokens(val int) attribute.KeyValue {
+ return GenAiUsagePromptTokensKey.Int(val)
+}
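+
+// Usage (illustrative sketch): annotating a span around an LLM call with the
+// constructors above. Per the notes on gen_ai.prompt and gen_ai.completion,
+// both are RECOMMENDED to be JSON strings in the OpenAI messages format.
+// Assuming an active span named span; all values are hypothetical:
+//
+//	span.SetAttributes(
+//		GenAiSystemOpenai,
+//		GenAiRequestModel("gpt-4"),
+//		GenAiRequestMaxTokens(100),
+//		GenAiRequestTemperature(0.0),
+//		GenAiPrompt(`[{"role": "user", "content": "What is the capital of France?"}]`),
+//		GenAiCompletion(`[{"role": "assistant", "content": "The capital of France is Paris."}]`),
+//		GenAiResponseID("chatcmpl-123"),
+//		GenAiResponseModel("gpt-4-0613"),
+//		GenAiResponseFinishReasons("stop"),
+//		GenAiUsagePromptTokens(100),
+//		GenAiUsageCompletionTokens(180),
+//	)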
+
+// Attributes for GraphQL.
+const (
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
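+
+// Usage (illustrative sketch): annotating a GraphQL execution span. The
+// operation type has no constructor function; use the enum members above
+// directly. Assuming an active span named span:
+//
+//	span.SetAttributes(
+//		GraphqlOperationTypeQuery,
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)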
+
+// Attributes for the Heroku platform.
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
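+
+// Usage (illustrative sketch): on Heroku these values are commonly taken
+// from the dyno metadata environment. The variable names below assume the
+// dyno metadata feature is enabled and may differ per app:
+//
+//	attrs := []attribute.KeyValue{
+//		HerokuAppID(os.Getenv("HEROKU_APP_ID")),
+//		HerokuReleaseCommit(os.Getenv("HEROKU_SLUG_COMMIT")),
+//		HerokuReleaseCreationTimestamp(os.Getenv("HEROKU_RELEASE_CREATED_AT")),
+//	}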
+
+// A host is defined as a computing instance, for example a physical server,
+// a virtual machine, a switch, or a disk array.
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+ // of level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the
+ // "host.cpu.family" semantic conventions. It represents the family or
+ // generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', 'PA-RISC 1.1e'
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the
+ // "host.cpu.model.id" semantic conventions. It represents the model
+ // identifier. It provides more granular information about the CPU,
+ // distinguishing it from other CPUs within the same family.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the
+ // "host.cpu.stepping" semantic conventions. It represents the stepping or
+ // core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1', 'r1p1'
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: The [CPUID](https://wiki.osdev.org/CPUID) instruction returns the
+ // vendor ID string in the EBX, EDX and ECX registers. Writing these to
+ // memory in this order results in a 12-character string.
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
+ // Note: IPv4 addresses MUST be specified in dotted-quad notation. IPv6
+ // addresses MUST be specified in the [RFC
+ // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
+ HostIPKey = attribute.Key("host.ip")
+
+ // HostMacKey is the attribute Key conforming to the "host.mac" semantic
+ // conventions. It represents the available MAC addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
+ // Note: MAC addresses MUST be represented in [IEEE RA hexadecimal
+ // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
+ // as hyphen-separated octets in uppercase hexadecimal form from most to
+ // least significant.
+ HostMacKey = attribute.Key("host.mac")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the `hostname` command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+ return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or
+// generation of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+ return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model
+// identifier. It provides more granular information about the CPU,
+// distinguishing it from other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+ return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
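+
+// Formatting sketch: the host.mac note above requires hyphen-separated
+// uppercase octets, while Go's net.HardwareAddr formats as lowercase
+// colon-separated hex, so a conversion along these lines is needed before
+// calling HostMac (assuming the net and strings packages):
+//
+//	func macString(hw net.HardwareAddr) string {
+//		return strings.ToUpper(strings.ReplaceAll(hw.String(), ":", "-"))
+//	}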
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the `hostname` command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of
+ // the HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'active', 'idle'
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the hTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods; it is
+ // not a list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and the `http.request.method`
+ // attribute value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+ // to be case-insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the
+ // ordinal number of the request resend attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Server Unavailable,
+ // network issues, or any other).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPRequestSizeKey is the attribute Key conforming to the
+ // "http.request.size" semantic conventions. It represents the total size
+ // of the request in bytes. This should be the total number of bytes sent
+ // over the wire, including the request line (HTTP/1.1), framing (HTTP/2
+ // and HTTP/3), headers, and request body if any.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1437
+ HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size
+ // of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For responses using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseSizeKey is the attribute Key conforming to the
+ // "http.response.size" semantic conventions. It represents the total size
+ // of the response in bytes. This should be the total number of bytes sent
+ // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
+ // HTTP/3), headers, and response body and trailers if any.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1437
+ HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status
+ // code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 200
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route, that is, the path
+ // template in the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+var (
+ // active state
+ HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
+ // idle state
+ HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
+)
+
+var (
+ // CONNECT method
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
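+
+// Normalization sketch: the http.request.method note above requires unknown
+// methods to be reported as _OTHER with the original value preserved. One
+// possible mapping onto the enum members above (a sketch, not the library's
+// API):
+//
+//	var knownMethods = map[string]attribute.KeyValue{
+//		"CONNECT": HTTPRequestMethodConnect, "DELETE": HTTPRequestMethodDelete,
+//		"GET": HTTPRequestMethodGet, "HEAD": HTTPRequestMethodHead,
+//		"OPTIONS": HTTPRequestMethodOptions, "PATCH": HTTPRequestMethodPatch,
+//		"POST": HTTPRequestMethodPost, "PUT": HTTPRequestMethodPut,
+//		"TRACE": HTTPRequestMethodTrace,
+//	}
+//
+//	func methodAttrs(method string) []attribute.KeyValue {
+//		if kv, ok := knownMethods[method]; ok {
+//			return []attribute.KeyValue{kv}
+//		}
+//		return []attribute.KeyValue{
+//			HTTPRequestMethodOther,
+//			HTTPRequestMethodOriginal(method),
+//		}
+//	}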
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of the request resend attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of
+// the request in bytes. This should be the total number of bytes sent over the
+// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For responses using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of
+// the response in bytes. This should be the total number of bytes sent over
+// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// Java Virtual Machine (JVM) related attributes.
+const (
+ // JvmBufferPoolNameKey is the attribute Key conforming to the
+ // "jvm.buffer.pool.name" semantic conventions. It represents the name of
+ // the buffer pool.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mapped', 'direct'
+ // Note: Pool names are generally obtained via
+ // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+ JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+
+ // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
+ // semantic conventions. It represents the name of the garbage collector
+ // action.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'end of minor GC', 'end of major GC'
+ // Note: Garbage collector action is generally obtained via
+ // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
+ JvmGcActionKey = attribute.Key("jvm.gc.action")
+
+ // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
+ // semantic conventions. It represents the name of the garbage collector.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'G1 Young Generation', 'G1 Old Generation'
+ // Note: Garbage collector name is generally obtained via
+ // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
+ JvmGcNameKey = attribute.Key("jvm.gc.name")
+
+ // JvmMemoryPoolNameKey is the attribute Key conforming to the
+ // "jvm.memory.pool.name" semantic conventions. It represents the name of
+ // the memory pool.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+ // Note: Pool names are generally obtained via
+ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+ JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+ // JvmMemoryTypeKey is the attribute Key conforming to the
+ // "jvm.memory.type" semantic conventions. It represents the type of
+ // memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+
+ // JvmThreadDaemonKey is the attribute Key conforming to the
+ // "jvm.thread.daemon" semantic conventions. It represents the whether the
+ // thread is daemon or not.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
+
+ // JvmThreadStateKey is the attribute Key conforming to the
+ // "jvm.thread.state" semantic conventions. It represents the state of the
+ // thread.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'runnable', 'blocked'
+ JvmThreadStateKey = attribute.Key("jvm.thread.state")
+)
+
+var (
+ // Heap memory
+ JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+ // Non-heap memory
+ JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+var (
+ // A thread that has not yet started is in this state
+ JvmThreadStateNew = JvmThreadStateKey.String("new")
+ // A thread executing in the Java virtual machine is in this state
+ JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
+ // A thread that is blocked waiting for a monitor lock is in this state
+ JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
+ // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
+ JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
+ // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
+ JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
+ // A thread that has exited is in this state
+ JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+ return JvmBufferPoolNameKey.String(val)
+}
+
+// JvmGcAction returns an attribute KeyValue conforming to the
+// "jvm.gc.action" semantic conventions. It represents the name of the garbage
+// collector action.
+func JvmGcAction(val string) attribute.KeyValue {
+ return JvmGcActionKey.String(val)
+}
+
+// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
+// semantic conventions. It represents the name of the garbage collector.
+func JvmGcName(val string) attribute.KeyValue {
+ return JvmGcNameKey.String(val)
+}
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+ return JvmMemoryPoolNameKey.String(val)
+}
+
+// JvmThreadDaemon returns an attribute KeyValue conforming to the
+// "jvm.thread.daemon" semantic conventions. It represents the whether the
+// thread is daemon or not.
+func JvmThreadDaemon(val bool) attribute.KeyValue {
+ return JvmThreadDaemonKey.Bool(val)
+}
+
+// Kubernetes resource attributes.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S doesn't have support for obtaining a cluster ID. If this is
+ // ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html),
+ // which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from Pod specification, must be unique within a Pod. Container
+ // runtime usually uses different globally unique name (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SContainerStatusLastTerminatedReasonKey is the attribute Key
+ // conforming to the "k8s.container.status.last_terminated_reason" semantic
+ // conventions. It represents the last terminated reason of the Container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Evicted', 'Error'
+ K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
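+
+// Usage (illustrative sketch): per the note above, the kube-system namespace
+// UID serves as the cluster pseudo-ID and can be looked up with, for
+// example:
+//
+//	kubectl get namespace kube-system -o jsonpath='{.metadata.uid}'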
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from Pod specification, must be unique within a Pod. Container
+// runtime usually uses different globally unique name (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+ return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
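+
+// A minimal usage sketch (an illustration, not part of the generated API): the
+// helpers above produce attribute.KeyValue values that can be attached to an
+// SDK resource via go.opentelemetry.io/otel/sdk/resource. The cluster,
+// namespace, and pod values here are hypothetical.
+//
+//	res, err := resource.New(ctx,
+//		resource.WithAttributes(
+//			K8SClusterName("prod-cluster"),
+//			K8SNamespaceName("payments"),
+//			K8SPodName("payments-7d9f8-abcde"),
+//		),
+//	)
+//	// handle err, then pass res to the tracer/meter provider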
+
+// Log attributes
+const (
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream"
+ // semantic conventions. It represents the stream associated with the log.
+ // See below for a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+ // Logs from stdout stream
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Attributes for a file to which the log was emitted.
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'audit.log'
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the
+ // basename of the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'uuid.log'
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/log/mysql/audit.log'
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full
+ // path to the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/lib/docker/uuid.log'
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
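+
+// A hedged sketch of how these helpers compose (the slice and the log record
+// it would annotate are assumptions for illustration):
+//
+//	attrs := []attribute.KeyValue{
+//		LogFileName("audit.log"),
+//		LogFilePath("/var/log/mysql/audit.log"),
+//		LogIostreamStdout,
+//	}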
+
+// The generic attributes that may be used in any Log Record.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](/~https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique
+ // identifier for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'client-5', 'myhost@8742@s8083jm'
+ MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If
+ // the broker doesn't have such a notion, the destination name SHOULD
+ // uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationPartitionIDKey is the attribute Key conforming to
+ // the "messaging.destination.partition.id" semantic conventions. It
+ // represents the identifier of the partition messages are sent to or
+ // received from, unique within the `messaging.destination.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1'
+ MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
+ // to the "messaging.destination_publish.anonymous" semantic conventions.
+ // It represents a boolean that is true if the publish message destination
+ // is anonymous (could be unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
+
+ // MessagingDestinationPublishNameKey is the attribute Key conforming to
+ // the "messaging.destination_publish.name" semantic conventions. It
+ // represents the name of the original destination the message was
+ // published to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: The name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If
+ // the broker doesn't have such notion, the original destination name
+ // SHOULD uniquely identify the broker.
+ MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the
+ // size of the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1439
+ // Note: This can refer to either the compressed or uncompressed body size.
+ // If both sizes are known, the uncompressed
+ // body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the conversation ID identifying the conversation to which the message
+ // belongs, represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents
+ // the size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2738
+ // Note: This can refer to either the compressed or uncompressed size. If
+ // both sizes are known, the uncompressed
+ // size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationNameKey is the attribute Key conforming to the
+ // "messaging.operation.name" semantic conventions. It represents the
+ // system-specific name of the messaging operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ack', 'nack', 'send'
+ MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+ // MessagingOperationTypeKey is the attribute Key conforming to the
+ // "messaging.operation.type" semantic conventions. It represents a string
+ // identifying the type of the messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents the messaging
+ // system as identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The actual messaging system may differ from the one known by the
+ // client. For example, when using Kafka client libraries to communicate
+ // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on
+ // the instrumentation's best knowledge.
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+var (
+ // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
+ MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
+ // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
+ MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+ // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
+ MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+ // One or more messages are delivered to or processed by a consumer
+ MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process")
+ // One or more messages are settled
+ MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+)
+
+var (
+ // Apache ActiveMQ
+ MessagingSystemActivemq = MessagingSystemKey.String("activemq")
+ // Amazon Simple Queue Service (SQS)
+ MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
+ // Azure Event Grid
+ MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid")
+ // Azure Event Hubs
+ MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs")
+ // Azure Service Bus
+ MessagingSystemServicebus = MessagingSystemKey.String("servicebus")
+ // Google Cloud Pub/Sub
+ MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
+ // Java Message Service
+ MessagingSystemJms = MessagingSystemKey.String("jms")
+ // Apache Kafka
+ MessagingSystemKafka = MessagingSystemKey.String("kafka")
+ // RabbitMQ
+ MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
+ // Apache RocketMQ
+ MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client.id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationPartitionID returns an attribute KeyValue conforming
+// to the "messaging.destination.partition.id" semantic conventions. It
+// represents the identifier of the partition messages are sent to or received
+// from, unique within the `messaging.destination.name`.
+func MessagingDestinationPartitionID(val string) attribute.KeyValue {
+ return MessagingDestinationPartitionIDKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationPublishAnonymous returns an attribute KeyValue
+// conforming to the "messaging.destination_publish.anonymous" semantic
+// conventions. It represents a boolean that is true if the publish message
+// destination is anonymous (could be unnamed or have auto-generated name).
+func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationPublishAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationPublishName returns an attribute KeyValue conforming
+// to the "messaging.destination_publish.name" semantic conventions. It
+// represents the name of the original destination the message was published to.
+func MessagingDestinationPublishName(val string) attribute.KeyValue {
+ return MessagingDestinationPublishNameKey.String(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size
+// of the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the conversation ID identifying the conversation to which the
+// message belongs, represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
+// the "messaging.message.envelope.size" semantic conventions. It represents
+// the size of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
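+
+// A usage sketch for a producer span (assumptions for illustration: a
+// trace.Tracer named tracer is in scope and the destination name is made up):
+//
+//	ctx, span := tracer.Start(ctx, "publish MyQueue",
+//		trace.WithSpanKind(trace.SpanKindProducer),
+//		trace.WithAttributes(
+//			MessagingSystemRabbitmq,
+//			MessagingOperationTypePublish,
+//			MessagingDestinationName("MyQueue"),
+//		))
+//	defer span.End()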
+
+// This group describes attributes specific to Apache Kafka.
+const (
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message key in Kafka; message keys are used for grouping similar messages
+ // to ensure they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message key in Kafka; message keys are used for grouping similar messages to
+// ensure they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`, the
+// attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
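+
+// A consumer-side sketch (the span is assumed to exist; group, key, and offset
+// values are illustrative):
+//
+//	span.SetAttributes(
+//		MessagingSystemKafka,
+//		MessagingKafkaConsumerGroup("my-group"),
+//		MessagingKafkaMessageKey("myKey"),
+//		MessagingKafkaMessageOffset(42),
+//	)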
+
+// This group describes attributes specific to RabbitMQ.
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
+ // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
+ // It represents the RabbitMQ message delivery tag.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 123
+ MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
+// conventions. It represents the RabbitMQ message delivery tag.
+func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
+ return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
+}
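+
+// For example (values are illustrative, not prescribed by the conventions):
+//
+//	span.SetAttributes(
+//		MessagingSystemRabbitmq,
+//		MessagingRabbitmqDestinationRoutingKey("myKey"),
+//		MessagingRabbitmqMessageDeliveryTag(123),
+//	)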
+
+// This group describes attributes specific to RocketMQ.
+const (
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for a delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the
+ // message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // independent of one another.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides the
+// message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of one another.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
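+
+// A short sketch combining the scalar helpers with the enum values above (all
+// values are illustrative):
+//
+//	span.SetAttributes(
+//		MessagingSystemRocketmq,
+//		MessagingRocketmqClientGroup("myConsumerGroup"),
+//		MessagingRocketmqMessageTypeFifo,
+//		MessagingRocketmqMessageKeys("keyA", "keyB"),
+//	)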
+
+// This group describes attributes specific to GCP Pub/Sub.
+const (
+ // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
+ // It represents the ack deadline in seconds set for the modify ack
+ // deadline request.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+ // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
+ // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+ // represents the ack id for a given message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ack_id'
+ MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+ // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
+ // conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
+ // semantic conventions. It represents the delivery attempt for a given
+ // message.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+ // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+ // It represents the ordering key for a given message. If the attribute is
+ // not present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ordering_key'
+ MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+)
+
+// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
+// conventions. It represents the ack deadline in seconds set for the modify
+// ack deadline request.
+func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
+ return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+// represents the ack id for a given message.
+func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
+ return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
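+
+// For instance, on a span describing an ack-deadline extension (values are
+// illustrative):
+//
+//	span.SetAttributes(
+//		MessagingSystemGCPPubsub,
+//		MessagingGCPPubsubMessageAckID("ack_id"),
+//		MessagingGCPPubsubMessageAckDeadline(10),
+//	)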
+
+// This group describes attributes specific to Azure Service Bus.
+const (
+ // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
+ // conforming to the "messaging.servicebus.destination.subscription_name"
+ // semantic conventions. It represents the name of the subscription in the
+ // topic messages are received from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mySubscription'
+ MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
+
+ // MessagingServicebusDispositionStatusKey is the attribute Key conforming
+ // to the "messaging.servicebus.disposition_status" semantic conventions.
+ // It represents the [settlement
+ // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServicebusMessageDeliveryCountKey is the attribute Key
+ // conforming to the "messaging.servicebus.message.delivery_count" semantic
+ // conventions. It represents the number of deliveries that have been
+ // attempted for this message.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
+ // conforming to the "messaging.servicebus.message.enqueued_time" semantic
+ // conventions. It represents the UTC epoch seconds at which the message
+ // has been accepted and stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1701393730
+ MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+)
+
+var (
+ // Message is completed
+ MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
+ // Message is abandoned
+ MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
+ // Message is sent to dead letter queue
+ MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
+ // Message is deferred
+ MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
+)
+
+// MessagingServicebusDestinationSubscriptionName returns an attribute
+// KeyValue conforming to the
+// "messaging.servicebus.destination.subscription_name" semantic conventions.
+// It represents the name of the subscription in the topic messages are
+// received from.
+func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
+ return MessagingServicebusDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue {
+ return MessagingServicebusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingServicebusMessageEnqueuedTimeKey.Int(val)
+}
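+
+// A sketch for a settlement operation (the disposition value comes from the
+// var block above; the delivery count is illustrative):
+//
+//	span.SetAttributes(
+//		MessagingSystemServicebus,
+//		MessagingServicebusDispositionStatusComplete,
+//		MessagingServicebusMessageDeliveryCount(2),
+//	)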
+
+// This group describes attributes specific to Azure Event Hubs.
+const (
+ // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to
+ // the "messaging.eventhubs.consumer.group" semantic conventions. It
+ // represents the name of the consumer group the event consumer is
+ // associated with.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'indexer'
+ MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group")
+
+ // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming
+ // to the "messaging.eventhubs.message.enqueued_time" semantic conventions.
+ // It represents the UTC epoch seconds at which the message has been
+ // accepted and stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1701393730
+ MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+)
+
+// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming
+// to the "messaging.eventhubs.consumer.group" semantic conventions. It
+// represents the name of the consumer group the event consumer is associated
+// with.
+func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue {
+ return MessagingEventhubsConsumerGroupKey.String(val)
+}
+
+// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.eventhubs.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingEventhubsMessageEnqueuedTimeKey.Int(val)
+}
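+
+// For example (values are illustrative):
+//
+//	span.SetAttributes(
+//		MessagingSystemEventhubs,
+//		MessagingEventhubsConsumerGroup("indexer"),
+//		MessagingEventhubsMessageEnqueuedTime(1701393730),
+//	)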
+
+// These attributes may be used for any network related operation.
+const (
+ // NetworkCarrierIccKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier
+ // network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'DE'
+ NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMccKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '310'
+ NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMncKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '001'
+ NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'sprint'
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'LTE'
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the
+ // internet connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'wifi'
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkIoDirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network
+ // IO operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'transmit'
+ NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+ // NetworkLocalPortKey is the attribute Key conforming to the
+ // "network.local.port" semantic conventions. It represents the local port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkLocalPortKey = attribute.Key("network.local.port")
+
+ // NetworkPeerAddressKey is the attribute Key conforming to the
+ // "network.peer.address" semantic conventions. It represents the peer
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+ // NetworkPeerPortKey is the attribute Key conforming to the
+ // "network.peer.port" semantic conventions. It represents the peer port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the [OSI
+ // application layer](https://osi-model.com/application-layer/) or non-OSI
+ // equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the
+ // actual version of the protocol used for network communication.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.1', '2'
+ // Note: If protocol version is subject to negotiation (for example using
+ // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute
+ // SHOULD be set to the negotiated version. If the actual protocol version
+ // is not known, this attribute SHOULD NOT be set.
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the [OSI
+ // transport layer](https://osi-model.com/transport-layer/) or
+ // [inter-process communication
+ // method](https://wikipedia.org/wiki/Inter-process_communication).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tcp', 'udp'
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example
+ // different processes could be listening on TCP port 12345 and UDP port
+ // 12345.
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type"
+ // semantic conventions. It represents the [OSI network
+ // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ipv4', 'ipv6'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkTypeKey = attribute.Key("network.type")
+)
+
+var (
+ // GPRS
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+var (
+ // wifi
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+ // transmit
+ NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
+ // receive
+ NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
+)
+
+var (
+ // TCP
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+ // IPv4
+ NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+ return NetworkCarrierIccKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+ return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+ return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local
+// address of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+ return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+ return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+ return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+ return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
+
+// An OCI image manifest.
+const (
+ // OciManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of
+ // the OCI image manifest. For container images specifically, this is the
+ // digest by which the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+ // Note: Follows [OCI Image Manifest
+ // Specification](/~https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+ // and specifically the [Digest
+ // property](/~https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+ // An example can be found in [Example Image
+ // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+ OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest
+// by which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+ return OciManifestDigestKey.String(val)
+}
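+
+// Illustrative only, not part of the generated conventions: a minimal sketch
+// validating a raw digest string with github.com/opencontainers/go-digest (a
+// containerd dependency) before attaching it. The helper name
+// manifestDigestAttr is hypothetical.
+//
+//	import "github.com/opencontainers/go-digest"
+//
+//	func manifestDigestAttr(raw string) (attribute.KeyValue, error) {
+//		d, err := digest.Parse(raw)
+//		if err != nil {
+//			return attribute.KeyValue{}, err
+//		}
+//		return OciManifestDigest(d.String()), nil
+//	}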
+
+// Attributes used by the OpenTracing Shim layer.
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+ // semantic conventions. It represents the unique identifier for a
+ // particular build or compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, as reported by, for example, the
+ // `ver` or `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
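+
+// Illustrative only, not part of the generated conventions: a minimal sketch
+// mapping runtime.GOOS onto the os.type enum. The helper name osTypeFromGOOS
+// is hypothetical; the enum is open, so unknown platforms fall back to the
+// raw GOOS string.
+//
+//	import "runtime"
+//
+//	func osTypeFromGOOS() attribute.KeyValue {
+//		switch runtime.GOOS {
+//		case "windows":
+//			return OSTypeWindows
+//		case "linux":
+//			return OSTypeLinux
+//		case "darwin":
+//			return OSTypeDarwin
+//		case "freebsd":
+//			return OSTypeFreeBSD
+//		case "netbsd":
+//			return OSTypeNetBSD
+//		case "openbsd":
+//			return OSTypeOpenBSD
+//		case "dragonfly":
+//			return OSTypeDragonflyBSD
+//		case "solaris":
+//			return OSTypeSolaris
+//		case "aix":
+//			return OSTypeAIX
+//		case "zos":
+//			return OSTypeZOS
+//		default:
+//			return OSTypeKey.String(runtime.GOOS)
+//		}
+//	}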
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, as reported by, for
+// example, the `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
+
+// Attributes reserved for OpenTelemetry
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
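+
+// Illustrative only, not part of the generated conventions: a minimal sketch
+// of how a non-OTLP exporter might derive these attributes from an error
+// while honoring the rule that otel.status_code MUST NOT be set when the
+// status is UNSET. The helper name statusAttributes is hypothetical; it
+// treats a nil error as UNSET rather than OK, matching the span default.
+//
+//	func statusAttributes(err error) []attribute.KeyValue {
+//		if err == nil {
+//			return nil // status UNSET: emit neither attribute
+//		}
+//		return []attribute.KeyValue{
+//			OTelStatusCodeError,
+//			OTelStatusDescription(err.Error()),
+//		}
+//	}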
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](/docs/resource/README.md#service) of the remote
+ // service. SHOULD be equal to the actual `service.name` resource attribute
+ // of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// An operating system process.
+const (
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessContextSwitchTypeKey is the attribute Key conforming to the
+ // "process.context_switch_type" semantic conventions. It represents the
+ // specifies whether the context switches for this data point were
+ // voluntary or involuntary.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+ // ProcessCreationTimeKey is the attribute Key conforming to the
+ // "process.creation.time" semantic conventions. It represents the date and
+ // time the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2023-11-21T09:25:34.853Z'
+ ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessExitCodeKey is the attribute Key conforming to the
+ // "process.exit.code" semantic conventions. It represents the exit code of
+ // the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 127
+ ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+ // ProcessExitTimeKey is the attribute Key conforming to the
+ // "process.exit.time" semantic conventions. It represents the date and
+ // time the process exited, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2023-11-21T09:26:12.315Z'
+ ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+ // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+ // "process.group_leader.pid" semantic conventions. It represents the PID
+ // of the process's group leader. This is also the process group ID (PGID)
+ // of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 23
+ ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+ // ProcessInteractiveKey is the attribute Key conforming to the
+ // "process.interactive" semantic conventions. It represents the whether
+ // the process is connected to an interactive shell.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessInteractiveKey = attribute.Key("process.interactive")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+
+ // ProcessPagingFaultTypeKey is the attribute Key conforming to the
+ // "process.paging.fault_type" semantic conventions. It represents the type
+ // of page fault for this data point. Type `major` is for major/hard page
+ // faults, and `minor` is for minor/soft page faults.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PPID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessRealUserIDKey is the attribute Key conforming to the
+ // "process.real_user.id" semantic conventions. It represents the real user
+ // ID (RUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1000
+ ProcessRealUserIDKey = attribute.Key("process.real_user.id")
+
+ // ProcessRealUserNameKey is the attribute Key conforming to the
+ // "process.real_user.name" semantic conventions. It represents the
+ // username of the real user of the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'operator'
+ ProcessRealUserNameKey = attribute.Key("process.real_user.name")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessSavedUserIDKey is the attribute Key conforming to the
+ // "process.saved_user.id" semantic conventions. It represents the saved
+ // user ID (SUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1002
+ ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+ // ProcessSavedUserNameKey is the attribute Key conforming to the
+ // "process.saved_user.name" semantic conventions. It represents the
+ // username of the saved user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'operator'
+ ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+ // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+ // "process.session_leader.pid" semantic conventions. It represents the PID
+ // of the process's session leader. This is also the session ID (SID) of
+ // the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 14
+ ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+ // ProcessUserIDKey is the attribute Key conforming to the
+ // "process.user.id" semantic conventions. It represents the effective user
+ // ID (EUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1001
+ ProcessUserIDKey = attribute.Key("process.user.id")
+
+ // ProcessUserNameKey is the attribute Key conforming to the
+ // "process.user.name" semantic conventions. It represents the username of
+ // the effective user of the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ ProcessUserNameKey = attribute.Key("process.user.name")
+
+ // ProcessVpidKey is the attribute Key conforming to the "process.vpid"
+ // semantic conventions. It represents the virtual process identifier.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12
+ // Note: The process ID within a PID namespace. This is not necessarily
+ // unique across all processes on the host but it is unique within the
+ // process namespace that the process exists within.
+ ProcessVpidKey = attribute.Key("process.vpid")
+)
+
+var (
+ // voluntary
+ ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+ // involuntary
+ ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+var (
+ // major
+ ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+ // minor
+ ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
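+
+// Illustrative only, not part of the generated conventions: a minimal sketch
+// of the procfs extraction described above, splitting /proc/self/cmdline on
+// NUL bytes. Linux-only; the helper name commandArgsFromProc is hypothetical.
+//
+//	import (
+//		"bytes"
+//		"os"
+//	)
+//
+//	func commandArgsFromProc() (attribute.KeyValue, error) {
+//		raw, err := os.ReadFile("/proc/self/cmdline")
+//		if err != nil {
+//			return attribute.KeyValue{}, err
+//		}
+//		var args []string
+//		for _, f := range bytes.Split(bytes.TrimRight(raw, "\x00"), []byte{0}) {
+//			args = append(args, string(f))
+//		}
+//		return ProcessCommandArgs(args...), nil
+//	}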
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and
+// time the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+ return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+ return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time
+// the process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+ return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of
+// the process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+ return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents the whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+ return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue {
+ return ProcessRealUserIDKey.Int(val)
+}
+
+// ProcessRealUserName returns an attribute KeyValue conforming to the
+// "process.real_user.name" semantic conventions. It represents the username of
+// the real user of the process.
+func ProcessRealUserName(val string) attribute.KeyValue {
+ return ProcessRealUserNameKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessSavedUserID returns an attribute KeyValue conforming to the
+// "process.saved_user.id" semantic conventions. It represents the saved user
+// ID (SUID) of the process.
+func ProcessSavedUserID(val int) attribute.KeyValue {
+ return ProcessSavedUserIDKey.Int(val)
+}
+
+// ProcessSavedUserName returns an attribute KeyValue conforming to the
+// "process.saved_user.name" semantic conventions. It represents the username
+// of the saved user.
+func ProcessSavedUserName(val string) attribute.KeyValue {
+ return ProcessSavedUserNameKey.String(val)
+}
+
+// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
+// "process.session_leader.pid" semantic conventions. It represents the PID of
+// the process's session leader. This is also the session ID (SID) of the
+// process.
+func ProcessSessionLeaderPID(val int) attribute.KeyValue {
+ return ProcessSessionLeaderPIDKey.Int(val)
+}
+
+// ProcessUserID returns an attribute KeyValue conforming to the
+// "process.user.id" semantic conventions. It represents the effective user ID
+// (EUID) of the process.
+func ProcessUserID(val int) attribute.KeyValue {
+ return ProcessUserIDKey.Int(val)
+}
+
+// ProcessUserName returns an attribute KeyValue conforming to the
+// "process.user.name" semantic conventions. It represents the username of the
+// effective user of the process.
+func ProcessUserName(val string) attribute.KeyValue {
+ return ProcessUserNameKey.String(val)
+}
+
+// ProcessVpid returns an attribute KeyValue conforming to the
+// "process.vpid" semantic conventions. It represents the virtual process
+// identifier.
+func ProcessVpid(val int) attribute.KeyValue {
+ return ProcessVpidKey.Int(val)
+}
+
+// Attributes for process CPU
+const (
+ // ProcessCPUStateKey is the attribute Key conforming to the
+ // "process.cpu.state" semantic conventions. It represents the CPU state of
+ // the process.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessCPUStateKey = attribute.Key("process.cpu.state")
+)
+
+var (
+ // system
+ ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
+ // user
+ ProcessCPUStateUser = ProcessCPUStateKey.String("user")
+ // wait
+ ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
+)
+
+// Attributes for remote procedure calls.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](/~https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since the protocol allows the id to be
+ // an int, string, `null`, or missing (for notifications), the value is
+ // expected to be cast to string for simplicity. Use an empty string in
+ // case of a `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMessageCompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the
+ // "rpc.message.type" semantic conventions. It represents the whether this
+ // is a received or sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
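+
+// Illustrative only, not part of the generated conventions: a minimal sketch
+// deriving rpc.grpc.status_code from an error via google.golang.org/grpc (a
+// containerd dependency). status.Code returns codes.OK for a nil error and
+// codes.Unknown for errors that carry no gRPC status.
+//
+//	import "google.golang.org/grpc/status"
+//
+//	func grpcStatusAttr(err error) attribute.KeyValue {
+//		return RPCGRPCStatusCodeKey.Int(int(status.Code(err)))
+//	}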
+
+var (
+ // sent
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+ // received
+ RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since the protocol allows the id to be an
+// int, string, `null`, or missing (for notifications), the value is expected
+// to be cast to string for simplicity. Use an empty string in case of a
+// `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the
+// "rpc.message.id" semantic conventions. It represents the mUST be calculated
+// as two different counters starting from `1` one for sent messages and one
+// for received message.
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
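+
+// Illustrative only, not part of the generated conventions: a minimal sketch
+// of the mandated two-counter scheme, with independent 1-based counters for
+// sent and received messages. The messageCounters type is hypothetical.
+//
+//	import "sync/atomic"
+//
+//	type messageCounters struct {
+//		sent, received atomic.Int64
+//	}
+//
+//	func (c *messageCounters) nextSent() attribute.KeyValue {
+//		return RPCMessageID(int(c.sent.Add(1)))
+//	}
+//
+//	func (c *messageCounters) nextReceived() attribute.KeyValue {
+//		return RPCMessageID(int(c.received.Add(1)))
+//	}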
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
+// the "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.address` SHOULD represent the server address
+ // behind any intermediaries, for example proxies, if it's available.
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port"
+ // semantic conventions. It represents the server port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.port` SHOULD represent the server port behind
+ // any intermediaries, for example proxies, if it's available.
+ ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the server domain name
+// if available without reverse DNS lookup; otherwise, IP address or Unix
+// domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
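+
+// Illustrative only, not part of the generated conventions: a minimal sketch
+// splitting a "host:port" target into server.address and server.port. The
+// helper name serverAttributes is hypothetical.
+//
+//	import (
+//		"net"
+//		"strconv"
+//	)
+//
+//	func serverAttributes(target string) []attribute.KeyValue {
+//		host, portStr, err := net.SplitHostPort(target)
+//		if err != nil {
+//			// No port present; report the whole target as the address.
+//			return []attribute.KeyValue{ServerAddress(target)}
+//		}
+//		attrs := []attribute.KeyValue{ServerAddress(host)}
+//		if port, err := strconv.Atoi(portStr); err == nil {
+//			attrs = append(attrs, ServerPort(port))
+//		}
+//		return attrs
+//	}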
+
+// A service instance.
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to
+ // distinguish instances of the same service that exist at the same time
+ // (e.g. instances of a horizontally scaled
+ // service).
+ //
+ // Implementations, such as SDKs, are recommended to generate a random
+ // Version 1 or Version 4 [RFC
+ // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an
+ // inherently unique ID as the source of
+ // this value if stability is desirable. In that case, the ID SHOULD be
+ // used as source of a UUID Version 5 and
+ // SHOULD use the following UUID as the namespace:
+ // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+ //
+ // UUIDs are typically recommended, as only an opaque value for the
+ // purposes of identifying a service instance is
+ // needed. Similar to what can be seen in the man page for the
+ // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
+ // file, the underlying
+ // data, such as pod name and namespace should be treated as confidential,
+ // being the user's choice to expose it
+ // or not via another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we
+ // do not recommend using one identifier
+ // for all processes participating in the application. Instead, it's
+ // recommended each division (e.g. a worker
+ // thread in unicorn) to have its own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it
+ // can't unambiguously determine the
+ // service instance that is generating that telemetry. For instance,
+ // creating a UUID based on `pod.name` will
+ // likely be wrong, as the Collector might not know from which container
+ // within that pod the telemetry originated.
+ // However, Collectors can set the `service.instance.id` if they can
+ // unambiguously determine the service instance
+ // for that telemetry. This is typically the case for scraping receivers,
+ // as they know the target address and
+ // port.
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fall back to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
+ // `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). A zero-length namespace string is assumed equal to an
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation. The format is not defined by these
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0', 'a01dbef8a'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
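+
+// Illustrative only, not part of the generated conventions: a minimal sketch
+// of the UUID Version 5 derivation suggested in the Note above, using
+// github.com/google/uuid (a containerd dependency); uuid.NewSHA1 produces a
+// name-based Version 5 UUID. Reading /etc/machine-id as the stable source is
+// one possible choice, per the Note's caveats about confidential data.
+//
+//	import (
+//		"bytes"
+//		"os"
+//
+//		"github.com/google/uuid"
+//	)
+//
+//	var instanceIDNamespace = uuid.MustParse("4d63009a-8d0f-11ee-aad7-4c796ed8e320")
+//
+//	func serviceInstanceIDFromMachineID() (attribute.KeyValue, error) {
+//		raw, err := os.ReadFile("/etc/machine-id")
+//		if err != nil {
+//			return attribute.KeyValue{}, err
+//		}
+//		id := uuid.NewSHA1(instanceIDNamespace, bytes.TrimSpace(raw))
+//		return ServiceInstanceID(id.String()), nil
+//	}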
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
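+
+// Illustrative only, not part of the generated conventions: a minimal sketch
+// of the fallback rule from the Note above, producing `unknown_service:` plus
+// the executable name when no service name was configured. The helper name
+// serviceNameOr is hypothetical.
+//
+//	import (
+//		"os"
+//		"path/filepath"
+//	)
+//
+//	func serviceNameOr(configured string) attribute.KeyValue {
+//		if configured != "" {
+//			return ServiceName(configured)
+//		}
+//		if exe, err := os.Executable(); err == nil {
+//			return ServiceName("unknown_service:" + filepath.Base(exe))
+//		}
+//		return ServiceName("unknown_service")
+//	}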
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id"
+ // semantic conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
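+
+// Illustrative only, not part of the generated conventions: a minimal sketch
+// of the rotation described above, issuing a fresh session ID and reporting
+// the expired one as session.previous_id so backends can link the two. Uses
+// github.com/google/uuid (a containerd dependency); the helper name
+// rotateSession is hypothetical.
+//
+//	import "github.com/google/uuid"
+//
+//	func rotateSession(current string) []attribute.KeyValue {
+//		attrs := []attribute.KeyValue{SessionID(uuid.NewString())}
+//		if current != "" {
+//			attrs = append(attrs, SessionPreviousID(current))
+//		}
+//		return attrs
+//	}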
+
+// SignalR attributes
+const (
+ // SignalrConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the
+ // SignalR HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'app_shutdown', 'timeout'
+ SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalrTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the [SignalR
+ // transport
+ // type](/~https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'web_sockets', 'long_polling'
+ SignalrTransportKey = attribute.Key("signalr.transport")
+)
+
+var (
+ // The connection was closed normally
+ SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout
+ SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down
+ SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
+)
+
+var (
+ // ServerSentEvents protocol
+ SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
+ // WebSockets protocol
+ SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
+)
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the destination side, and when communicating
+ // through an intermediary, `source.address` SHOULD represent the source
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port"
+ // semantic conventions. It represents the source port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
+// Describes System attributes
+const (
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '(identifier)'
+ SystemDeviceKey = attribute.Key("system.device")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the
+// "system.device" semantic conventions. It represents the device identifier
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// Describes System CPU attributes
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It represents the
+ // logical CPU number [0..n-1]
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemCPUStateKey is the attribute Key conforming to the
+ // "system.cpu.state" semantic conventions. It represents the state of the
+ // CPU
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle', 'interrupt'
+ SystemCPUStateKey = attribute.Key("system.cpu.state")
+)
+
+var (
+ // user
+ SystemCPUStateUser = SystemCPUStateKey.String("user")
+ // system
+ SystemCPUStateSystem = SystemCPUStateKey.String("system")
+ // nice
+ SystemCPUStateNice = SystemCPUStateKey.String("nice")
+ // idle
+ SystemCPUStateIdle = SystemCPUStateKey.String("idle")
+ // iowait
+ SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
+ // interrupt
+ SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
+ // steal
+ SystemCPUStateSteal = SystemCPUStateKey.String("steal")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the logical
+// CPU number [0..n-1]
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
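+
+// Example usage (an illustrative sketch): recording a CPU-time counter
+// with the state and logical-number attributes above. The meter, the
+// metric name, and the measured value are assumptions; only the
+// attribute helpers come from this package.
+//
+//    cpuTime, _ := meter.Float64Counter("system.cpu.time",
+//        metric.WithUnit("s")) // go.opentelemetry.io/otel/metric
+//    cpuTime.Add(ctx, 1.5, metric.WithAttributes(
+//        SystemCPULogicalNumber(0),
+//        SystemCPUStateUser,
+//    ))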
+
+// Describes System Memory attributes
+const (
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory
+ // state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free', 'cached'
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+)
+
+var (
+ // used
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // shared
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+ // buffers
+ SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+ // cached
+ SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Describes System Memory Paging attributes
+const (
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'in'
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory
+ // paging state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free'
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory
+ // paging type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'minor'
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+)
+
+var (
+ // in
+ SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+ // out
+ SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+var (
+ // used
+ SystemPagingStateUsed = SystemPagingStateKey.String("used")
+ // free
+ SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+var (
+ // major
+ SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+ // minor
+ SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Describes Filesystem attributes
+const (
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the
+ // filesystem mode
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'rw, ro'
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/mnt/data'
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the
+ // filesystem state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'used'
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the
+ // filesystem type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ext4'
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+)
+
+var (
+ // used
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+var (
+ // fat32
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network attributes
+const (
+ // SystemNetworkStateKey is the attribute Key conforming to the
+ // "system.network.state" semantic conventions. It represents the state of
+ // a network connection; a stateless protocol MUST NOT set this attribute
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'close_wait'
+ SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+ // close
+ SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+ // close_wait
+ SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+ // closing
+ SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+ // delete
+ SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+ // established
+ SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+ // fin_wait_1
+ SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+ // fin_wait_2
+ SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+ // last_ack
+ SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+ // listen
+ SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+ // syn_recv
+ SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+ // syn_sent
+ SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+ // time_wait
+ SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process attributes
+const (
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State
+ // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'running'
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+var (
+ // running
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
+
+// Attributes for telemetry SDK.
+const (
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is
+ // used, this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module
+ // name of this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this
+ // case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+
+ // TelemetryDistroNameKey is the attribute Key conforming to the
+ // "telemetry.distro.name" semantic conventions. It represents the name of
+ // the auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'parts-unlimited-java'
+ // Note: Official auto instrumentation agents and distributions SHOULD set
+ // the `telemetry.distro.name` attribute to
+ // a string starting with `opentelemetry-`, e.g.
+ // `opentelemetry-java-instrumentation`.
+ TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+ // TelemetryDistroVersionKey is the attribute Key conforming to the
+ // "telemetry.distro.version" semantic conventions. It represents the
+ // version string of the auto instrumentation agent or distribution, if
+ // used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2.3'
+ TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
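+
+// Example usage (an illustrative sketch): building a resource carrying
+// the SDK attributes above. The official SDK sets these on its own;
+// constructing them by hand is shown only for illustration, and the
+// version string is hypothetical.
+//
+//    res := resource.NewSchemaless( // go.opentelemetry.io/otel/sdk/resource
+//        TelemetrySDKLanguageGo,
+//        TelemetrySDKName("opentelemetry"),
+//        TelemetrySDKVersion("1.28.0"),
+//    )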
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// Semantic convention attributes in the TLS namespace.
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+ // semantic conventions. It represents the string indicating the
+ // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+ // used during the current connection.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for `tls.cipher` MUST be one of the
+ // `Descriptions` of the [registered TLS Cipher
+ // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the client. This is
+ // usually mutually-exclusive of `client.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the client. This is usually mutually-exclusive of
+ // `client.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of DER-encoded version of
+ // certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of DER-encoded version of
+ // certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of DER-encoded version
+ // of certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the
+ // "tls.client.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based
+ // on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/Time
+ // indicating when client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the
+ // date/time indicating when the client certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+ // "tls.client.server_name" semantic conventions. It represents the also
+ // called an SNI, this tells the server which hostname to which the client
+ // is attempting to connect to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions. It represents the boolean flag
+ // indicating if the TLS negotiation was successful and transitioned to an
+ // encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the
+ // "tls.next_protocol" semantic conventions. It represents the string
+ // indicating the protocol being tunneled. Per the values in the [IANA
+ // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+ // this string should be lower case.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'http/1.1'
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the
+ // "tls.protocol.name" semantic conventions. It represents the normalized
+ // lowercase protocol name parsed from original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions. It represents the numeric
+ // part of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2', '3'
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+ // semantic conventions. It represents the boolean flag indicating if this
+ // TLS connection was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the server. This is
+ // usually mutually-exclusive of `server.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the server. This is usually mutually-exclusive of
+ // `server.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of DER-encoded version of
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of DER-encoded version of
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of DER-encoded version
+ // of certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the
+ // "tls.server.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the
+ // "tls.server.ja3s" semantic conventions. It represents a hash that
+ // identifies servers based on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/Time
+ // indicating when server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the
+ // date/time indicating when the server certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+ // ssl
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the also called
+// an SNI, this tells the server which hostname to which the client is
+// attempting to connect to.
+func TLSClientServerName(val string) attribute.KeyValue {
+ return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
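+
+// Example usage (an illustrative sketch): deriving a few of the TLS
+// attributes above from a crypto/tls connection. `conn` is an assumed
+// *tls.Conn; tls.CipherSuiteName yields the IANA suite description.
+//
+//    state := conn.ConnectionState()
+//    attrs := []attribute.KeyValue{
+//        TLSCipher(tls.CipherSuiteName(state.CipherSuite)),
+//        TLSNextProtocol(state.NegotiatedProtocol), // ALPN protocol, e.g. "h2"
+//        TLSResumed(state.DidResume),
+//        TLSEstablished(true),
+//    }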
+
+// Attributes describing URL.
+const (
+ // URLDomainKey is the attribute Key conforming to the "url.domain"
+ // semantic conventions. It represents the domain extracted from the
+ // `url.full`, such as "opentelemetry.io".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2',
+ // '[1080:0:0:0:8:800:200C:417A]'
+ // Note: In some cases a URL may refer to an IP and/or port directly,
+ // without a domain name. In this case, the IP address would go to the
+ // domain field. If the URL contains a [literal IPv6
+ // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by
+ // `[` and `]`, the `[` and `]` characters should also be captured in the
+ // domain field.
+ URLDomainKey = attribute.Key("url.domain")
+
+ // URLExtensionKey is the attribute Key conforming to the "url.extension"
+ // semantic conventions. It represents the file extension extracted from
+ // the `url.full`, excluding the leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: The file extension is only set if it exists, as not every url has
+ // a file extension. When the file name has multiple extensions
+ // `example.tar.gz`, only the last one should be captured `gz`, not
+ // `tar.gz`.
+ URLExtensionKey = attribute.Key("url.extension")
+
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment"
+ // semantic conventions. It represents the [URI
+ // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'SemConv'
+ URLFragmentKey = attribute.Key("url.fragment")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network
+ // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // '//localhost'
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the
+ // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
+ // included nevertheless.
+ // `url.full` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case username and
+ // password SHOULD be redacted and attribute's value SHOULD be
+ // `https://REDACTED:REDACTED@www.example.com/`.
+ // `url.full` SHOULD capture the absolute URL when it is available (or can
+ // be reconstructed). Sensitive content provided in `url.full` SHOULD be
+ // scrubbed when instrumentations can identify it.
+ URLFullKey = attribute.Key("url.full")
+
+ // URLOriginalKey is the attribute Key conforming to the "url.original"
+ // semantic conventions. It represents the unmodified original URL as seen
+ // in the event source.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // 'search?q=OpenTelemetry'
+ // Note: In network monitoring, the observed URL may be a full URL, whereas
+ // in access logs, the URL is often just represented as a path. This field
+ // is meant to represent the URL as it was observed, complete or not.
+ // `url.original` might contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case password and
+ // username SHOULD NOT be redacted and attribute's value SHOULD remain the
+ // same.
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI
+ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/search'
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI
+ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered URL domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.com', 'foo.co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org). For example, the registered domain for
+ // `foo.example.com` is `example.com`. Trying to approximate this by simply
+ // taking the last two labels will not work well for TLDs such as `co.uk`.
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+ // semantic conventions. It represents the subdomain portion of a fully
+ // qualified domain name, which includes all of the names except the host name
+ // under the registered_domain. In a partially qualified domain, or if the
+ // qualification level of the full name cannot be determined, subdomain
+ // contains all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'east', 'sub2.sub1'
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
+ // the domain has multiple levels of subdomain, such as
+ // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
+ // with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template"
+ // semantic conventions. It represents the low-cardinality template of an
+ // [absolute path
+ // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective
+ // top level domain (eTLD), also known as the domain suffix, is the last
+ // part of the domain name. For example, the top level domain for
+ // example.com is `com`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com', 'co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org).
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the
+// `url.full`, such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the
+// "url.extension" semantic conventions. It represents the file extension
+// extracted from the `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the
+// "url.original" semantic conventions. It represents the unmodified original
+// URL as seen in the event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port"
+// semantic conventions. It represents the port extracted from the `url.full`
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered URL domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the
+// "url.subdomain" semantic conventions. It represents the subdomain portion of
+// a fully qualified domain name, which includes all of the names except the host name
+// under the registered_domain. In a partially qualified domain, or if the
+// qualification level of the full name cannot be determined, subdomain
+// contains all of the names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the
+// "url.template" semantic conventions. It represents the low-cardinality
+// template of an [absolute path
+// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix: the last part of
+// the domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
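+
+// Example usage (an illustrative sketch): populating URL attributes from
+// a parsed net/url URL. Error handling is elided for brevity.
+//
+//    u, _ := url.Parse("https://www.foo.bar/search?q=OpenTelemetry#SemConv")
+//    attrs := []attribute.KeyValue{
+//        URLScheme(u.Scheme),     // "https"
+//        URLDomain(u.Hostname()), // "www.foo.bar"
+//        URLPath(u.Path),         // "/search"
+//        URLQuery(u.RawQuery),    // "q=OpenTelemetry"
+//        URLFragment(u.Fragment), // "SemConv"
+//    }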
+
+// Describes user-agent attributes.
+const (
+ // UserAgentNameKey is the attribute Key conforming to the
+ // "user_agent.name" semantic conventions. It represents the name of the
+ // user-agent extracted from original. Usually refers to the browser's
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Safari', 'YourApp'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
+ // from original string. In the case of using a user-agent for non-browser
+ // products, such as microservices with multiple names/versions inside the
+ // `user_agent.original`, the most significant name SHOULD be selected. In
+ // such a scenario it should align with `user_agent.version`
+ UserAgentNameKey = attribute.Key("user_agent.name")
+
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of
+ // the user-agent extracted from original. Usually refers to the browser's
+ // version
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.1.2', '1.0.0'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's
+ // version from original string. In the case of using a user-agent for
+ // non-browser products, such as microservices with multiple names/versions
+ // inside the `user_agent.original`, the most significant version SHOULD be
+ // selected. In such a scenario it should align with `user_agent.name`
+ UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions. It represents the name of the
+// user-agent extracted from original. Usually refers to the browser's name.
+func UserAgentName(val string) attribute.KeyValue {
+ return UserAgentNameKey.String(val)
+}
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
+
+// UserAgentVersion returns an attribute KeyValue conforming to the
+// "user_agent.version" semantic conventions. It represents the version of the
+// user-agent extracted from original. Usually refers to the browser's version
+func UserAgentVersion(val string) attribute.KeyValue {
+ return UserAgentVersionKey.String(val)
+}
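+
+// Example usage (an illustrative sketch): capturing the raw User-Agent
+// header from an incoming net/http request. Extracting a name/version
+// pair out of it is left to a dedicated parser.
+//
+//    func uaAttrs(r *http.Request) []attribute.KeyValue {
+//        return []attribute.KeyValue{
+//            UserAgentOriginal(r.UserAgent()),
+//        }
+//    }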
+
+// The attributes used to describe the packaged software running the
+// application code.
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
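+
+ // Illustrative sketch (not part of the generated conventions): webengine
+ // attributes describe the process itself, so they are usually set once on an
+ // SDK resource rather than per span. Assuming the
+ // "go.opentelemetry.io/otel/sdk/resource" import and the semconv alias, with
+ // hypothetical WildFly values:
+ //
+ //     res := resource.NewWithAttributes(
+ //         semconv.SchemaURL,
+ //         semconv.WebEngineName("WildFly"),
+ //         semconv.WebEngineVersion("21.0.0"),
+ //     )
+ //     // Pass res to the trace/metric providers via their WithResource options.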
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
new file mode 100644
index 000000000000..d031bbea7841
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+ // OpenTelemetry semantic conventions are agreed, standardized naming
+ // patterns for the attributes, metrics, and events that OpenTelemetry
+ // instrumentation emits. This package represents the v1.26.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
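+
+ // The import path is versioned, so callers pin a convention version
+ // explicitly and multiple semconv versions can coexist in one build. A
+ // minimal sketch of the conventional aliased import:
+ //
+ //     import semconv "go.opentelemetry.io/otel/semconv/v1.26.0"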
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
new file mode 100644
index 000000000000..bfaee0d56e38
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
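+
+ // Illustrative sketch (not part of the generated conventions): recording a
+ // caught error as a span event under this well-known name, assuming the
+ // "go.opentelemetry.io/otel/attribute" and "go.opentelemetry.io/otel/trace"
+ // imports and an in-scope span and err:
+ //
+ //     span.AddEvent(ExceptionEventName, trace.WithAttributes(
+ //         attribute.String("exception.message", err.Error()),
+ //     ))
+ //
+ // span.RecordError(err) emits the same event name for you.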
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
new file mode 100644
index 000000000000..fcdb9f48596e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
@@ -0,0 +1,1307 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+const (
+
+ // ContainerCPUTime is the metric conforming to the "container.cpu.time"
+ // semantic conventions. It represents the total CPU time consumed.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ ContainerCPUTimeName = "container.cpu.time"
+ ContainerCPUTimeUnit = "s"
+ ContainerCPUTimeDescription = "Total CPU time consumed"
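+
+ // Illustrative sketch (not part of the generated conventions): each
+ // Name/Unit/Description triple in this block is meant to be passed to an
+ // instrument constructor so the emitted metric matches the convention.
+ // Assuming a metric.Meter named meter (from "go.opentelemetry.io/otel/metric")
+ // and a context.Context named ctx:
+ //
+ //     cpuTime, err := meter.Float64Counter(
+ //         ContainerCPUTimeName,
+ //         metric.WithUnit(ContainerCPUTimeUnit),
+ //         metric.WithDescription(ContainerCPUTimeDescription),
+ //     )
+ //     if err != nil {
+ //         // handle the error
+ //     }
+ //     cpuTime.Add(ctx, 0.5) // CPU seconds consumed since the last report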
+
+ // ContainerMemoryUsage is the metric conforming to the
+ // "container.memory.usage" semantic conventions. It represents the memory
+ // usage of the container.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ContainerMemoryUsageName = "container.memory.usage"
+ ContainerMemoryUsageUnit = "By"
+ ContainerMemoryUsageDescription = "Memory usage of the container."
+
+ // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic
+ // conventions. It represents the disk bytes for the container.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ContainerDiskIoName = "container.disk.io"
+ ContainerDiskIoUnit = "By"
+ ContainerDiskIoDescription = "Disk bytes for the container."
+
+ // ContainerNetworkIo is the metric conforming to the "container.network.io"
+ // semantic conventions. It represents the network bytes for the container.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ContainerNetworkIoName = "container.network.io"
+ ContainerNetworkIoUnit = "By"
+ ContainerNetworkIoDescription = "Network bytes for the container."
+
+ // DBClientOperationDuration is the metric conforming to the
+ // "db.client.operation.duration" semantic conventions. It represents the
+ // duration of database client operations.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientOperationDurationName = "db.client.operation.duration"
+ DBClientOperationDurationUnit = "s"
+ DBClientOperationDurationDescription = "Duration of database client operations."
+
+ // DBClientConnectionCount is the metric conforming to the
+ // "db.client.connection.count" semantic conventions. It represents the number
+ // of connections that are currently in state described by the `state`
+ // attribute.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionCountName = "db.client.connection.count"
+ DBClientConnectionCountUnit = "{connection}"
+ DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute"
+
+ // DBClientConnectionIdleMax is the metric conforming to the
+ // "db.client.connection.idle.max" semantic conventions. It represents the
+ // maximum number of idle open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionIdleMaxName = "db.client.connection.idle.max"
+ DBClientConnectionIdleMaxUnit = "{connection}"
+ DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed"
+
+ // DBClientConnectionIdleMin is the metric conforming to the
+ // "db.client.connection.idle.min" semantic conventions. It represents the
+ // minimum number of idle open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionIdleMinName = "db.client.connection.idle.min"
+ DBClientConnectionIdleMinUnit = "{connection}"
+ DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed"
+
+ // DBClientConnectionMax is the metric conforming to the
+ // "db.client.connection.max" semantic conventions. It represents the maximum
+ // number of open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionMaxName = "db.client.connection.max"
+ DBClientConnectionMaxUnit = "{connection}"
+ DBClientConnectionMaxDescription = "The maximum number of open connections allowed"
+
+ // DBClientConnectionPendingRequests is the metric conforming to the
+ // "db.client.connection.pending_requests" semantic conventions. It represents
+ // the number of pending requests for an open connection, cumulative for the
+ // entire pool.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests"
+ DBClientConnectionPendingRequestsUnit = "{request}"
+ DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
+
+ // DBClientConnectionTimeouts is the metric conforming to the
+ // "db.client.connection.timeouts" semantic conventions. It represents the
+ // number of connection timeouts that have occurred trying to obtain a
+ // connection from the pool.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
+ DBClientConnectionTimeoutsUnit = "{timeout}"
+ DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+ // DBClientConnectionCreateTime is the metric conforming to the
+ // "db.client.connection.create_time" semantic conventions. It represents the
+ // time it took to create a new connection.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionCreateTimeName = "db.client.connection.create_time"
+ DBClientConnectionCreateTimeUnit = "s"
+ DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
+
+ // DBClientConnectionWaitTime is the metric conforming to the
+ // "db.client.connection.wait_time" semantic conventions. It represents the
+ // time it took to obtain an open connection from the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
+ DBClientConnectionWaitTimeUnit = "s"
+ DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+ // DBClientConnectionUseTime is the metric conforming to the
+ // "db.client.connection.use_time" semantic conventions. It represents the time
+ // between borrowing a connection and returning it to the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionUseTimeName = "db.client.connection.use_time"
+ DBClientConnectionUseTimeUnit = "s"
+ DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+
+ // DBClientConnectionsUsage is the metric conforming to the
+ // "db.client.connections.usage" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.count` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsUsageName = "db.client.connections.usage"
+ DBClientConnectionsUsageUnit = "{connection}"
+ DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
+
+ // DBClientConnectionsIdleMax is the metric conforming to the
+ // "db.client.connections.idle.max" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.idle.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
+ DBClientConnectionsIdleMaxUnit = "{connection}"
+ DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
+
+ // DBClientConnectionsIdleMin is the metric conforming to the
+ // "db.client.connections.idle.min" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.idle.min` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
+ DBClientConnectionsIdleMinUnit = "{connection}"
+ DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
+
+ // DBClientConnectionsMax is the metric conforming to the
+ // "db.client.connections.max" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsMaxName = "db.client.connections.max"
+ DBClientConnectionsMaxUnit = "{connection}"
+ DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
+
+ // DBClientConnectionsPendingRequests is the metric conforming to the
+ // "db.client.connections.pending_requests" semantic conventions. It represents
+ // the deprecated, use `db.client.connection.pending_requests` instead.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
+ DBClientConnectionsPendingRequestsUnit = "{request}"
+ DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
+
+ // DBClientConnectionsTimeouts is the metric conforming to the
+ // "db.client.connections.timeouts" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.timeouts` instead.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
+ DBClientConnectionsTimeoutsUnit = "{timeout}"
+ DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
+
+ // DBClientConnectionsCreateTime is the metric conforming to the
+ // "db.client.connections.create_time" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.create_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
+ DBClientConnectionsCreateTimeUnit = "ms"
+ DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsWaitTime is the metric conforming to the
+ // "db.client.connections.wait_time" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.wait_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
+ DBClientConnectionsWaitTimeUnit = "ms"
+ DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsUseTime is the metric conforming to the
+ // "db.client.connections.use_time" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.use_time` instead. Note: the unit also
+ // changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsUseTimeName = "db.client.connections.use_time"
+ DBClientConnectionsUseTimeUnit = "ms"
+ DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+ // semantic conventions. It measures the time taken to perform a
+ // DNS lookup.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DNSLookupDurationName = "dns.lookup.duration"
+ DNSLookupDurationUnit = "s"
+ DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
+
+ // AspnetcoreRoutingMatchAttempts is the metric conforming to the
+ // "aspnetcore.routing.match_attempts" semantic conventions. It represents the
+ // number of requests that were attempted to be matched to an endpoint.
+ // Instrument: counter
+ // Unit: {match_attempt}
+ // Stability: Stable
+ AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
+ AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
+ AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
+
+ // AspnetcoreDiagnosticsExceptions is the metric conforming to the
+ // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
+ // number of exceptions caught by exception handling middleware.
+ // Instrument: counter
+ // Unit: {exception}
+ // Stability: Stable
+ AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
+ AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
+ AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
+
+ // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
+ // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
+ // represents the number of requests that are currently active on the server
+ // that hold a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Stable
+ AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
+ AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
+ AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
+ // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
+ // represents the duration of rate limiting lease held by requests on the
+ // server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
+ AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
+ AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
+
+ // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
+ // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
+ // represents the time the request spent in a queue waiting to acquire a rate
+ // limiting lease.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
+ AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
+ AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
+ // represents the number of requests that are currently queued, waiting to
+ // acquire a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Stable
+ AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
+ AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
+ AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
+ // number of requests that tried to acquire a rate limiting lease.
+ // Instrument: counter
+ // Unit: {request}
+ // Stability: Stable
+ AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
+ AspnetcoreRateLimitingRequestsUnit = "{request}"
+ AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
+
+ // KestrelActiveConnections is the metric conforming to the
+ // "kestrel.active_connections" semantic conventions. It represents the number
+ // of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelActiveConnectionsName = "kestrel.active_connections"
+ KestrelActiveConnectionsUnit = "{connection}"
+ KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // KestrelConnectionDuration is the metric conforming to the
+ // "kestrel.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ KestrelConnectionDurationName = "kestrel.connection.duration"
+ KestrelConnectionDurationUnit = "s"
+ KestrelConnectionDurationDescription = "The duration of connections on the server."
+
+ // KestrelRejectedConnections is the metric conforming to the
+ // "kestrel.rejected_connections" semantic conventions. It represents the
+ // number of connections rejected by the server.
+ // Instrument: counter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelRejectedConnectionsName = "kestrel.rejected_connections"
+ KestrelRejectedConnectionsUnit = "{connection}"
+ KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
+
+ // KestrelQueuedConnections is the metric conforming to the
+ // "kestrel.queued_connections" semantic conventions. It represents the number
+ // of connections that are currently queued and are waiting to start.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelQueuedConnectionsName = "kestrel.queued_connections"
+ KestrelQueuedConnectionsUnit = "{connection}"
+ KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
+
+ // KestrelQueuedRequests is the metric conforming to the
+ // "kestrel.queued_requests" semantic conventions. It represents the number of
+ // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
+ // currently queued and are waiting to start.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Stable
+ KestrelQueuedRequestsName = "kestrel.queued_requests"
+ KestrelQueuedRequestsUnit = "{request}"
+ KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
+
+ // KestrelUpgradedConnections is the metric conforming to the
+ // "kestrel.upgraded_connections" semantic conventions. It represents the
+ // number of connections that are currently upgraded (WebSockets).
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+ KestrelUpgradedConnectionsUnit = "{connection}"
+ KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+ // KestrelTLSHandshakeDuration is the metric conforming to the
+ // "kestrel.tls_handshake.duration" semantic conventions. It represents the
+ // duration of TLS handshakes on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+ KestrelTLSHandshakeDurationUnit = "s"
+ KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+ // KestrelActiveTLSHandshakes is the metric conforming to the
+ // "kestrel.active_tls_handshakes" semantic conventions. It represents the
+ // number of TLS handshakes that are currently in progress on the server.
+ // Instrument: updowncounter
+ // Unit: {handshake}
+ // Stability: Stable
+ KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+ KestrelActiveTLSHandshakesUnit = "{handshake}"
+ KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+ // SignalrServerConnectionDuration is the metric conforming to the
+ // "signalr.server.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+ SignalrServerConnectionDurationUnit = "s"
+ SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+ // SignalrServerActiveConnections is the metric conforming to the
+ // "signalr.server.active_connections" semantic conventions. It represents the
+ // number of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+ SignalrServerActiveConnectionsUnit = "{connection}"
+ SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+ // semantic conventions. It measures the duration of the
+ // function's logic execution.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInvokeDurationName = "faas.invoke_duration"
+ FaaSInvokeDurationUnit = "s"
+ FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+ // FaaSInitDuration is the metric conforming to the "faas.init_duration"
+ // semantic conventions. It measures the duration of the
+ // function's initialization, such as a cold start.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInitDurationName = "faas.init_duration"
+ FaaSInitDurationUnit = "s"
+ FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+ // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+ // conventions. It represents the number of invocation cold starts.
+ // Instrument: counter
+ // Unit: {coldstart}
+ // Stability: Experimental
+ FaaSColdstartsName = "faas.coldstarts"
+ FaaSColdstartsUnit = "{coldstart}"
+ FaaSColdstartsDescription = "Number of invocation cold starts"
+
+ // FaaSErrors is the metric conforming to the "faas.errors" semantic
+ // conventions. It represents the number of invocation errors.
+ // Instrument: counter
+ // Unit: {error}
+ // Stability: Experimental
+ FaaSErrorsName = "faas.errors"
+ FaaSErrorsUnit = "{error}"
+ FaaSErrorsDescription = "Number of invocation errors"
+
+ // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
+ // conventions. It represents the number of successful invocations.
+ // Instrument: counter
+ // Unit: {invocation}
+ // Stability: Experimental
+ FaaSInvocationsName = "faas.invocations"
+ FaaSInvocationsUnit = "{invocation}"
+ FaaSInvocationsDescription = "Number of successful invocations"
+
+ // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
+ // conventions. It represents the number of invocation timeouts.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ FaaSTimeoutsName = "faas.timeouts"
+ FaaSTimeoutsUnit = "{timeout}"
+ FaaSTimeoutsDescription = "Number of invocation timeouts"
+
+ // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
+ // conventions. It represents the distribution of max memory usage per
+ // invocation.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ FaaSMemUsageName = "faas.mem_usage"
+ FaaSMemUsageUnit = "By"
+ FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
+
+ // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
+ // conventions. It represents the distribution of CPU usage per invocation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSCPUUsageName = "faas.cpu_usage"
+ FaaSCPUUsageUnit = "s"
+ FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
+
+ // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
+ // conventions. It represents the distribution of net I/O usage per invocation.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ FaaSNetIoName = "faas.net_io"
+ FaaSNetIoUnit = "By"
+ FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
+
+ // HTTPServerRequestDuration is the metric conforming to the
+ // "http.server.request.duration" semantic conventions. It represents the
+ // duration of HTTP server requests.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ HTTPServerRequestDurationName = "http.server.request.duration"
+ HTTPServerRequestDurationUnit = "s"
+ HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
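+
+ // Illustrative sketch (not part of the generated conventions): a duration
+ // histogram such as this one is created once and records one value per
+ // served request, in seconds to match the declared unit:
+ //
+ //     reqDur, _ := meter.Float64Histogram(
+ //         HTTPServerRequestDurationName,
+ //         metric.WithUnit(HTTPServerRequestDurationUnit),
+ //         metric.WithDescription(HTTPServerRequestDurationDescription),
+ //     )
+ //     start := time.Now()
+ //     // ... handle the request ...
+ //     reqDur.Record(ctx, time.Since(start).Seconds())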
+
+ // HTTPServerActiveRequests is the metric conforming to the
+ // "http.server.active_requests" semantic conventions. It represents the number
+ // of active HTTP server requests.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ HTTPServerActiveRequestsName = "http.server.active_requests"
+ HTTPServerActiveRequestsUnit = "{request}"
+ HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
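+
+ // Illustrative sketch (not part of the generated conventions): an
+ // updowncounter is incremented when a request starts and decremented when it
+ // finishes, so the exported value is the in-flight count:
+ //
+ //     active, _ := meter.Int64UpDownCounter(
+ //         HTTPServerActiveRequestsName,
+ //         metric.WithUnit(HTTPServerActiveRequestsUnit),
+ //         metric.WithDescription(HTTPServerActiveRequestsDescription),
+ //     )
+ //     active.Add(ctx, 1)        // request started
+ //     defer active.Add(ctx, -1) // request finished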
+
+ // HTTPServerRequestBodySize is the metric conforming to the
+ // "http.server.request.body.size" semantic conventions. It represents the size
+ // of HTTP server request bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPServerRequestBodySizeName = "http.server.request.body.size"
+ HTTPServerRequestBodySizeUnit = "By"
+ HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
+
+ // HTTPServerResponseBodySize is the metric conforming to the
+ // "http.server.response.body.size" semantic conventions. It represents the
+ // size of HTTP server response bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPServerResponseBodySizeName = "http.server.response.body.size"
+ HTTPServerResponseBodySizeUnit = "By"
+ HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
+
+ // HTTPClientRequestDuration is the metric conforming to the
+ // "http.client.request.duration" semantic conventions. It represents the
+ // duration of HTTP client requests.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ HTTPClientRequestDurationName = "http.client.request.duration"
+ HTTPClientRequestDurationUnit = "s"
+ HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
+
+ // HTTPClientRequestBodySize is the metric conforming to the
+ // "http.client.request.body.size" semantic conventions. It represents the size
+ // of HTTP client request bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPClientRequestBodySizeName = "http.client.request.body.size"
+ HTTPClientRequestBodySizeUnit = "By"
+ HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
+
+ // HTTPClientResponseBodySize is the metric conforming to the
+ // "http.client.response.body.size" semantic conventions. It represents the
+ // size of HTTP client response bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPClientResponseBodySizeName = "http.client.response.body.size"
+ HTTPClientResponseBodySizeUnit = "By"
+ HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
+
+ // HTTPClientOpenConnections is the metric conforming to the
+ // "http.client.open_connections" semantic conventions. It represents the
+ // number of outbound HTTP connections that are currently active or idle on the
+ // client.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ HTTPClientOpenConnectionsName = "http.client.open_connections"
+ HTTPClientOpenConnectionsUnit = "{connection}"
+ HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
+
+ // HTTPClientConnectionDuration is the metric conforming to the
+ // "http.client.connection.duration" semantic conventions. It represents the
+ // duration of the successfully established outbound HTTP connections.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ HTTPClientConnectionDurationName = "http.client.connection.duration"
+ HTTPClientConnectionDurationUnit = "s"
+ HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
+
+ // HTTPClientActiveRequests is the metric conforming to the
+ // "http.client.active_requests" semantic conventions. It represents the number
+ // of active HTTP requests.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ HTTPClientActiveRequestsName = "http.client.active_requests"
+ HTTPClientActiveRequestsUnit = "{request}"
+ HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
+
+ // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
+ // conventions. It represents the measure of initial memory requested.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmMemoryInitName = "jvm.memory.init"
+ JvmMemoryInitUnit = "By"
+ JvmMemoryInitDescription = "Measure of initial memory requested."
+
+ // JvmSystemCPUUtilization is the metric conforming to the
+ // "jvm.system.cpu.utilization" semantic conventions. It represents the recent
+ // CPU utilization for the whole system as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
+ JvmSystemCPUUtilizationUnit = "1"
+ JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
+
+ // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
+ // semantic conventions. It represents the average CPU load of the whole system
+ // for the last minute as reported by the JVM.
+ // Instrument: gauge
+ // Unit: {run_queue_item}
+ // Stability: Experimental
+ JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
+ JvmSystemCPULoad1mUnit = "{run_queue_item}"
+ JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
+
+ // JvmBufferMemoryUsage is the metric conforming to the
+ // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
+ // memory used by buffers.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
+ JvmBufferMemoryUsageUnit = "By"
+ JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
+
+ // JvmBufferMemoryLimit is the metric conforming to the
+ // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
+ // total memory capacity of buffers.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
+ JvmBufferMemoryLimitUnit = "By"
+ JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
+
+ // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
+ // conventions. It represents the number of buffers in the pool.
+ // Instrument: updowncounter
+ // Unit: {buffer}
+ // Stability: Experimental
+ JvmBufferCountName = "jvm.buffer.count"
+ JvmBufferCountUnit = "{buffer}"
+ JvmBufferCountDescription = "Number of buffers in the pool."
+
+ // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
+ // conventions. It represents the measure of memory used.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedName = "jvm.memory.used"
+ JvmMemoryUsedUnit = "By"
+ JvmMemoryUsedDescription = "Measure of memory used."
+
+ // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
+ // semantic conventions. It represents the measure of memory committed.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryCommittedName = "jvm.memory.committed"
+ JvmMemoryCommittedUnit = "By"
+ JvmMemoryCommittedDescription = "Measure of memory committed."
+
+ // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
+ // conventions. It represents the measure of max obtainable memory.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryLimitName = "jvm.memory.limit"
+ JvmMemoryLimitUnit = "By"
+ JvmMemoryLimitDescription = "Measure of max obtainable memory."
+
+ // JvmMemoryUsedAfterLastGc is the metric conforming to the
+ // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+ // measure of memory used, as measured after the most recent garbage collection
+ // event on this pool.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+ JvmMemoryUsedAfterLastGcUnit = "By"
+ JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+ // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+ // conventions. It represents the duration of JVM garbage collection actions.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ JvmGcDurationName = "jvm.gc.duration"
+ JvmGcDurationUnit = "s"
+ JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+ // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+ // conventions. It represents the number of executing platform threads.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Stable
+ JvmThreadCountName = "jvm.thread.count"
+ JvmThreadCountUnit = "{thread}"
+ JvmThreadCountDescription = "Number of executing platform threads."
+
+ // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+ // conventions. It represents the number of classes loaded since JVM start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassLoadedName = "jvm.class.loaded"
+ JvmClassLoadedUnit = "{class}"
+ JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+ // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+ // semantic conventions. It represents the number of classes unloaded since JVM
+ // start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassUnloadedName = "jvm.class.unloaded"
+ JvmClassUnloadedUnit = "{class}"
+ JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+ // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+ // conventions. It represents the number of classes currently loaded.
+ // Instrument: updowncounter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassCountName = "jvm.class.count"
+ JvmClassCountUnit = "{class}"
+ JvmClassCountDescription = "Number of classes currently loaded."
+
+ // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+ // conventions. It represents the number of processors available to the Java
+ // virtual machine.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Stable
+ JvmCPUCountName = "jvm.cpu.count"
+ JvmCPUCountUnit = "{cpu}"
+ JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+ // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+ // conventions. It represents the CPU time used by the process as reported by
+ // the JVM.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Stable
+ JvmCPUTimeName = "jvm.cpu.time"
+ JvmCPUTimeUnit = "s"
+ JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+ // JvmCPURecentUtilization is the metric conforming to the
+ // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
+ // CPU utilization for the process as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Stable
+ JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
+ JvmCPURecentUtilizationUnit = "1"
+ JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
+
+ // MessagingPublishDuration is the metric conforming to the
+ // "messaging.publish.duration" semantic conventions. It represents the
+ // measures the duration of publish operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingPublishDurationName = "messaging.publish.duration"
+ MessagingPublishDurationUnit = "s"
+ MessagingPublishDurationDescription = "Measures the duration of publish operation."
+
+ // MessagingReceiveDuration is the metric conforming to the
+ // "messaging.receive.duration" semantic conventions. It represents the
+ // measures the duration of receive operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingReceiveDurationName = "messaging.receive.duration"
+ MessagingReceiveDurationUnit = "s"
+ MessagingReceiveDurationDescription = "Measures the duration of receive operation."
+
+ // MessagingProcessDuration is the metric conforming to the
+ // "messaging.process.duration" semantic conventions. It represents the
+ // measures the duration of process operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingProcessDurationName = "messaging.process.duration"
+ MessagingProcessDurationUnit = "s"
+ MessagingProcessDurationDescription = "Measures the duration of process operation."
+
+ // MessagingPublishMessages is the metric conforming to the
+ // "messaging.publish.messages" semantic conventions. It represents the
+ // measures the number of published messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingPublishMessagesName = "messaging.publish.messages"
+ MessagingPublishMessagesUnit = "{message}"
+ MessagingPublishMessagesDescription = "Measures the number of published messages."
+
+ // MessagingReceiveMessages is the metric conforming to the
+ // "messaging.receive.messages" semantic conventions. It represents the
+ // measures the number of received messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingReceiveMessagesName = "messaging.receive.messages"
+ MessagingReceiveMessagesUnit = "{message}"
+ MessagingReceiveMessagesDescription = "Measures the number of received messages."
+
+ // MessagingProcessMessages is the metric conforming to the
+ // "messaging.process.messages" semantic conventions. It represents the
+ // measures the number of processed messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingProcessMessagesName = "messaging.process.messages"
+ MessagingProcessMessagesUnit = "{message}"
+ MessagingProcessMessagesDescription = "Measures the number of processed messages."
+
+ // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
+ // conventions. It represents the total CPU seconds broken down by different
+ // states.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ ProcessCPUTimeName = "process.cpu.time"
+ ProcessCPUTimeUnit = "s"
+ ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
+
+ // ProcessCPUUtilization is the metric conforming to the
+ // "process.cpu.utilization" semantic conventions. It represents the difference
+ // in process.cpu.time since the last measurement, divided by the elapsed time
+ // and number of CPUs available to the process.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ ProcessCPUUtilizationName = "process.cpu.utilization"
+ ProcessCPUUtilizationUnit = "1"
+ ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."
+
+ // ProcessMemoryUsage is the metric conforming to the "process.memory.usage"
+ // semantic conventions. It represents the amount of physical memory in use.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ ProcessMemoryUsageName = "process.memory.usage"
+ ProcessMemoryUsageUnit = "By"
+ ProcessMemoryUsageDescription = "The amount of physical memory in use."
+
+ // ProcessMemoryVirtual is the metric conforming to the
+ // "process.memory.virtual" semantic conventions. It represents the amount of
+ // committed virtual memory.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ ProcessMemoryVirtualName = "process.memory.virtual"
+ ProcessMemoryVirtualUnit = "By"
+ ProcessMemoryVirtualDescription = "The amount of committed virtual memory."
+
+ // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic
+ // conventions. It represents the disk bytes transferred.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ProcessDiskIoName = "process.disk.io"
+ ProcessDiskIoUnit = "By"
+ ProcessDiskIoDescription = "Disk bytes transferred."
+
+ // ProcessNetworkIo is the metric conforming to the "process.network.io"
+ // semantic conventions. It represents the network bytes transferred.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ProcessNetworkIoName = "process.network.io"
+ ProcessNetworkIoUnit = "By"
+ ProcessNetworkIoDescription = "Network bytes transferred."
+
+ // ProcessThreadCount is the metric conforming to the "process.thread.count"
+ // semantic conventions. It represents the process threads count.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Experimental
+ ProcessThreadCountName = "process.thread.count"
+ ProcessThreadCountUnit = "{thread}"
+ ProcessThreadCountDescription = "Process threads count."
+
+ // ProcessOpenFileDescriptorCount is the metric conforming to the
+ // "process.open_file_descriptor.count" semantic conventions. It represents the
+ // number of file descriptors in use by the process.
+ // Instrument: updowncounter
+ // Unit: {count}
+ // Stability: Experimental
+ ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count"
+ ProcessOpenFileDescriptorCountUnit = "{count}"
+ ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process."
+
+ // ProcessContextSwitches is the metric conforming to the
+ // "process.context_switches" semantic conventions. It represents the number of
+ // times the process has been context switched.
+ // Instrument: counter
+ // Unit: {count}
+ // Stability: Experimental
+ ProcessContextSwitchesName = "process.context_switches"
+ ProcessContextSwitchesUnit = "{count}"
+ ProcessContextSwitchesDescription = "Number of times the process has been context switched."
+
+ // ProcessPagingFaults is the metric conforming to the "process.paging.faults"
+ // semantic conventions. It represents the number of page faults the process
+ // has made.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ ProcessPagingFaultsName = "process.paging.faults"
+ ProcessPagingFaultsUnit = "{fault}"
+ ProcessPagingFaultsDescription = "Number of page faults the process has made."
+
+ // RPCServerDuration is the metric conforming to the "rpc.server.duration"
+ // semantic conventions. It measures the duration of inbound
+ // RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCServerDurationName = "rpc.server.duration"
+ RPCServerDurationUnit = "ms"
+ RPCServerDurationDescription = "Measures the duration of inbound RPC."
+
+ // RPCServerRequestSize is the metric conforming to the
+ // "rpc.server.request.size" semantic conventions. It represents the measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerRequestSizeName = "rpc.server.request.size"
+ RPCServerRequestSizeUnit = "By"
+ RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCServerResponseSize is the metric conforming to the
+ // "rpc.server.response.size" semantic conventions. It represents the measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerResponseSizeName = "rpc.server.response.size"
+ RPCServerResponseSizeUnit = "By"
+ RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCServerRequestsPerRPC is the metric conforming to the
+ // "rpc.server.requests_per_rpc" semantic conventions. It represents the
+ // measures the number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
+ RPCServerRequestsPerRPCUnit = "{count}"
+ RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCServerResponsesPerRPC is the metric conforming to the
+ // "rpc.server.responses_per_rpc" semantic conventions. It represents the
+ // measures the number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
+ RPCServerResponsesPerRPCUnit = "{count}"
+ RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // RPCClientDuration is the metric conforming to the "rpc.client.duration"
+ // semantic conventions. It measures the duration of outbound
+ // RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCClientDurationName = "rpc.client.duration"
+ RPCClientDurationUnit = "ms"
+ RPCClientDurationDescription = "Measures the duration of outbound RPC."
+
+ // RPCClientRequestSize is the metric conforming to the
+ // "rpc.client.request.size" semantic conventions. It represents the measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientRequestSizeName = "rpc.client.request.size"
+ RPCClientRequestSizeUnit = "By"
+ RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCClientResponseSize is the metric conforming to the
+ // "rpc.client.response.size" semantic conventions. It represents the measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientResponseSizeName = "rpc.client.response.size"
+ RPCClientResponseSizeUnit = "By"
+ RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCClientRequestsPerRPC is the metric conforming to the
+ // "rpc.client.requests_per_rpc" semantic conventions. It represents the
+ // measures the number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
+ RPCClientRequestsPerRPCUnit = "{count}"
+ RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCClientResponsesPerRPC is the metric conforming to the
+ // "rpc.client.responses_per_rpc" semantic conventions. It represents the
+ // measures the number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
+ RPCClientResponsesPerRPCUnit = "{count}"
+ RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
+ // conventions. It represents the seconds each logical CPU spent on each mode.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemCPUTimeName = "system.cpu.time"
+ SystemCPUTimeUnit = "s"
+ SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
+
+ // SystemCPUUtilization is the metric conforming to the
+ // "system.cpu.utilization" semantic conventions. It represents the difference
+ // in system.cpu.time since the last measurement, divided by the elapsed time
+ // and number of logical CPUs.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ SystemCPUUtilizationName = "system.cpu.utilization"
+ SystemCPUUtilizationUnit = "1"
+ SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
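+
+ // Illustrative sketch (not part of the generated conventions): gauges like
+ // this utilization metric are typically registered as observables sampled at
+ // collection time; readCPUUtilization stands in for whatever platform probe
+ // supplies the value:
+ //
+ //     _, _ = meter.Float64ObservableGauge(
+ //         SystemCPUUtilizationName,
+ //         metric.WithUnit(SystemCPUUtilizationUnit),
+ //         metric.WithDescription(SystemCPUUtilizationDescription),
+ //         metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
+ //             o.Observe(readCPUUtilization()) // fraction in [0, 1]
+ //             return nil
+ //         }),
+ //     )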
+
+ // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
+ // semantic conventions. It reports the current frequency of the
+ // CPU in Hz.
+ // Instrument: gauge
+ // Unit: {Hz}
+ // Stability: Experimental
+ SystemCPUFrequencyName = "system.cpu.frequency"
+ SystemCPUFrequencyUnit = "{Hz}"
+ SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
+
+ // SystemCPUPhysicalCount is the metric conforming to the
+ // "system.cpu.physical.count" semantic conventions. It represents the reports
+ // the number of actual physical processor cores on the hardware.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPUPhysicalCountName = "system.cpu.physical.count"
+ SystemCPUPhysicalCountUnit = "{cpu}"
+ SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
+
+ // SystemCPULogicalCount is the metric conforming to the
+ // "system.cpu.logical.count" semantic conventions. It represents the reports
+ // the number of logical (virtual) processor cores created by the operating
+ // system to manage multitasking.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPULogicalCountName = "system.cpu.logical.count"
+ SystemCPULogicalCountUnit = "{cpu}"
+ SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
+
+ // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
+ // semantic conventions. It reports memory in use by state.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryUsageName = "system.memory.usage"
+ SystemMemoryUsageUnit = "By"
+ SystemMemoryUsageDescription = "Reports memory in use by state."
+
+ // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
+ // semantic conventions. It represents the total memory available in the
+ // system.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryLimitName = "system.memory.limit"
+ SystemMemoryLimitUnit = "By"
+ SystemMemoryLimitDescription = "Total memory available in the system."
+
+ // SystemMemoryShared is the metric conforming to the "system.memory.shared"
+ // semantic conventions. It represents the shared memory used (mostly by
+ // tmpfs).
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemorySharedName = "system.memory.shared"
+ SystemMemorySharedUnit = "By"
+ SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
+
+ // SystemMemoryUtilization is the metric conforming to the
+ // "system.memory.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemMemoryUtilizationName = "system.memory.utilization"
+ SystemMemoryUtilizationUnit = "1"
+
+ // SystemPagingUsage is the metric conforming to the "system.paging.usage"
+ // semantic conventions. It represents the Unix swap or Windows pagefile usage.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemPagingUsageName = "system.paging.usage"
+ SystemPagingUsageUnit = "By"
+ SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
+
+ // SystemPagingUtilization is the metric conforming to the
+ // "system.paging.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingUtilizationName = "system.paging.utilization"
+ SystemPagingUtilizationUnit = "1"
+
+ // SystemPagingFaults is the metric conforming to the "system.paging.faults"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingFaultsName = "system.paging.faults"
+ SystemPagingFaultsUnit = "{fault}"
+
+ // SystemPagingOperations is the metric conforming to the
+ // "system.paging.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingOperationsName = "system.paging.operations"
+ SystemPagingOperationsUnit = "{operation}"
+
+ // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskIoName = "system.disk.io"
+ SystemDiskIoUnit = "By"
+
+ // SystemDiskOperations is the metric conforming to the
+ // "system.disk.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskOperationsName = "system.disk.operations"
+ SystemDiskOperationsUnit = "{operation}"
+
+ // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
+ // semantic conventions. It represents the time disk spent activated.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskIoTimeName = "system.disk.io_time"
+ SystemDiskIoTimeUnit = "s"
+ SystemDiskIoTimeDescription = "Time disk spent activated"
+
+ // SystemDiskOperationTime is the metric conforming to the
+ // "system.disk.operation_time" semantic conventions. It represents the sum of
+ // the time each operation took to complete.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskOperationTimeName = "system.disk.operation_time"
+ SystemDiskOperationTimeUnit = "s"
+ SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
+
+ // SystemDiskMerged is the metric conforming to the "system.disk.merged"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskMergedName = "system.disk.merged"
+ SystemDiskMergedUnit = "{operation}"
+
+ // SystemFilesystemUsage is the metric conforming to the
+ // "system.filesystem.usage" semantic conventions.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemFilesystemUsageName = "system.filesystem.usage"
+ SystemFilesystemUsageUnit = "By"
+
+ // SystemFilesystemUtilization is the metric conforming to the
+ // "system.filesystem.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemFilesystemUtilizationName = "system.filesystem.utilization"
+ SystemFilesystemUtilizationUnit = "1"
+
+ // SystemNetworkDropped is the metric conforming to the
+ // "system.network.dropped" semantic conventions. It represents the count of
+ // packets that are dropped or discarded even though there was no error.
+ // Instrument: counter
+ // Unit: {packet}
+ // Stability: Experimental
+ SystemNetworkDroppedName = "system.network.dropped"
+ SystemNetworkDroppedUnit = "{packet}"
+ SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
+
+ // SystemNetworkPackets is the metric conforming to the
+ // "system.network.packets" semantic conventions.
+ // Instrument: counter
+ // Unit: {packet}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkPacketsName = "system.network.packets"
+ SystemNetworkPacketsUnit = "{packet}"
+
+ // SystemNetworkErrors is the metric conforming to the "system.network.errors"
+ // semantic conventions. It represents the count of network errors detected.
+ // Instrument: counter
+ // Unit: {error}
+ // Stability: Experimental
+ SystemNetworkErrorsName = "system.network.errors"
+ SystemNetworkErrorsUnit = "{error}"
+ SystemNetworkErrorsDescription = "Count of network errors detected"
+
+ // SystemNetworkIo is the metric conforming to the "system.network.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkIoName = "system.network.io"
+ SystemNetworkIoUnit = "By"
+
+ // SystemNetworkConnections is the metric conforming to the
+ // "system.network.connections" semantic conventions.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkConnectionsName = "system.network.connections"
+ SystemNetworkConnectionsUnit = "{connection}"
+
+ // SystemProcessCount is the metric conforming to the "system.process.count"
+ // semantic conventions. It represents the total number of processes in each
+ // state.
+ // Instrument: updowncounter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessCountName = "system.process.count"
+ SystemProcessCountUnit = "{process}"
+ SystemProcessCountDescription = "Total number of processes in each state"
+
+ // SystemProcessCreated is the metric conforming to the
+ // "system.process.created" semantic conventions. It represents the total
+ // number of processes created over uptime of the host.
+ // Instrument: counter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessCreatedName = "system.process.created"
+ SystemProcessCreatedUnit = "{process}"
+ SystemProcessCreatedDescription = "Total number of processes created over uptime of the host"
+
+ // SystemLinuxMemoryAvailable is the metric conforming to the
+ // "system.linux.memory.available" semantic conventions. It represents an
+ // estimate of how much memory is available for starting new applications,
+ // without causing swapping.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemLinuxMemoryAvailableName = "system.linux.memory.available"
+ SystemLinuxMemoryAvailableUnit = "By"
+ SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
+)
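
The generated Name/Unit/Description triples above are meant to be used together when registering instruments, so the wire name, unit, and brief stay aligned with the conventions. A minimal sketch, assuming an OTel SDK meter provider is configured elsewhere (the meter name and recorded value are illustrative):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	meter := otel.Meter("example/system-stats")

	// Register the "system.process.count" updowncounter with its
	// conventional unit ("{process}") and description.
	procs, err := meter.Int64UpDownCounter(
		semconv.SystemProcessCountName,
		metric.WithUnit(semconv.SystemProcessCountUnit),
		metric.WithDescription(semconv.SystemProcessCountDescription),
	)
	if err != nil {
		panic(err)
	}
	procs.Add(context.Background(), 1) // one more process observed
}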
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
new file mode 100644
index 000000000000..4c87c7adcc75
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// a non-empty schema URL in the form https://opentelemetry.io/schemas/
+const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
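
SchemaURL is what callers pass alongside this package's attributes so consumers can resolve the exact conventions version. A short sketch using the SDK resource package (the service name is illustrative):

package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	// Attributes and the schema URL from the same semconv version travel together.
	_ = resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("containerd"),
	)
}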
diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go
index caf7249de859..6836c65478b4 100644
--- a/vendor/go.opentelemetry.io/otel/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md
new file mode 100644
index 000000000000..58ccaba69b1e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/README.md
@@ -0,0 +1,3 @@
+# Trace API
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace)
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index 3aadc66cf7a7..273d58e00146 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
index 76f9a083c409..5650a174b4a0 100644
--- a/vendor/go.opentelemetry.io/otel/trace/context.go
+++ b/vendor/go.opentelemetry.io/otel/trace/context.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
@@ -47,12 +36,12 @@ func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) conte
// performs no operations is returned.
func SpanFromContext(ctx context.Context) Span {
if ctx == nil {
- return noopSpan{}
+ return noopSpanInstance
}
if span, ok := ctx.Value(currentSpanKey).(Span); ok {
return span
}
- return noopSpan{}
+ return noopSpanInstance
}
// SpanContextFromContext returns the current Span's SpanContext.
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go
index 440f3d7565a1..d661c5d100f7 100644
--- a/vendor/go.opentelemetry.io/otel/trace/doc.go
+++ b/vendor/go.opentelemetry.io/otel/trace/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package trace provides an implementation of the tracing part of the
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md
new file mode 100644
index 000000000000..7754a239ee61
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md
@@ -0,0 +1,3 @@
+# Trace Embedded
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
index 898db5a7546e..3e359a00bf47 100644
--- a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
+++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package embedded provides interfaces embedded within the [OpenTelemetry
// trace API].
diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
index 88fcb81611f9..c00221e7be9f 100644
--- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
+++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
index c125491caebf..ca20e9997aba 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
@@ -52,7 +41,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption
span := SpanFromContext(ctx)
if _, ok := span.(nonRecordingSpan); !ok {
// span is likely already a noopSpan, but let's be sure
- span = noopSpan{}
+ span = noopSpanInstance
}
return ContextWithSpan(ctx, span), span
}
@@ -60,7 +49,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption
// noopSpan is an implementation of Span that performs no operations.
type noopSpan struct{ embedded.Span }
-var _ Span = noopSpan{}
+var noopSpanInstance Span = noopSpan{}
// SpanContext returns an empty span context.
func (noopSpan) SpanContext() SpanContext { return SpanContext{} }
@@ -86,6 +75,9 @@ func (noopSpan) RecordError(error, ...EventOption) {}
// AddEvent does nothing.
func (noopSpan) AddEvent(string, ...EventOption) {}
+// AddLink does nothing.
+func (noopSpan) AddLink(Link) {}
+
// SetName does nothing.
func (noopSpan) SetName(string) {}
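
The change above swaps the per-call noopSpan{} literals for a single shared noopSpanInstance and adds the AddLink no-op required by the updated Span interface. A small sketch of the observable behavior (illustrative only):

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// No span was started on this context, so the shared no-op span is returned.
	span := trace.SpanFromContext(context.Background())
	fmt.Println(span.IsRecording()) // false
	span.AddLink(trace.Link{})      // safe: does nothing on a no-op span
}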
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/vendor/go.opentelemetry.io/otel/trace/noop/README.md
new file mode 100644
index 000000000000..cd382c82a1a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/README.md
@@ -0,0 +1,3 @@
+# Trace Noop
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop)
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
index 7f485543c47d..64a4f1b362f6 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package noop provides an implementation of the OpenTelemetry trace API that
// produces no telemetry and minimizes used computation resources.
@@ -78,11 +67,13 @@ func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption)
span = Span{sc: sc}
} else {
// No parent, return a No-Op span with an empty span context.
- span = Span{}
+ span = noopSpanInstance
}
return trace.ContextWithSpan(ctx, span), span
}
+var noopSpanInstance trace.Span = Span{}
+
// Span is an OpenTelemetry No-Op Span.
type Span struct {
embedded.Span
@@ -111,6 +102,9 @@ func (Span) RecordError(error, ...trace.EventOption) {}
// AddEvent does nothing.
func (Span) AddEvent(string, ...trace.EventOption) {}
+// AddLink does nothing.
+func (Span) AddLink(trace.Link) {}
+
// SetName does nothing.
func (Span) SetName(string) {}
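
Like the embedded no-op above, the trace/noop package gains a package-level noopSpanInstance and an AddLink no-op; it remains the zero-telemetry default, e.g. in tests. A minimal sketch:

package main

import (
	"context"

	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	tp := noop.NewTracerProvider()
	tracer := tp.Tracer("test")

	// Start returns a usable context/span pair but produces no telemetry.
	ctx, span := tracer.Start(context.Background(), "operation")
	defer span.End()
	_ = ctx
}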
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
index 26a4b2260ec6..28877d4ab4df 100644
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
@@ -361,6 +350,12 @@ type Span interface {
// AddEvent adds an event with the provided name and options.
AddEvent(name string, options ...EventOption)
+ // AddLink adds a link.
+ // Adding links at span creation using WithLinks is preferred to calling AddLink
+ // later, for contexts that are available during span creation, because head
+ // sampling decisions can only consider information present during span creation.
+ AddLink(link Link)
+
// IsRecording returns the recording state of the Span. It will return
// true if the Span is active and events can be recorded.
IsRecording() bool
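
To illustrate the preference the new AddLink documentation describes: link contexts known up front should go through WithLinks so head samplers can see them, while AddLink covers contexts discovered only after the span starts. A sketch (the tracer and span contexts are assumed to come from elsewhere):

package example

import (
	"context"

	"go.opentelemetry.io/otel/trace"
)

func process(ctx context.Context, tracer trace.Tracer, known, late trace.SpanContext) {
	// Preferred: the link is present at creation, visible to head sampling.
	_, span := tracer.Start(ctx, "process",
		trace.WithLinks(trace.Link{SpanContext: known}))
	defer span.End()

	// Fallback: this context only became available after the span started.
	span.AddLink(trace.Link{SpanContext: late})
}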
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index d1e47ca2faac..20b5cf24332d 100644
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -1,36 +1,19 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
import (
"encoding/json"
"fmt"
- "regexp"
"strings"
)
const (
maxListMembers = 32
- listDelimiter = ","
-
- // based on the W3C Trace Context specification, see
- // https://www.w3.org/TR/trace-context-1/#tracestate-header
- noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*`
- withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*`
- valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
+ listDelimiters = ","
+ memberDelimiter = "="
errInvalidKey errorConst = "invalid tracestate key"
errInvalidValue errorConst = "invalid tracestate value"
@@ -39,43 +22,128 @@ const (
errDuplicate errorConst = "duplicate list-member in tracestate"
)
-var (
- noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`)
- withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`)
- valueRe = regexp.MustCompile(`^` + valueFormat + `$`)
- memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`)
-)
-
type member struct {
Key string
Value string
}
-func newMember(key, value string) (member, error) {
- if len(key) > 256 {
- return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
+// according to (chr = %x20 / (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E)),
+// which means (chr = %x20-2B / %x2D-3C / %x3E-7E).
+func checkValueChar(v byte) bool {
+ return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) .
+func checkValueLast(v byte) bool {
+ return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// based on the W3C Trace Context specification
+//
+// value = (0*255(chr)) nblk-chr
+// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
+// chr = %x20 / nblk-chr
+//
+// see https://www.w3.org/TR/trace-context-1/#value
+func checkValue(val string) bool {
+ n := len(val)
+ if n == 0 || n > 256 {
+ return false
}
- if !noTenantKeyRe.MatchString(key) {
- if !withTenantKeyRe.MatchString(key) {
- return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
+ for i := 0; i < n-1; i++ {
+ if !checkValueChar(val[i]) {
+ return false
}
- atIndex := strings.LastIndex(key, "@")
- if atIndex > 241 || len(key)-1-atIndex > 14 {
- return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
+ }
+ return checkValueLast(val[n-1])
+}
+
+func checkKeyRemain(key string) bool {
+ // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+ for _, v := range key {
+ if isAlphaNum(byte(v)) {
+ continue
}
+ switch v {
+ case '_', '-', '*', '/':
+ continue
+ }
+ return false
}
- if len(value) > 256 || !valueRe.MatchString(value) {
- return member{}, fmt.Errorf("%w: %s", errInvalidValue, value)
+ return true
+}
+
+// according to
+//
+// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//
+// param n is the remaining part length; it should be 255 for simple-key or 13 for system-id.
+func checkKeyPart(key string, n int) bool {
+ if len(key) == 0 {
+ return false
+ }
+ first := key[0] // key's first char
+ ret := len(key[1:]) <= n
+ ret = ret && first >= 'a' && first <= 'z'
+ return ret && checkKeyRemain(key[1:])
+}
+
+func isAlphaNum(c byte) bool {
+ if c >= 'a' && c <= 'z' {
+ return true
+ }
+ return c >= '0' && c <= '9'
+}
+
+// according to
+//
+// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+//
+// param n is the remaining part length; it should be exactly 240.
+func checkKeyTenant(key string, n int) bool {
+ if len(key) == 0 {
+ return false
+ }
+ return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
+}
+
+// based on the W3C Trace Context specification
+//
+// key = simple-key / multi-tenant-key
+// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// multi-tenant-key = tenant-id "@" system-id
+// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// lcalpha = %x61-7A ; a-z
+//
+// see https://www.w3.org/TR/trace-context-1/#tracestate-header.
+func checkKey(key string) bool {
+ tenant, system, ok := strings.Cut(key, "@")
+ if !ok {
+ return checkKeyPart(key, 255)
+ }
+ return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13)
+}
+
+func newMember(key, value string) (member, error) {
+ if !checkKey(key) {
+ return member{}, errInvalidKey
+ }
+ if !checkValue(value) {
+ return member{}, errInvalidValue
}
return member{Key: key, Value: value}, nil
}
func parseMember(m string) (member, error) {
- matches := memberRe.FindStringSubmatch(m)
- if len(matches) != 3 {
+ key, val, ok := strings.Cut(m, memberDelimiter)
+ if !ok {
return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
}
- result, e := newMember(matches[1], matches[2])
+ key = strings.TrimLeft(key, " \t")
+ val = strings.TrimRight(val, " \t")
+ result, e := newMember(key, val)
if e != nil {
return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
}
@@ -85,7 +153,7 @@ func parseMember(m string) (member, error) {
// String encodes member into a string compliant with the W3C Trace Context
// specification.
func (m member) String() string {
- return fmt.Sprintf("%s=%s", m.Key, m.Value)
+ return m.Key + "=" + m.Value
}
// TraceState provides additional vendor-specific trace identification
@@ -109,8 +177,8 @@ var _ json.Marshaler = TraceState{}
// ParseTraceState attempts to decode a TraceState from the passed
// string. It returns an error if the input is invalid according to the W3C
// Trace Context specification.
-func ParseTraceState(tracestate string) (TraceState, error) {
- if tracestate == "" {
+func ParseTraceState(ts string) (TraceState, error) {
+ if ts == "" {
return TraceState{}, nil
}
@@ -120,7 +188,9 @@ func ParseTraceState(tracestate string) (TraceState, error) {
var members []member
found := make(map[string]struct{})
- for _, memberStr := range strings.Split(tracestate, listDelimiter) {
+ for ts != "" {
+ var memberStr string
+ memberStr, ts, _ = strings.Cut(ts, listDelimiters)
if len(memberStr) == 0 {
continue
}
@@ -153,11 +223,29 @@ func (ts TraceState) MarshalJSON() ([]byte, error) {
// Trace Context specification. The returned string will be invalid if the
// TraceState contains any invalid members.
func (ts TraceState) String() string {
- members := make([]string, len(ts.list))
- for i, m := range ts.list {
- members[i] = m.String()
+ if len(ts.list) == 0 {
+ return ""
+ }
+ var n int
+ n += len(ts.list) // member delimiters: '='
+ n += len(ts.list) - 1 // list delimiters: ','
+ for _, mem := range ts.list {
+ n += len(mem.Key)
+ n += len(mem.Value)
}
- return strings.Join(members, listDelimiter)
+
+ var sb strings.Builder
+ sb.Grow(n)
+ _, _ = sb.WriteString(ts.list[0].Key)
+ _ = sb.WriteByte('=')
+ _, _ = sb.WriteString(ts.list[0].Value)
+ for i := 1; i < len(ts.list); i++ {
+ _ = sb.WriteByte(listDelimiters[0])
+ _, _ = sb.WriteString(ts.list[i].Key)
+ _ = sb.WriteByte('=')
+ _, _ = sb.WriteString(ts.list[i].Value)
+ }
+ return sb.String()
}
// Get returns the value paired with key from the corresponding TraceState
@@ -189,15 +277,25 @@ func (ts TraceState) Insert(key, value string) (TraceState, error) {
if err != nil {
return ts, err
}
-
- cTS := ts.Delete(key)
- if cTS.Len()+1 <= maxListMembers {
- cTS.list = append(cTS.list, member{})
+ n := len(ts.list)
+ found := n
+ for i := range ts.list {
+ if ts.list[i].Key == key {
+ found = i
+ }
+ }
+ cTS := TraceState{}
+ if found == n && n < maxListMembers {
+ cTS.list = make([]member, n+1)
+ } else {
+ cTS.list = make([]member, n)
}
- // When the number of members exceeds capacity, drop the "right-most".
- copy(cTS.list[1:], cTS.list)
cTS.list[0] = m
-
+ // When the number of members exceeds capacity, drop the "right-most".
+ copy(cTS.list[1:], ts.list[0:found])
+ if found < n {
+ copy(cTS.list[1+found:], ts.list[found+1:])
+ }
return cTS, nil
}
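
The regexp-based validation above is replaced with hand-rolled grammar checks, and Insert's copy logic is rewritten, but the public W3C behavior is unchanged. A quick round-trip sketch:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	ts, err := trace.ParseTraceState("vendorb=b,vendora=a")
	if err != nil {
		panic(err)
	}

	// Insert places the (updated) member left-most; at capacity (32 members)
	// the right-most member is dropped.
	ts, _ = ts.Insert("vendora", "a2")
	fmt.Println(ts.String()) // vendora=a2,vendorb=b

	// Keys and values violating the W3C grammar are rejected.
	if _, err := trace.ParseTraceState("UPPER=nope"); err != nil {
		fmt.Println("invalid:", err)
	}
}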
diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh
index dbb61a422795..e57bf57fce82 100644
--- a/vendor/go.opentelemetry.io/otel/verify_examples.sh
+++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh
@@ -1,18 +1,7 @@
#!/bin/bash
# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh
new file mode 100644
index 000000000000..1e87855eeaa8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_readmes.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort)
+
+missingReadme=false
+for dir in $dirs; do
+ if [ ! -f "$dir/README.md" ]; then
+ echo "couldn't find README.md for $dir"
+ missingReadme=true
+ fi
+done
+
+if [ "$missingReadme" = true ] ; then
+ echo "Error: some READMEs couldn't be found."
+ exit 1
+fi
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index e2f743585d1d..ab28960524b8 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -1,20 +1,9 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.21.0"
+ return "1.28.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 3c153c9d6fc6..241cfc82a8d0 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -1,32 +1,27 @@
# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-License-Identifier: Apache-2.0
module-sets:
stable-v1:
- version: v1.21.0
+ version: v1.28.0
modules:
- go.opentelemetry.io/otel
+ - go.opentelemetry.io/otel/bridge/opencensus
+ - go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/bridge/opentracing
- go.opentelemetry.io/otel/bridge/opentracing/test
- go.opentelemetry.io/otel/example/dice
- go.opentelemetry.io/otel/example/namedtracer
+ - go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/otel-collector
- go.opentelemetry.io/otel/example/passthrough
- go.opentelemetry.io/otel/example/zipkin
+ - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
+ - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/otlp/otlptrace
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
+ - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace
- go.opentelemetry.io/otel/exporters/zipkin
- go.opentelemetry.io/otel/metric
@@ -34,19 +29,21 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.44.0
+ version: v0.50.0
modules:
- - go.opentelemetry.io/otel/bridge/opencensus
- - go.opentelemetry.io/otel/bridge/opencensus/test
- - go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/prometheus
- - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/prometheus
- - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
+ experimental-logs:
+ version: v0.4.0
+ modules:
+ - go.opentelemetry.io/otel/log
+ - go.opentelemetry.io/otel/sdk/log
+ - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
+ - go.opentelemetry.io/otel/exporters/stdout/stdoutlog
experimental-schema:
- version: v0.0.7
+ version: v0.0.8
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:
- go.opentelemetry.io/otel/internal/tools
+ - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
index 51a499816a6c..d7099c35bc41 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
@@ -36,6 +36,78 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is protobuf 'fixed32' type and is to
+// be used as bit-fields. Each non-zero value defined in this enum is
+// a bit-mask. To extract the bit-field, for example, use an
+// expression like:
+//
+// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field; consequently, consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+ // The zero value for the enum. Should not be used for comparisons.
+ // Instead use bitwise "and" with the appropriate mask as shown above.
+ SpanFlags_SPAN_FLAGS_DO_NOT_USE SpanFlags = 0
+ // Bits 0-7 are used for trace flags.
+ SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK SpanFlags = 255
+ // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+ // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+ // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+ SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK SpanFlags = 256
+ SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK SpanFlags = 512
+)
+
+// Enum value maps for SpanFlags.
+var (
+ SpanFlags_name = map[int32]string{
+ 0: "SPAN_FLAGS_DO_NOT_USE",
+ 255: "SPAN_FLAGS_TRACE_FLAGS_MASK",
+ 256: "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK",
+ 512: "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK",
+ }
+ SpanFlags_value = map[string]int32{
+ "SPAN_FLAGS_DO_NOT_USE": 0,
+ "SPAN_FLAGS_TRACE_FLAGS_MASK": 255,
+ "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK": 256,
+ "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK": 512,
+ }
+)
+
+func (x SpanFlags) Enum() *SpanFlags {
+ p := new(SpanFlags)
+ *p = x
+ return p
+}
+
+func (x SpanFlags) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SpanFlags) Descriptor() protoreflect.EnumDescriptor {
+ return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0].Descriptor()
+}
+
+func (SpanFlags) Type() protoreflect.EnumType {
+ return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0]
+}
+
+func (x SpanFlags) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SpanFlags.Descriptor instead.
+func (SpanFlags) EnumDescriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0}
+}
+
// SpanKind is the type of span. Can be used to specify additional relationships between spans
// in addition to a parent/child relationship.
type Span_SpanKind int32
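
A sketch of consuming the new SpanFlags masks on a decoded OTLP span (tracepb is an assumed alias for this generated package; note the remote bit is only meaningful when the has-is-remote bit is set):

package example

import (
	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)

// parentRemote reports whether the span's parent is remote and whether that
// state is known at all, per the bit-field layout documented above.
func parentRemote(span *tracepb.Span) (isRemote, known bool) {
	flags := span.GetFlags()
	known = flags&uint32(tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0
	isRemote = known && flags&uint32(tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0
	return isRemote, known
}

// traceFlags extracts the 8-bit W3C trace flags carried in bits 0-7.
func traceFlags(span *tracepb.Span) uint8 {
	return uint8(span.GetFlags() & uint32(tracepb.SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK))
}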
@@ -94,11 +166,11 @@ func (x Span_SpanKind) String() string {
}
func (Span_SpanKind) Descriptor() protoreflect.EnumDescriptor {
- return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0].Descriptor()
+ return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1].Descriptor()
}
func (Span_SpanKind) Type() protoreflect.EnumType {
- return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0]
+ return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1]
}
func (x Span_SpanKind) Number() protoreflect.EnumNumber {
@@ -149,11 +221,11 @@ func (x Status_StatusCode) String() string {
}
func (Status_StatusCode) Descriptor() protoreflect.EnumDescriptor {
- return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1].Descriptor()
+ return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2].Descriptor()
}
func (Status_StatusCode) Type() protoreflect.EnumType {
- return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1]
+ return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2]
}
func (x Status_StatusCode) Number() protoreflect.EnumNumber {
@@ -238,6 +310,9 @@ type ResourceSpans struct {
Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
// A list of ScopeSpans that originate from a resource.
ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the resource data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "scope_spans" field which have their own schema_url field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
@@ -308,6 +383,9 @@ type ScopeSpans struct {
Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"`
// A list of Spans that originate from an instrumentation scope.
Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the span data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to all spans and span events in the "spans" field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}
@@ -394,6 +472,28 @@ type Span struct {
// The `span_id` of this span's parent span. If this is a root span, then this
// field must be empty. The ID is an 8-byte array.
ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in the
+ // W3C Trace Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether a span's parent
+ // is remote. The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // When creating span messages, if the message is logically forwarded from another source
+ // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
+ // be copied as-is. If creating from a source that does not have an equivalent flags field
+ // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
+ // be set to zero.
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ //
+ // [Optional].
+ Flags uint32 `protobuf:"fixed32,16,opt,name=flags,proto3" json:"flags,omitempty"`
// A description of the span's operation.
//
// For example, the name can be a qualified method name or a file name
@@ -517,6 +617,13 @@ func (x *Span) GetParentSpanId() []byte {
return nil
}
+func (x *Span) GetFlags() uint32 {
+ if x != nil {
+ return x.Flags
+ }
+ return 0
+}
+
func (x *Span) GetName() string {
if x != nil {
return x.Name
@@ -757,6 +864,24 @@ type Span_Link struct {
// dropped_attributes_count is the number of dropped attributes. If the value is 0,
// then no attributes were dropped.
DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in the
+ // W3C Trace Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether the link is remote.
+ // The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ // When creating new spans, bits 10-31 (the 22 most significant bits) MUST be zero.
+ //
+ // [Optional].
+ Flags uint32 `protobuf:"fixed32,6,opt,name=flags,proto3" json:"flags,omitempty"`
}
func (x *Span_Link) Reset() {
@@ -826,6 +951,13 @@ func (x *Span_Link) GetDroppedAttributesCount() uint32 {
return 0
}
+func (x *Span_Link) GetFlags() uint32 {
+ if x != nil {
+ return x.Flags
+ }
+ return 0
+}
+
var File_opentelemetry_proto_trace_v1_trace_proto protoreflect.FileDescriptor
var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{
@@ -869,7 +1001,7 @@ var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{
0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05,
0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f,
0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x55, 0x72, 0x6c, 0x22, 0x9c, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a,
+ 0x61, 0x55, 0x72, 0x6c, 0x22, 0xc8, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a,
0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e,
0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49,
@@ -877,101 +1009,113 @@ var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61,
0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61,
0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04,
- 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65,
- 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53,
- 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a,
- 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78,
- 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61,
- 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b,
- 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f,
- 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54,
- 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61,
- 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
- 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62,
- 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f,
- 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
- 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41,
- 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40,
- 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28,
- 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70,
- 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73,
- 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e,
- 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12,
- 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75,
- 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
- 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
- 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e,
- 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11,
- 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e,
- 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
- 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a,
- 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d,
- 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12,
- 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
- 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
- 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18,
- 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
- 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16,
- 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
- 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xde, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12,
- 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70,
- 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61,
- 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61,
- 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53,
- 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
- 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
- 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a,
- 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
- 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
- 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e,
- 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e,
- 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
- 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54,
- 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f,
- 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a,
- 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e,
- 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44,
- 0x5f, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53,
- 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45,
- 0x52, 0x10, 0x05, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18,
- 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
- 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61,
- 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a,
- 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53,
- 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54,
- 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44,
- 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53,
- 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08,
- 0x01, 0x10, 0x02, 0x42, 0x77, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67,
+ 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b,
+ 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78,
+ 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06,
+ 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e,
+ 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18,
+ 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65,
+ 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a,
+ 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72,
+ 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72,
+ 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
+ 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65,
+ 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
+ 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
- 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
- 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
- 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c,
- 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b,
+ 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70,
+ 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e,
+ 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12,
+ 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e,
+ 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69,
+ 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65,
+ 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+ 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xf4, 0x01, 0x0a,
+ 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64,
+ 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b,
+ 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61,
+ 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a,
+ 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c,
+ 0x61, 0x67, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64,
+ 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e,
+ 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53,
+ 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41,
+ 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44,
+ 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41,
+ 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12,
+ 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x4f,
+ 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f,
+ 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, 0x52, 0x10, 0x05, 0x22,
+ 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43,
+ 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55,
+ 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x12,
+ 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b,
+ 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44,
+ 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x2a,
+ 0x9c, 0x01, 0x0a, 0x09, 0x53, 0x70, 0x61, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x19, 0x0a,
+ 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e,
+ 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1b, 0x53, 0x50, 0x41, 0x4e,
+ 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c, 0x41,
+ 0x47, 0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x12, 0x2a, 0x0a, 0x25, 0x53, 0x50,
+ 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54,
+ 0x5f, 0x48, 0x41, 0x53, 0x5f, 0x49, 0x53, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d,
+ 0x41, 0x53, 0x4b, 0x10, 0x80, 0x02, 0x12, 0x26, 0x0a, 0x21, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46,
+ 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54, 0x5f, 0x49, 0x53, 0x5f,
+ 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0x80, 0x04, 0x42, 0x77,
+ 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
+ 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+ 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x54,
+ 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54,
+ 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -986,36 +1130,37 @@ func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte {
return file_opentelemetry_proto_trace_v1_trace_proto_rawDescData
}
-var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{
- (Span_SpanKind)(0), // 0: opentelemetry.proto.trace.v1.Span.SpanKind
- (Status_StatusCode)(0), // 1: opentelemetry.proto.trace.v1.Status.StatusCode
- (*TracesData)(nil), // 2: opentelemetry.proto.trace.v1.TracesData
- (*ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans
- (*ScopeSpans)(nil), // 4: opentelemetry.proto.trace.v1.ScopeSpans
- (*Span)(nil), // 5: opentelemetry.proto.trace.v1.Span
- (*Status)(nil), // 6: opentelemetry.proto.trace.v1.Status
- (*Span_Event)(nil), // 7: opentelemetry.proto.trace.v1.Span.Event
- (*Span_Link)(nil), // 8: opentelemetry.proto.trace.v1.Span.Link
- (*v1.Resource)(nil), // 9: opentelemetry.proto.resource.v1.Resource
- (*v11.InstrumentationScope)(nil), // 10: opentelemetry.proto.common.v1.InstrumentationScope
- (*v11.KeyValue)(nil), // 11: opentelemetry.proto.common.v1.KeyValue
+ (SpanFlags)(0), // 0: opentelemetry.proto.trace.v1.SpanFlags
+ (Span_SpanKind)(0), // 1: opentelemetry.proto.trace.v1.Span.SpanKind
+ (Status_StatusCode)(0), // 2: opentelemetry.proto.trace.v1.Status.StatusCode
+ (*TracesData)(nil), // 3: opentelemetry.proto.trace.v1.TracesData
+ (*ResourceSpans)(nil), // 4: opentelemetry.proto.trace.v1.ResourceSpans
+ (*ScopeSpans)(nil), // 5: opentelemetry.proto.trace.v1.ScopeSpans
+ (*Span)(nil), // 6: opentelemetry.proto.trace.v1.Span
+ (*Status)(nil), // 7: opentelemetry.proto.trace.v1.Status
+ (*Span_Event)(nil), // 8: opentelemetry.proto.trace.v1.Span.Event
+ (*Span_Link)(nil), // 9: opentelemetry.proto.trace.v1.Span.Link
+ (*v1.Resource)(nil), // 10: opentelemetry.proto.resource.v1.Resource
+ (*v11.InstrumentationScope)(nil), // 11: opentelemetry.proto.common.v1.InstrumentationScope
+ (*v11.KeyValue)(nil), // 12: opentelemetry.proto.common.v1.KeyValue
}
var file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{
- 3, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans
- 9, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource
- 4, // 2: opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans
- 10, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope
- 5, // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span
- 0, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind
- 11, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
- 7, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event
- 8, // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link
- 6, // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status
- 1, // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode
- 11, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
- 11, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 4, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans
+ 10, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource
+ 5, // 2: opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans
+ 11, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope
+ 6, // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span
+ 1, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind
+ 12, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 8, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event
+ 9, // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link
+ 7, // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status
+ 2, // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode
+ 12, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 12, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
13, // [13:13] is the sub-list for method output_type
13, // [13:13] is the sub-list for method input_type
13, // [13:13] is the sub-list for extension type_name
@@ -1119,7 +1264,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_opentelemetry_proto_trace_v1_trace_proto_rawDesc,
- NumEnums: 2,
+ NumEnums: 3,
NumMessages: 7,
NumExtensions: 0,
NumServices: 0,
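
The regenerated OTLP trace descriptor above introduces the SpanFlags enum (NumEnums goes from 2 to 3), which is why every index in the goTypes and depIdxs tables shifts by one. A minimal sketch of how a consumer might interpret the new flag bits on a span link, assuming the usual go.opentelemetry.io/proto/otlp/trace/v1 import path for this generated package; the sample program and value are illustrative, not part of this diff:

    package main

    import (
        "fmt"

        tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
    )

    func main() {
        // Both masks set: the link context carries a valid is-remote bit,
        // and that bit says the context is remote. (Sample value only.)
        flags := uint32(tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK |
            tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK)

        w3c := flags & uint32(tracepb.SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK) // low 8 bits: W3C trace flags
        hasIsRemote := flags&uint32(tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0
        isRemote := flags&uint32(tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0
        fmt.Println(w3c, hasIsRemote, isRemote) // 0 true true
    }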
diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go
index 425e8eecb06b..016e90215cdf 100644
--- a/vendor/golang.org/x/crypto/cast5/cast5.go
+++ b/vendor/golang.org/x/crypto/cast5/cast5.go
@@ -11,7 +11,7 @@
// Deprecated: any new system should use AES (from crypto/aes, if necessary in
// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
// golang.org/x/crypto/chacha20poly1305).
-package cast5 // import "golang.org/x/crypto/cast5"
+package cast5
import (
"errors"
diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go
index 8907183ec0ab..e664d127cb2a 100644
--- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go
+++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go
@@ -10,14 +10,15 @@
// for their specific task. If you are required to interoperate with OpenPGP
// systems and need a maintained package, consider a community fork.
// See https://golang.org/issue/44226.
-package armor // import "golang.org/x/crypto/openpgp/armor"
+package armor
import (
"bufio"
"bytes"
"encoding/base64"
- "golang.org/x/crypto/openpgp/errors"
"io"
+
+ "golang.org/x/crypto/openpgp/errors"
)
// A Block represents an OpenPGP armored structure.
diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
index 743b35a1204f..f922bdbcaa79 100644
--- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
+++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
@@ -16,7 +16,7 @@
// https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has
// compatibility and security issues (see https://eprint.iacr.org/2021/923).
// Moreover, this package doesn't protect against side-channel attacks.
-package elgamal // import "golang.org/x/crypto/openpgp/elgamal"
+package elgamal
import (
"crypto/rand"
diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go
index 1d7a0ea05adf..a328749471a6 100644
--- a/vendor/golang.org/x/crypto/openpgp/errors/errors.go
+++ b/vendor/golang.org/x/crypto/openpgp/errors/errors.go
@@ -9,7 +9,7 @@
// for their specific task. If you are required to interoperate with OpenPGP
// systems and need a maintained package, consider a community fork.
// See https://golang.org/issue/44226.
-package errors // import "golang.org/x/crypto/openpgp/errors"
+package errors
import (
"strconv"
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go
index 0a19794a8e49..a84a1a214e20 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go
@@ -10,7 +10,7 @@
// for their specific task. If you are required to interoperate with OpenPGP
// systems and need a maintained package, consider a community fork.
// See https://golang.org/issue/44226.
-package packet // import "golang.org/x/crypto/openpgp/packet"
+package packet
import (
"bufio"
diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go
index 48a893146858..cff3db91969a 100644
--- a/vendor/golang.org/x/crypto/openpgp/read.go
+++ b/vendor/golang.org/x/crypto/openpgp/read.go
@@ -9,7 +9,7 @@
// for their specific task. If you are required to interoperate with OpenPGP
// systems and need a maintained package, consider a community fork.
// See https://golang.org/issue/44226.
-package openpgp // import "golang.org/x/crypto/openpgp"
+package openpgp
import (
"crypto"
diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
index f53244a1c7b5..fa1a91907957 100644
--- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
+++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
@@ -10,7 +10,7 @@
// for their specific task. If you are required to interoperate with OpenPGP
// systems and need a maintained package, consider a community fork.
// See https://golang.org/issue/44226.
-package s2k // import "golang.org/x/crypto/openpgp/s2k"
+package s2k
import (
"crypto"
diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
index 904b57e01d7a..28cd99c7f3fc 100644
--- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
+++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
@@ -16,7 +16,7 @@ Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
choose, you can pass the `New` functions from the different SHA packages to
pbkdf2.Key.
*/
-package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
+package pbkdf2
import (
"crypto/hmac"
diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE
new file mode 100644
index 000000000000..6a66aea5eafe
--- /dev/null
+++ b/vendor/golang.org/x/exp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS
new file mode 100644
index 000000000000..733099041f84
--- /dev/null
+++ b/vendor/golang.org/x/exp/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
new file mode 100644
index 000000000000..2c033dff47e9
--- /dev/null
+++ b/vendor/golang.org/x/exp/constraints/constraints.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package constraints defines a set of useful constraints to be used
+// with type parameters.
+package constraints
+
+// Signed is a constraint that permits any signed integer type.
+// If future releases of Go add new predeclared signed integer types,
+// this constraint will be modified to include them.
+type Signed interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// Unsigned is a constraint that permits any unsigned integer type.
+// If future releases of Go add new predeclared unsigned integer types,
+// this constraint will be modified to include them.
+type Unsigned interface {
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+// Integer is a constraint that permits any integer type.
+// If future releases of Go add new predeclared integer types,
+// this constraint will be modified to include them.
+type Integer interface {
+ Signed | Unsigned
+}
+
+// Float is a constraint that permits any floating-point type.
+// If future releases of Go add new predeclared floating-point types,
+// this constraint will be modified to include them.
+type Float interface {
+ ~float32 | ~float64
+}
+
+// Complex is a constraint that permits any complex numeric type.
+// If future releases of Go add new predeclared complex numeric types,
+// this constraint will be modified to include them.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// Ordered is a constraint that permits any ordered type: any type
+// that supports the operators < <= >= >.
+// If future releases of Go add new ordered types,
+// this constraint will be modified to include them.
+type Ordered interface {
+ Integer | Float | ~string
+}
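
The constraints package is vendored here because the slices helpers below depend on it, but it can also be used directly. A minimal sketch with a hypothetical clamp helper (not part of the vendored code):

    package main

    import (
        "fmt"

        "golang.org/x/exp/constraints"
    )

    // clamp limits v to the inclusive range [lo, hi] for any ordered type.
    func clamp[T constraints.Ordered](v, lo, hi T) T {
        if v < lo {
            return lo
        }
        if v > hi {
            return hi
        }
        return v
    }

    func main() {
        fmt.Println(clamp(42, 0, 10), clamp(3.5, 0.0, 1.0)) // 10 1
    }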
diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go
new file mode 100644
index 000000000000..ecc0dabb74d9
--- /dev/null
+++ b/vendor/golang.org/x/exp/maps/maps.go
@@ -0,0 +1,94 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package maps defines various functions useful with maps of any type.
+package maps
+
+// Keys returns the keys of the map m.
+// The keys will be in an indeterminate order.
+func Keys[M ~map[K]V, K comparable, V any](m M) []K {
+ r := make([]K, 0, len(m))
+ for k := range m {
+ r = append(r, k)
+ }
+ return r
+}
+
+// Values returns the values of the map m.
+// The values will be in an indeterminate order.
+func Values[M ~map[K]V, K comparable, V any](m M) []V {
+ r := make([]V, 0, len(m))
+ for _, v := range m {
+ r = append(r, v)
+ }
+ return r
+}
+
+// Equal reports whether two maps contain the same key/value pairs.
+// Values are compared using ==.
+func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k, v1 := range m1 {
+ if v2, ok := m2[k]; !ok || v1 != v2 {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFunc is like Equal, but compares values using eq.
+// Keys are still compared with ==.
+func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k, v1 := range m1 {
+ if v2, ok := m2[k]; !ok || !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Clear removes all entries from m, leaving it empty.
+func Clear[M ~map[K]V, K comparable, V any](m M) {
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// Clone returns a copy of m. This is a shallow clone:
+// the new keys and values are set using ordinary assignment.
+func Clone[M ~map[K]V, K comparable, V any](m M) M {
+ // Preserve nil in case it matters.
+ if m == nil {
+ return nil
+ }
+ r := make(M, len(m))
+ for k, v := range m {
+ r[k] = v
+ }
+ return r
+}
+
+// Copy copies all key/value pairs in src adding them to dst.
+// When a key in src is already present in dst,
+// the value in dst will be overwritten by the value associated
+// with the key in src.
+func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
+ for k, v := range src {
+ dst[k] = v
+ }
+}
+
+// DeleteFunc deletes any key/value pairs from m for which del returns true.
+func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
+ for k, v := range m {
+ if del(k, v) {
+ delete(m, k)
+ }
+ }
+}
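
Since Keys and Values return their results in an indeterminate order, callers that need a stable order typically pair this package with the vendored slices package. A small standalone sketch (not part of the diff):

    package main

    import (
        "fmt"

        "golang.org/x/exp/maps"
        "golang.org/x/exp/slices"
    )

    func main() {
        labels := map[string]string{"os": "linux", "arch": "amd64"}

        // Keys returns the keys in indeterminate order, so sort them
        // when a deterministic iteration order matters (e.g. for logging).
        keys := maps.Keys(labels)
        slices.Sort(keys)
        for _, k := range keys {
            fmt.Printf("%s=%s\n", k, labels[k])
        }
    }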
diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go
new file mode 100644
index 000000000000..fbf1934a0617
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/cmp.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// min is a version of the predeclared function from the Go 1.21 release.
+func min[T constraints.Ordered](a, b T) T {
+ if a < b || isNaN(a) {
+ return a
+ }
+ return b
+}
+
+// max is a version of the predeclared function from the Go 1.21 release.
+func max[T constraints.Ordered](a, b T) T {
+ if a > b || isNaN(a) {
+ return a
+ }
+ return b
+}
+
+// cmpLess is a copy of cmp.Less from the Go 1.21 release.
+func cmpLess[T constraints.Ordered](x, y T) bool {
+ return (isNaN(x) && !isNaN(y)) || x < y
+}
+
+// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
+func cmpCompare[T constraints.Ordered](x, y T) int {
+ xNaN := isNaN(x)
+ yNaN := isNaN(y)
+ if xNaN && yNaN {
+ return 0
+ }
+ if xNaN || x < y {
+ return -1
+ }
+ if yNaN || x > y {
+ return +1
+ }
+ return 0
+}
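
These helpers reproduce the Go 1.21 cmp semantics, in which NaN orders before any other value; isNaN (defined later in sort.go) relies on NaN being the only value that is unequal to itself. A standalone sketch of that property (illustrative only):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        nan := math.NaN()
        fmt.Println(nan != nan)       // true: NaN is the only value unequal to itself
        fmt.Println(nan < 1, 1 < nan) // false false: every ordered comparison with NaN is false
    }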
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
new file mode 100644
index 000000000000..5e8158bba869
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -0,0 +1,499 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices defines various functions useful with slices of any type.
+package slices
+
+import (
+ "unsafe"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. If the lengths are different, Equal returns false.
+// Otherwise, the elements are compared in increasing index order, and the
+// comparison stops at the first unequal pair.
+// Floating point NaNs are not considered equal.
+func Equal[S ~[]E, E comparable](s1, s2 S) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFunc reports whether two slices are equal using an equality
+// function on each pair of elements. If the lengths are different,
+// EqualFunc returns false. Otherwise, the elements are compared in
+// increasing index order, and the comparison stops at the first index
+// for which eq returns false.
+func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
+// of elements. The elements are compared sequentially, starting at index 0,
+// until one element is not equal to the other.
+// The result of comparing the first non-matching elements is returned.
+// If both slices are equal until one of them ends, the shorter slice is
+// considered less than the longer one.
+// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
+func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
+ for i, v1 := range s1 {
+ if i >= len(s2) {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmpCompare(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < len(s2) {
+ return -1
+ }
+ return 0
+}
+
+// CompareFunc is like [Compare] but uses a custom comparison function on each
+// pair of elements.
+// The result is the first non-zero result of cmp; if cmp always
+// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
+// and +1 if len(s1) > len(s2).
+func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
+ for i, v1 := range s1 {
+ if i >= len(s2) {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmp(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < len(s2) {
+ return -1
+ }
+ return 0
+}
+
+// Index returns the index of the first occurrence of v in s,
+// or -1 if not present.
+func Index[S ~[]E, E comparable](s S, v E) int {
+ for i := range s {
+ if v == s[i] {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexFunc returns the first index i satisfying f(s[i]),
+// or -1 if none do.
+func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
+ for i := range s {
+ if f(s[i]) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Contains reports whether v is present in s.
+func Contains[S ~[]E, E comparable](s S, v E) bool {
+ return Index(s, v) >= 0
+}
+
+// ContainsFunc reports whether at least one
+// element e of s satisfies f(e).
+func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
+ return IndexFunc(s, f) >= 0
+}
+
+// Insert inserts the values v... into s at index i,
+// returning the modified slice.
+// The elements at s[i:] are shifted up to make room.
+// In the returned slice r, r[i] == v[0],
+// and r[i+len(v)] == the value originally at r[i].
+// Insert panics if i is out of range.
+// This function is O(len(s) + len(v)).
+func Insert[S ~[]E, E any](s S, i int, v ...E) S {
+ m := len(v)
+ if m == 0 {
+ return s
+ }
+ n := len(s)
+ if i == n {
+ return append(s, v...)
+ }
+ if n+m > cap(s) {
+ // Use append rather than make so that we bump the size of
+ // the slice up to the next storage class.
+ // This is what Grow does but we don't call Grow because
+ // that might copy the values twice.
+ s2 := append(s[:i], make(S, n+m-i)...)
+ copy(s2[i:], v)
+ copy(s2[i+m:], s[i:])
+ return s2
+ }
+ s = s[:n+m]
+
+ // before:
+ // s: aaaaaaaabbbbccccccccdddd
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // after:
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ //
+ // a are the values that don't move in s.
+ // v are the values copied in from v.
+ // b and c are the values from s that are shifted up in index.
+ // d are the values that get overwritten, never to be seen again.
+
+ if !overlaps(v, s[i+m:]) {
+ // Easy case - v does not overlap either the c or d regions.
+ // (It might be in some of a or b, or elsewhere entirely.)
+ // The data we copy up doesn't write to v at all, so just do it.
+
+ copy(s[i+m:], s[i:])
+
+ // Now we have
+ // s: aaaaaaaabbbbbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // Note the b values are duplicated.
+
+ copy(s[i:], v)
+
+ // Now we have
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // That's the result we want.
+ return s
+ }
+
+ // The hard case - v overlaps c or d. We can't just shift up
+ // the data because we'd move or clobber the values we're trying
+ // to insert.
+ // So instead, write v on top of d, then rotate.
+ copy(s[n:], v)
+
+ // Now we have
+ // s: aaaaaaaabbbbccccccccvvvv
+ // ^ ^ ^ ^
+ // i i+m n n+m
+
+ rotateRight(s[i:], m)
+
+ // Now we have
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // That's the result we want.
+ return s
+}
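
A short usage sketch of Insert matching the documented postconditions (r[i] == v[0], and the old r[i] moves to r[i+len(v)]); standalone program, not part of the diff:

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    func main() {
        s := []string{"a", "d"}
        s = slices.Insert(s, 1, "b", "c") // s[1] == "b", old s[1] now at s[3]
        fmt.Println(s)                    // [a b c d]
    }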
+
+// Delete removes the elements s[i:j] from s, returning the modified slice.
+// Delete panics if s[i:j] is not a valid slice of s.
+// Delete is O(len(s)-j), so if many items must be deleted, it is better to
+// make a single call deleting them all together than to delete one at a time.
+// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
+// elements contain pointers you might consider zeroing those elements so that
+// objects they reference can be garbage collected.
+func Delete[S ~[]E, E any](s S, i, j int) S {
+ _ = s[i:j] // bounds check
+
+ return append(s[:i], s[j:]...)
+}
+
+// DeleteFunc removes any elements from s for which del returns true,
+// returning the modified slice.
+// When DeleteFunc removes m elements, it might not modify the elements
+// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
+// zeroing those elements so that objects they reference can be garbage
+// collected.
+func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
+ i := IndexFunc(s, del)
+ if i == -1 {
+ return s
+ }
+ // Don't start copying elements until we find one to delete.
+ for j := i + 1; j < len(s); j++ {
+ if v := s[j]; !del(v) {
+ s[i] = v
+ i++
+ }
+ }
+ return s[:i]
+}
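
A usage sketch for Delete and DeleteFunc (standalone, not part of the diff):

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    func main() {
        s := []int{1, 2, 3, 4, 5}
        s = slices.Delete(s, 1, 3) // removes s[1:3] in one call
        fmt.Println(s)             // [1 4 5]

        s = slices.DeleteFunc(s, func(v int) bool { return v%2 == 0 })
        fmt.Println(s) // [1 5]
    }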
+
+// Replace replaces the elements s[i:j] by the given v, and returns the
+// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
+ _ = s[i:j] // verify that i:j is a valid subslice
+
+ if i == j {
+ return Insert(s, i, v...)
+ }
+ if j == len(s) {
+ return append(s[:i], v...)
+ }
+
+ tot := len(s[:i]) + len(v) + len(s[j:])
+ if tot > cap(s) {
+ // Too big to fit, allocate and copy over.
+ s2 := append(s[:i], make(S, tot-i)...) // See Insert
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[j:])
+ return s2
+ }
+
+ r := s[:tot]
+
+ if i+len(v) <= j {
+ // Easy, as v fits in the deleted portion.
+ copy(r[i:], v)
+ if i+len(v) != j {
+ copy(r[i+len(v):], s[j:])
+ }
+ return r
+ }
+
+ // We are expanding (v is bigger than j-i).
+ // The situation is something like this:
+ // (example has i=4,j=8,len(s)=16,len(v)=6)
+ // s: aaaaxxxxbbbbbbbbyy
+ // ^ ^ ^ ^
+ // i j len(s) tot
+ // a: prefix of s
+ // x: deleted range
+ // b: more of s
+ // y: area to expand into
+
+ if !overlaps(r[i+len(v):], v) {
+ // Easy, as v is not clobbered by the first copy.
+ copy(r[i+len(v):], s[j:])
+ copy(r[i:], v)
+ return r
+ }
+
+ // This is a situation where we don't have a single place to which
+ // we can copy v. Parts of it need to go to two different places.
+ // We want to copy the prefix of v into y and the suffix into x, then
+ // rotate |y| spots to the right.
+ //
+ // v[2:] v[:2]
+ // | |
+ // s: aaaavvvvbbbbbbbbvv
+ // ^ ^ ^ ^
+ // i j len(s) tot
+ //
+ // If either of those two destinations doesn't alias v, then we're good.
+ y := len(v) - (j - i) // length of y portion
+
+ if !overlaps(r[i:j], v) {
+ copy(r[i:j], v[y:])
+ copy(r[len(s):], v[:y])
+ rotateRight(r[i:], y)
+ return r
+ }
+ if !overlaps(r[len(s):], v) {
+ copy(r[len(s):], v[:y])
+ copy(r[i:j], v[y:])
+ rotateRight(r[i:], y)
+ return r
+ }
+
+ // Now we know that v overlaps both x and y.
+ // That means that the entirety of b is *inside* v.
+ // So we don't need to preserve b at all; instead we
+ // can copy v first, then copy the b part of v out of
+ // v to the right destination.
+ k := startIdx(v, s[j:])
+ copy(r[i:], v)
+ copy(r[i+len(v):], r[i+k:])
+ return r
+}
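
A usage sketch for Replace in the expanding case, where v is larger than the range it replaces (standalone, not part of the diff):

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    func main() {
        s := []int{0, 1, 2, 3}
        s = slices.Replace(s, 1, 3, 8, 9, 10) // replaces s[1:3] with three values
        fmt.Println(s)                        // [0 8 9 10 3]
    }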
+
+// Clone returns a copy of the slice.
+// The elements are copied using assignment, so this is a shallow clone.
+func Clone[S ~[]E, E any](s S) S {
+ // Preserve nil in case it matters.
+ if s == nil {
+ return nil
+ }
+ return append(S([]E{}), s...)
+}
+
+// Compact replaces consecutive runs of equal elements with a single copy.
+// This is like the uniq command found on Unix.
+// Compact modifies the contents of the slice s and returns the modified slice,
+// which may have a smaller length.
+// When Compact discards m elements in total, it might not modify the elements
+// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
+// zeroing those elements so that objects they reference can be garbage collected.
+func Compact[S ~[]E, E comparable](s S) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ for k := 1; k < len(s); k++ {
+ if s[k] != s[k-1] {
+ if i != k {
+ s[i] = s[k]
+ }
+ i++
+ }
+ }
+ return s[:i]
+}
+
+// CompactFunc is like [Compact] but uses an equality function to compare elements.
+// For runs of elements that compare equal, CompactFunc keeps the first one.
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ for k := 1; k < len(s); k++ {
+ if !eq(s[k], s[k-1]) {
+ if i != k {
+ s[i] = s[k]
+ }
+ i++
+ }
+ }
+ return s[:i]
+}
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation. If n is negative or too large to
+// allocate the memory, Grow panics.
+func Grow[S ~[]E, E any](s S, n int) S {
+ if n < 0 {
+ panic("cannot be negative")
+ }
+ if n -= cap(s) - len(s); n > 0 {
+ // TODO(https://go.dev/issue/53888): Using []E instead of S here is a
+ // workaround for a compiler bug where the runtime.growslice optimization
+ // does not take effect. Revert when the compiler is fixed.
+ s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
+ }
+ return s
+}
+
+// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
+func Clip[S ~[]E, E any](s S) S {
+ return s[:len(s):len(s)]
+}
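
A usage sketch for Grow and Clip showing the capacity guarantees described above (standalone, not part of the diff):

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    func main() {
        s := make([]int, 0, 2)
        s = slices.Grow(s, 100)            // at least 100 more appends without reallocation
        fmt.Println(len(s), cap(s) >= 100) // 0 true

        s = append(s, 1, 2, 3)
        s = slices.Clip(s)          // drop the unused capacity again
        fmt.Println(len(s), cap(s)) // 3 3
    }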
+
+// Rotation algorithm explanation:
+//
+// rotate left by 2
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join first parts
+// 89234567 01
+// recursively rotate first left part by 2
+// 23456789 01
+// join at the end
+// 2345678901
+//
+// rotate left by 8
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join last parts
+// 89 23456701
+// recursively rotate second part left by 6
+// 89 01234567
+// join at the end
+// 8901234567
+
+// TODO: There are other rotate algorithms.
+// This algorithm has the desirable property that it moves each element exactly twice.
+// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
+// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
+
+// rotateLeft rotates s left by r spaces, so that
+// s_final[i] = s_orig[i+r], wrapping around.
+func rotateLeft[E any](s []E, r int) {
+ for r != 0 && r != len(s) {
+ if r*2 <= len(s) {
+ swap(s[:r], s[len(s)-r:])
+ s = s[:len(s)-r]
+ } else {
+ swap(s[:len(s)-r], s[r:])
+ s, r = s[len(s)-r:], r*2-len(s)
+ }
+ }
+}
+func rotateRight[E any](s []E, r int) {
+ rotateLeft(s, len(s)-r)
+}
+
+// swap swaps the contents of x and y. x and y must be equal length and disjoint.
+func swap[E any](x, y []E) {
+ for i := 0; i < len(x); i++ {
+ x[i], y[i] = y[i], x[i]
+ }
+}
+
+// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
+func overlaps[E any](a, b []E) bool {
+ if len(a) == 0 || len(b) == 0 {
+ return false
+ }
+ elemSize := unsafe.Sizeof(a[0])
+ if elemSize == 0 {
+ return false
+ }
+ // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
+ // Also see crypto/internal/alias/alias.go:AnyOverlap
+ return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
+ uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
+}
+
+// startIdx returns the index in haystack where the needle starts.
+// prerequisite: the needle must be aliased entirely inside the haystack.
+func startIdx[E any](haystack, needle []E) int {
+ p := &needle[0]
+ for i := range haystack {
+ if p == &haystack[i] {
+ return i
+ }
+ }
+ // TODO: what if the overlap is by a non-integral number of Es?
+ panic("needle not found")
+}
+
+// Reverse reverses the elements of the slice in place.
+func Reverse[S ~[]E, E any](s S) {
+ for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
+ s[i], s[j] = s[j], s[i]
+ }
+}
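
rotateLeft and rotateRight are unexported, but the rotation they implement is easy to reproduce with the exported Reverse via the triple-reverse method mentioned in the TODO above. A standalone sketch with a hypothetical helper (not part of the vendored code):

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    // rotateLeftTripleReverse rotates s left by r using three reversals,
    // the simpler, more cache-friendly alternative noted in the TODO.
    func rotateLeftTripleReverse[E any](s []E, r int) {
        slices.Reverse(s[:r])
        slices.Reverse(s[r:])
        slices.Reverse(s)
    }

    func main() {
        s := []byte("0123456789")
        rotateLeftTripleReverse(s, 2)
        fmt.Println(string(s)) // 2345678901, matching the walk-through above
    }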
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
new file mode 100644
index 000000000000..b67897f76b5d
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -0,0 +1,195 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
+
+package slices
+
+import (
+ "math/bits"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Sort sorts a slice of any ordered type in ascending order.
+// When sorting floating-point numbers, NaNs are ordered before other values.
+func Sort[S ~[]E, E constraints.Ordered](x S) {
+ n := len(x)
+ pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+}
+
+// SortFunc sorts the slice x in ascending order as determined by the cmp
+// function. This sort is not guaranteed to be stable.
+// cmp(a, b) should return a negative number when a < b, a positive number when
+// a > b and zero when a == b.
+//
+// SortFunc requires that cmp is a strict weak ordering.
+// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+ n := len(x)
+ pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
+}
+
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using cmp to compare elements in the same way as [SortFunc].
+func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+ stableCmpFunc(x, len(x), cmp)
+}
+
+// IsSorted reports whether x is sorted in ascending order.
+func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if cmpLess(x[i], x[i-1]) {
+ return false
+ }
+ }
+ return true
+}
+
+// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
+// comparison function as defined by [SortFunc].
+func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if cmp(x[i], x[i-1]) < 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Min returns the minimal value in x. It panics if x is empty.
+// For floating-point numbers, Min propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Min[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Min: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = min(m, x[i])
+ }
+ return m
+}
+
+// MinFunc returns the minimal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one minimal element
+// according to the cmp function, MinFunc returns the first one.
+func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MinFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) < 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
+
+// Max returns the maximal value in x. It panics if x is empty.
+// For floating-point E, Max propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Max[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Max: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = max(m, x[i])
+ }
+ return m
+}
+
+// MaxFunc returns the maximal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one maximal element
+// according to the cmp function, MaxFunc returns the first one.
+func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MaxFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) > 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
+
+// BinarySearch searches for target in a sorted slice and returns the position
+// where target is found, or the position where target would appear in the
+// sort order; it also returns a bool saying whether the target is really found
+// in the slice. The slice must be sorted in increasing order.
+func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
+ // Inlining is faster than calling BinarySearchFunc with a lambda.
+ n := len(x)
+ // Define x[-1] < target and x[n] >= target.
+ // Invariant: x[i-1] < target, x[j] >= target.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmpLess(x[h], target) {
+ i = h + 1 // preserves x[i-1] < target
+ } else {
+ j = h // preserves x[j] >= target
+ }
+ }
+ // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
+ return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
+}
+
+// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing"
+// is defined by cmp. cmp should return 0 if the slice element matches
+// the target, a negative number if the slice element precedes the target,
+// or a positive number if the slice element follows the target.
+// cmp must implement the same ordering as the slice, such that if
+// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
+func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
+ n := len(x)
+ // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+ // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmp(x[h], target) < 0 {
+ i = h + 1 // preserves cmp(x[i - 1], target) < 0
+ } else {
+ j = h // preserves cmp(x[j], target) >= 0
+ }
+ }
+ // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
+ return i, i < n && cmp(x[i], target) == 0
+}
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+ unknownHint sortedHint = iota
+ increasingHint
+ decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+ *r ^= *r << 13
+ *r ^= *r >> 17
+ *r ^= *r << 5
+ return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+ return 1 << bits.Len(uint(length))
+}
+
+// isNaN reports whether x is a NaN without requiring the math package.
+// This will always return false if T is not floating-point.
+func isNaN[T constraints.Ordered](x T) bool {
+ return x != x
+}
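
A usage sketch for BinarySearchFunc with a struct slice sorted by a single field; the release type is hypothetical (standalone, not part of the diff):

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    type release struct {
        name    string
        version int
    }

    func main() {
        rs := []release{{"a", 1}, {"b", 3}, {"c", 7}} // sorted by version

        // cmp compares a slice element against the target; subtraction is
        // fine for small ints, but avoid it where overflow is possible.
        i, found := slices.BinarySearchFunc(rs, 3, func(r release, v int) int {
            return r.version - v
        })
        fmt.Println(i, found) // 1 true
    }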
diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
new file mode 100644
index 000000000000..06f2c7a2481b
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+// insertionSortCmpFunc sorts data[a:b] using insertion sort.
+func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownCmpFunc implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
+ child++
+ }
+ if !(cmp(data[first+root], data[first+child]) < 0) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownCmpFunc(data, i, hi, first, cmp)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownCmpFunc(data, lo, i, first, cmp)
+ }
+}
+
+// pdqsortCmpFunc sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: /~https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortCmpFunc(data, a, b, cmp)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortCmpFunc(data, a, b, cmp)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsCmpFunc(data, a, b, cmp)
+ limit--
+ }
+
+ pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
+ if hint == decreasingHint {
+ reverseRangeCmpFunc(data, a, b, cmp)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortCmpFunc(data, a, b, cmp) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
+ mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortCmpFunc(data, a, mid, limit, cmp)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortCmpFunc(data, mid+1, b, limit, cmp)
+ b = mid
+ }
+ }
+}
+
+// partitionCmpFunc does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(cmp(data[a], data[i]) < 0) {
+ i++
+ }
+ for i <= j && (cmp(data[a], data[j]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortCmpFunc partially sorts a slice; it returns true if the slice is sorted at the end.
+func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(cmp(data[i], data[i-1]) < 0) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotCmpFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
+ j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
+ k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
+ }
+ // Find the median among i, j, k and store it in j.
+ j = medianCmpFunc(data, i, j, k, &swaps, cmp)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
+ if cmp(data[b], data[a]) < 0 {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ b, c = order2CmpFunc(data, b, c, swaps, cmp)
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ return b
+}
+
+// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
+func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
+ return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
+}
+
+func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortCmpFunc(data, a, b, cmp)
+ a = b
+ b += blockSize
+ }
+ insertionSortCmpFunc(data, a, n, cmp)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeCmpFunc(data, a, a+blockSize, b, cmp)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeCmpFunc(data, a, m, n, cmp)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Without loss of generality, assume M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmp(data[h], data[a]) < 0 {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(cmp(data[m], data[h]) < 0) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(cmp(data[p-c], data[c]) < 0) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateCmpFunc(data, start, m, end, cmp)
+ }
+ if a < start && start < mid {
+ symMergeCmpFunc(data, a, start, mid, cmp)
+ }
+ if mid < end && end < b {
+ symMergeCmpFunc(data, mid, end, b, cmp)
+ }
+}
+
+// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeCmpFunc(data, m-i, m, j, cmp)
+ i -= j
+ } else {
+ swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeCmpFunc(data, m-i, m, i, cmp)
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 000000000000..99b47c3986a4
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !cmpLess(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: /~https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !cmpLess(data[a-1], data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && cmpLess(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !cmpLess(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && cmpLess(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !cmpLess(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !cmpLess(data[a], data[i]) {
+ i++
+ }
+ for i <= j && cmpLess(data[a], data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice and returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !cmpLess(data[i], data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !cmpLess(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !cmpLess(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method; the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps)
+ j = medianAdjacentOrdered(data, j, &swaps)
+ k = medianAdjacentOrdered(data, k, &swaps)
+ }
+ // Find the median among i, j, k and store it into j.
+ j = medianOrdered(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+ if cmpLess(data[b], data[a]) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+ a, b = order2Ordered(data, a, b, swaps)
+ b, c = order2Ordered(data, b, c, swaps)
+ a, b = order2Ordered(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+ return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortOrdered(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSortOrdered(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeOrdered(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeOrdered(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmpLess(data[h], data[a]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !cmpLess(data[m], data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !cmpLess(data[p-c], data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateOrdered(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMergeOrdered(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMergeOrdered(data, mid, end, b)
+ }
+}
+
+// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeOrdered(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRangeOrdered(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeOrdered(data, m-i, m, i)
+}
diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE
index 6a66aea5eafe..2a7cf70da6e4 100644
--- a/vendor/golang.org/x/mod/LICENSE
+++ b/vendor/golang.org/x/mod/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go
new file mode 100644
index 000000000000..cd0a8ac15451
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/atom.go
@@ -0,0 +1,78 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package atom provides integer codes (also known as atoms) for a fixed set of
+// frequently occurring HTML strings: tag names and attribute keys such as "p"
+// and "id".
+//
+// Sharing an atom's name between all elements with the same tag can result in
+// fewer string allocations when tokenizing and parsing HTML. Integer
+// comparisons are also generally faster than string comparisons.
+//
+// The value of an atom's particular code is not guaranteed to stay the same
+// between versions of this package. Neither is any ordering guaranteed:
+// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
+// be dense. The only guarantees are that e.g. looking up "div" will yield
+// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
+package atom // import "golang.org/x/net/html/atom"
+
+// Atom is an integer code for a string. The zero value maps to "".
+type Atom uint32
+
+// String returns the atom's name.
+func (a Atom) String() string {
+ start := uint32(a >> 8)
+ n := uint32(a & 0xff)
+ if start+n > uint32(len(atomText)) {
+ return ""
+ }
+ return atomText[start : start+n]
+}
+
+func (a Atom) string() string {
+ return atomText[a>>8 : a>>8+a&0xff]
+}
+
+// fnv computes the FNV hash with an arbitrary starting value h.
+func fnv(h uint32, s []byte) uint32 {
+ for i := range s {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+func match(s string, t []byte) bool {
+ for i, c := range t {
+ if s[i] != c {
+ return false
+ }
+ }
+ return true
+}
+
+// Lookup returns the atom whose name is s. It returns zero if there is no
+// such atom. The lookup is case sensitive.
+func Lookup(s []byte) Atom {
+ if len(s) == 0 || len(s) > maxAtomLen {
+ return 0
+ }
+ h := fnv(hash0, s)
+ if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
+ return a
+ }
+ if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
+ return a
+ }
+ return 0
+}
+
+// String returns a string whose contents are equal to s. In that sense, it is
+// equivalent to string(s) but may be more efficient.
+func String(s []byte) string {
+ if a := Lookup(s); a != 0 {
+ return a.String()
+ }
+ return string(s)
+}
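A short sketch of the lookup path above (FNV hash, two probes of the fixed table, round-trip through String), assuming the vendored golang.org/x/net/html/atom package added in this diff:

```go
package main

import (
	"fmt"

	"golang.org/x/net/html/atom"
)

func main() {
	// Lookup FNV-hashes the input and probes the fixed-size table twice;
	// a hit round-trips back to its canonical name via String.
	a := atom.Lookup([]byte("div"))
	fmt.Println(a == atom.Div, a.String()) // true div

	// Unknown names miss both probes and yield the zero Atom.
	fmt.Println(atom.Lookup([]byte("not-a-tag")) == 0) // true
}
```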
diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go
new file mode 100644
index 000000000000..2a938864cb9d
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/table.go
@@ -0,0 +1,783 @@
+// Code generated by go generate gen.go; DO NOT EDIT.
+
+//go:generate go run gen.go
+
+package atom
+
+const (
+ A Atom = 0x1
+ Abbr Atom = 0x4
+ Accept Atom = 0x1a06
+ AcceptCharset Atom = 0x1a0e
+ Accesskey Atom = 0x2c09
+ Acronym Atom = 0xaa07
+ Action Atom = 0x27206
+ Address Atom = 0x6f307
+ Align Atom = 0xb105
+ Allowfullscreen Atom = 0x2080f
+ Allowpaymentrequest Atom = 0xc113
+ Allowusermedia Atom = 0xdd0e
+ Alt Atom = 0xf303
+ Annotation Atom = 0x1c90a
+ AnnotationXml Atom = 0x1c90e
+ Applet Atom = 0x31906
+ Area Atom = 0x35604
+ Article Atom = 0x3fc07
+ As Atom = 0x3c02
+ Aside Atom = 0x10705
+ Async Atom = 0xff05
+ Audio Atom = 0x11505
+ Autocomplete Atom = 0x2780c
+ Autofocus Atom = 0x12109
+ Autoplay Atom = 0x13c08
+ B Atom = 0x101
+ Base Atom = 0x3b04
+ Basefont Atom = 0x3b08
+ Bdi Atom = 0xba03
+ Bdo Atom = 0x14b03
+ Bgsound Atom = 0x15e07
+ Big Atom = 0x17003
+ Blink Atom = 0x17305
+ Blockquote Atom = 0x1870a
+ Body Atom = 0x2804
+ Br Atom = 0x202
+ Button Atom = 0x19106
+ Canvas Atom = 0x10306
+ Caption Atom = 0x23107
+ Center Atom = 0x22006
+ Challenge Atom = 0x29b09
+ Charset Atom = 0x2107
+ Checked Atom = 0x47907
+ Cite Atom = 0x19c04
+ Class Atom = 0x56405
+ Code Atom = 0x5c504
+ Col Atom = 0x1ab03
+ Colgroup Atom = 0x1ab08
+ Color Atom = 0x1bf05
+ Cols Atom = 0x1c404
+ Colspan Atom = 0x1c407
+ Command Atom = 0x1d707
+ Content Atom = 0x58b07
+ Contenteditable Atom = 0x58b0f
+ Contextmenu Atom = 0x3800b
+ Controls Atom = 0x1de08
+ Coords Atom = 0x1ea06
+ Crossorigin Atom = 0x1fb0b
+ Data Atom = 0x4a504
+ Datalist Atom = 0x4a508
+ Datetime Atom = 0x2b808
+ Dd Atom = 0x2d702
+ Default Atom = 0x10a07
+ Defer Atom = 0x5c705
+ Del Atom = 0x45203
+ Desc Atom = 0x56104
+ Details Atom = 0x7207
+ Dfn Atom = 0x8703
+ Dialog Atom = 0xbb06
+ Dir Atom = 0x9303
+ Dirname Atom = 0x9307
+ Disabled Atom = 0x16408
+ Div Atom = 0x16b03
+ Dl Atom = 0x5e602
+ Download Atom = 0x46308
+ Draggable Atom = 0x17a09
+ Dropzone Atom = 0x40508
+ Dt Atom = 0x64b02
+ Em Atom = 0x6e02
+ Embed Atom = 0x6e05
+ Enctype Atom = 0x28d07
+ Face Atom = 0x21e04
+ Fieldset Atom = 0x22608
+ Figcaption Atom = 0x22e0a
+ Figure Atom = 0x24806
+ Font Atom = 0x3f04
+ Footer Atom = 0xf606
+ For Atom = 0x25403
+ ForeignObject Atom = 0x2540d
+ Foreignobject Atom = 0x2610d
+ Form Atom = 0x26e04
+ Formaction Atom = 0x26e0a
+ Formenctype Atom = 0x2890b
+ Formmethod Atom = 0x2a40a
+ Formnovalidate Atom = 0x2ae0e
+ Formtarget Atom = 0x2c00a
+ Frame Atom = 0x8b05
+ Frameset Atom = 0x8b08
+ H1 Atom = 0x15c02
+ H2 Atom = 0x2de02
+ H3 Atom = 0x30d02
+ H4 Atom = 0x34502
+ H5 Atom = 0x34f02
+ H6 Atom = 0x64d02
+ Head Atom = 0x33104
+ Header Atom = 0x33106
+ Headers Atom = 0x33107
+ Height Atom = 0x5206
+ Hgroup Atom = 0x2ca06
+ Hidden Atom = 0x2d506
+ High Atom = 0x2db04
+ Hr Atom = 0x15702
+ Href Atom = 0x2e004
+ Hreflang Atom = 0x2e008
+ Html Atom = 0x5604
+ HttpEquiv Atom = 0x2e80a
+ I Atom = 0x601
+ Icon Atom = 0x58a04
+ Id Atom = 0x10902
+ Iframe Atom = 0x2fc06
+ Image Atom = 0x30205
+ Img Atom = 0x30703
+ Input Atom = 0x44b05
+ Inputmode Atom = 0x44b09
+ Ins Atom = 0x20403
+ Integrity Atom = 0x23f09
+ Is Atom = 0x16502
+ Isindex Atom = 0x30f07
+ Ismap Atom = 0x31605
+ Itemid Atom = 0x38b06
+ Itemprop Atom = 0x19d08
+ Itemref Atom = 0x3cd07
+ Itemscope Atom = 0x67109
+ Itemtype Atom = 0x31f08
+ Kbd Atom = 0xb903
+ Keygen Atom = 0x3206
+ Keytype Atom = 0xd607
+ Kind Atom = 0x17704
+ Label Atom = 0x5905
+ Lang Atom = 0x2e404
+ Legend Atom = 0x18106
+ Li Atom = 0xb202
+ Link Atom = 0x17404
+ List Atom = 0x4a904
+ Listing Atom = 0x4a907
+ Loop Atom = 0x5d04
+ Low Atom = 0xc303
+ Main Atom = 0x1004
+ Malignmark Atom = 0xb00a
+ Manifest Atom = 0x6d708
+ Map Atom = 0x31803
+ Mark Atom = 0xb604
+ Marquee Atom = 0x32707
+ Math Atom = 0x32e04
+ Max Atom = 0x33d03
+ Maxlength Atom = 0x33d09
+ Media Atom = 0xe605
+ Mediagroup Atom = 0xe60a
+ Menu Atom = 0x38704
+ Menuitem Atom = 0x38708
+ Meta Atom = 0x4b804
+ Meter Atom = 0x9805
+ Method Atom = 0x2a806
+ Mglyph Atom = 0x30806
+ Mi Atom = 0x34702
+ Min Atom = 0x34703
+ Minlength Atom = 0x34709
+ Mn Atom = 0x2b102
+ Mo Atom = 0xa402
+ Ms Atom = 0x67402
+ Mtext Atom = 0x35105
+ Multiple Atom = 0x35f08
+ Muted Atom = 0x36705
+ Name Atom = 0x9604
+ Nav Atom = 0x1303
+ Nobr Atom = 0x3704
+ Noembed Atom = 0x6c07
+ Noframes Atom = 0x8908
+ Nomodule Atom = 0xa208
+ Nonce Atom = 0x1a605
+ Noscript Atom = 0x21608
+ Novalidate Atom = 0x2b20a
+ Object Atom = 0x26806
+ Ol Atom = 0x13702
+ Onabort Atom = 0x19507
+ Onafterprint Atom = 0x2360c
+ Onautocomplete Atom = 0x2760e
+ Onautocompleteerror Atom = 0x27613
+ Onauxclick Atom = 0x61f0a
+ Onbeforeprint Atom = 0x69e0d
+ Onbeforeunload Atom = 0x6e70e
+ Onblur Atom = 0x56d06
+ Oncancel Atom = 0x11908
+ Oncanplay Atom = 0x14d09
+ Oncanplaythrough Atom = 0x14d10
+ Onchange Atom = 0x41b08
+ Onclick Atom = 0x2f507
+ Onclose Atom = 0x36c07
+ Oncontextmenu Atom = 0x37e0d
+ Oncopy Atom = 0x39106
+ Oncuechange Atom = 0x3970b
+ Oncut Atom = 0x3a205
+ Ondblclick Atom = 0x3a70a
+ Ondrag Atom = 0x3b106
+ Ondragend Atom = 0x3b109
+ Ondragenter Atom = 0x3ba0b
+ Ondragexit Atom = 0x3c50a
+ Ondragleave Atom = 0x3df0b
+ Ondragover Atom = 0x3ea0a
+ Ondragstart Atom = 0x3f40b
+ Ondrop Atom = 0x40306
+ Ondurationchange Atom = 0x41310
+ Onemptied Atom = 0x40a09
+ Onended Atom = 0x42307
+ Onerror Atom = 0x42a07
+ Onfocus Atom = 0x43107
+ Onhashchange Atom = 0x43d0c
+ Oninput Atom = 0x44907
+ Oninvalid Atom = 0x45509
+ Onkeydown Atom = 0x45e09
+ Onkeypress Atom = 0x46b0a
+ Onkeyup Atom = 0x48007
+ Onlanguagechange Atom = 0x48d10
+ Onload Atom = 0x49d06
+ Onloadeddata Atom = 0x49d0c
+ Onloadedmetadata Atom = 0x4b010
+ Onloadend Atom = 0x4c609
+ Onloadstart Atom = 0x4cf0b
+ Onmessage Atom = 0x4da09
+ Onmessageerror Atom = 0x4da0e
+ Onmousedown Atom = 0x4e80b
+ Onmouseenter Atom = 0x4f30c
+ Onmouseleave Atom = 0x4ff0c
+ Onmousemove Atom = 0x50b0b
+ Onmouseout Atom = 0x5160a
+ Onmouseover Atom = 0x5230b
+ Onmouseup Atom = 0x52e09
+ Onmousewheel Atom = 0x53c0c
+ Onoffline Atom = 0x54809
+ Ononline Atom = 0x55108
+ Onpagehide Atom = 0x5590a
+ Onpageshow Atom = 0x5730a
+ Onpaste Atom = 0x57f07
+ Onpause Atom = 0x59a07
+ Onplay Atom = 0x5a406
+ Onplaying Atom = 0x5a409
+ Onpopstate Atom = 0x5ad0a
+ Onprogress Atom = 0x5b70a
+ Onratechange Atom = 0x5cc0c
+ Onrejectionhandled Atom = 0x5d812
+ Onreset Atom = 0x5ea07
+ Onresize Atom = 0x5f108
+ Onscroll Atom = 0x60008
+ Onsecuritypolicyviolation Atom = 0x60819
+ Onseeked Atom = 0x62908
+ Onseeking Atom = 0x63109
+ Onselect Atom = 0x63a08
+ Onshow Atom = 0x64406
+ Onsort Atom = 0x64f06
+ Onstalled Atom = 0x65909
+ Onstorage Atom = 0x66209
+ Onsubmit Atom = 0x66b08
+ Onsuspend Atom = 0x67b09
+ Ontimeupdate Atom = 0x400c
+ Ontoggle Atom = 0x68408
+ Onunhandledrejection Atom = 0x68c14
+ Onunload Atom = 0x6ab08
+ Onvolumechange Atom = 0x6b30e
+ Onwaiting Atom = 0x6c109
+ Onwheel Atom = 0x6ca07
+ Open Atom = 0x1a304
+ Optgroup Atom = 0x5f08
+ Optimum Atom = 0x6d107
+ Option Atom = 0x6e306
+ Output Atom = 0x51d06
+ P Atom = 0xc01
+ Param Atom = 0xc05
+ Pattern Atom = 0x6607
+ Picture Atom = 0x7b07
+ Ping Atom = 0xef04
+ Placeholder Atom = 0x1310b
+ Plaintext Atom = 0x1b209
+ Playsinline Atom = 0x1400b
+ Poster Atom = 0x2cf06
+ Pre Atom = 0x47003
+ Preload Atom = 0x48607
+ Progress Atom = 0x5b908
+ Prompt Atom = 0x53606
+ Public Atom = 0x58606
+ Q Atom = 0xcf01
+ Radiogroup Atom = 0x30a
+ Rb Atom = 0x3a02
+ Readonly Atom = 0x35708
+ Referrerpolicy Atom = 0x3d10e
+ Rel Atom = 0x48703
+ Required Atom = 0x24c08
+ Reversed Atom = 0x8008
+ Rows Atom = 0x9c04
+ Rowspan Atom = 0x9c07
+ Rp Atom = 0x23c02
+ Rt Atom = 0x19a02
+ Rtc Atom = 0x19a03
+ Ruby Atom = 0xfb04
+ S Atom = 0x2501
+ Samp Atom = 0x7804
+ Sandbox Atom = 0x12907
+ Scope Atom = 0x67505
+ Scoped Atom = 0x67506
+ Script Atom = 0x21806
+ Seamless Atom = 0x37108
+ Section Atom = 0x56807
+ Select Atom = 0x63c06
+ Selected Atom = 0x63c08
+ Shape Atom = 0x1e505
+ Size Atom = 0x5f504
+ Sizes Atom = 0x5f505
+ Slot Atom = 0x1ef04
+ Small Atom = 0x20605
+ Sortable Atom = 0x65108
+ Sorted Atom = 0x33706
+ Source Atom = 0x37806
+ Spacer Atom = 0x43706
+ Span Atom = 0x9f04
+ Spellcheck Atom = 0x4740a
+ Src Atom = 0x5c003
+ Srcdoc Atom = 0x5c006
+ Srclang Atom = 0x5f907
+ Srcset Atom = 0x6f906
+ Start Atom = 0x3fa05
+ Step Atom = 0x58304
+ Strike Atom = 0xd206
+ Strong Atom = 0x6dd06
+ Style Atom = 0x6ff05
+ Sub Atom = 0x66d03
+ Summary Atom = 0x70407
+ Sup Atom = 0x70b03
+ Svg Atom = 0x70e03
+ System Atom = 0x71106
+ Tabindex Atom = 0x4be08
+ Table Atom = 0x59505
+ Target Atom = 0x2c406
+ Tbody Atom = 0x2705
+ Td Atom = 0x9202
+ Template Atom = 0x71408
+ Textarea Atom = 0x35208
+ Tfoot Atom = 0xf505
+ Th Atom = 0x15602
+ Thead Atom = 0x33005
+ Time Atom = 0x4204
+ Title Atom = 0x11005
+ Tr Atom = 0xcc02
+ Track Atom = 0x1ba05
+ Translate Atom = 0x1f209
+ Tt Atom = 0x6802
+ Type Atom = 0xd904
+ Typemustmatch Atom = 0x2900d
+ U Atom = 0xb01
+ Ul Atom = 0xa702
+ Updateviacache Atom = 0x460e
+ Usemap Atom = 0x59e06
+ Value Atom = 0x1505
+ Var Atom = 0x16d03
+ Video Atom = 0x2f105
+ Wbr Atom = 0x57c03
+ Width Atom = 0x64905
+ Workertype Atom = 0x71c0a
+ Wrap Atom = 0x72604
+ Xmp Atom = 0x12f03
+)
+
+const hash0 = 0x81cdf10e
+
+const maxAtomLen = 25
+
+var table = [1 << 9]Atom{
+ 0x1: 0xe60a, // mediagroup
+ 0x2: 0x2e404, // lang
+ 0x4: 0x2c09, // accesskey
+ 0x5: 0x8b08, // frameset
+ 0x7: 0x63a08, // onselect
+ 0x8: 0x71106, // system
+ 0xa: 0x64905, // width
+ 0xc: 0x2890b, // formenctype
+ 0xd: 0x13702, // ol
+ 0xe: 0x3970b, // oncuechange
+ 0x10: 0x14b03, // bdo
+ 0x11: 0x11505, // audio
+ 0x12: 0x17a09, // draggable
+ 0x14: 0x2f105, // video
+ 0x15: 0x2b102, // mn
+ 0x16: 0x38704, // menu
+ 0x17: 0x2cf06, // poster
+ 0x19: 0xf606, // footer
+ 0x1a: 0x2a806, // method
+ 0x1b: 0x2b808, // datetime
+ 0x1c: 0x19507, // onabort
+ 0x1d: 0x460e, // updateviacache
+ 0x1e: 0xff05, // async
+ 0x1f: 0x49d06, // onload
+ 0x21: 0x11908, // oncancel
+ 0x22: 0x62908, // onseeked
+ 0x23: 0x30205, // image
+ 0x24: 0x5d812, // onrejectionhandled
+ 0x26: 0x17404, // link
+ 0x27: 0x51d06, // output
+ 0x28: 0x33104, // head
+ 0x29: 0x4ff0c, // onmouseleave
+ 0x2a: 0x57f07, // onpaste
+ 0x2b: 0x5a409, // onplaying
+ 0x2c: 0x1c407, // colspan
+ 0x2f: 0x1bf05, // color
+ 0x30: 0x5f504, // size
+ 0x31: 0x2e80a, // http-equiv
+ 0x33: 0x601, // i
+ 0x34: 0x5590a, // onpagehide
+ 0x35: 0x68c14, // onunhandledrejection
+ 0x37: 0x42a07, // onerror
+ 0x3a: 0x3b08, // basefont
+ 0x3f: 0x1303, // nav
+ 0x40: 0x17704, // kind
+ 0x41: 0x35708, // readonly
+ 0x42: 0x30806, // mglyph
+ 0x44: 0xb202, // li
+ 0x46: 0x2d506, // hidden
+ 0x47: 0x70e03, // svg
+ 0x48: 0x58304, // step
+ 0x49: 0x23f09, // integrity
+ 0x4a: 0x58606, // public
+ 0x4c: 0x1ab03, // col
+ 0x4d: 0x1870a, // blockquote
+ 0x4e: 0x34f02, // h5
+ 0x50: 0x5b908, // progress
+ 0x51: 0x5f505, // sizes
+ 0x52: 0x34502, // h4
+ 0x56: 0x33005, // thead
+ 0x57: 0xd607, // keytype
+ 0x58: 0x5b70a, // onprogress
+ 0x59: 0x44b09, // inputmode
+ 0x5a: 0x3b109, // ondragend
+ 0x5d: 0x3a205, // oncut
+ 0x5e: 0x43706, // spacer
+ 0x5f: 0x1ab08, // colgroup
+ 0x62: 0x16502, // is
+ 0x65: 0x3c02, // as
+ 0x66: 0x54809, // onoffline
+ 0x67: 0x33706, // sorted
+ 0x69: 0x48d10, // onlanguagechange
+ 0x6c: 0x43d0c, // onhashchange
+ 0x6d: 0x9604, // name
+ 0x6e: 0xf505, // tfoot
+ 0x6f: 0x56104, // desc
+ 0x70: 0x33d03, // max
+ 0x72: 0x1ea06, // coords
+ 0x73: 0x30d02, // h3
+ 0x74: 0x6e70e, // onbeforeunload
+ 0x75: 0x9c04, // rows
+ 0x76: 0x63c06, // select
+ 0x77: 0x9805, // meter
+ 0x78: 0x38b06, // itemid
+ 0x79: 0x53c0c, // onmousewheel
+ 0x7a: 0x5c006, // srcdoc
+ 0x7d: 0x1ba05, // track
+ 0x7f: 0x31f08, // itemtype
+ 0x82: 0xa402, // mo
+ 0x83: 0x41b08, // onchange
+ 0x84: 0x33107, // headers
+ 0x85: 0x5cc0c, // onratechange
+ 0x86: 0x60819, // onsecuritypolicyviolation
+ 0x88: 0x4a508, // datalist
+ 0x89: 0x4e80b, // onmousedown
+ 0x8a: 0x1ef04, // slot
+ 0x8b: 0x4b010, // onloadedmetadata
+ 0x8c: 0x1a06, // accept
+ 0x8d: 0x26806, // object
+ 0x91: 0x6b30e, // onvolumechange
+ 0x92: 0x2107, // charset
+ 0x93: 0x27613, // onautocompleteerror
+ 0x94: 0xc113, // allowpaymentrequest
+ 0x95: 0x2804, // body
+ 0x96: 0x10a07, // default
+ 0x97: 0x63c08, // selected
+ 0x98: 0x21e04, // face
+ 0x99: 0x1e505, // shape
+ 0x9b: 0x68408, // ontoggle
+ 0x9e: 0x64b02, // dt
+ 0x9f: 0xb604, // mark
+ 0xa1: 0xb01, // u
+ 0xa4: 0x6ab08, // onunload
+ 0xa5: 0x5d04, // loop
+ 0xa6: 0x16408, // disabled
+ 0xaa: 0x42307, // onended
+ 0xab: 0xb00a, // malignmark
+ 0xad: 0x67b09, // onsuspend
+ 0xae: 0x35105, // mtext
+ 0xaf: 0x64f06, // onsort
+ 0xb0: 0x19d08, // itemprop
+ 0xb3: 0x67109, // itemscope
+ 0xb4: 0x17305, // blink
+ 0xb6: 0x3b106, // ondrag
+ 0xb7: 0xa702, // ul
+ 0xb8: 0x26e04, // form
+ 0xb9: 0x12907, // sandbox
+ 0xba: 0x8b05, // frame
+ 0xbb: 0x1505, // value
+ 0xbc: 0x66209, // onstorage
+ 0xbf: 0xaa07, // acronym
+ 0xc0: 0x19a02, // rt
+ 0xc2: 0x202, // br
+ 0xc3: 0x22608, // fieldset
+ 0xc4: 0x2900d, // typemustmatch
+ 0xc5: 0xa208, // nomodule
+ 0xc6: 0x6c07, // noembed
+ 0xc7: 0x69e0d, // onbeforeprint
+ 0xc8: 0x19106, // button
+ 0xc9: 0x2f507, // onclick
+ 0xca: 0x70407, // summary
+ 0xcd: 0xfb04, // ruby
+ 0xce: 0x56405, // class
+ 0xcf: 0x3f40b, // ondragstart
+ 0xd0: 0x23107, // caption
+ 0xd4: 0xdd0e, // allowusermedia
+ 0xd5: 0x4cf0b, // onloadstart
+ 0xd9: 0x16b03, // div
+ 0xda: 0x4a904, // list
+ 0xdb: 0x32e04, // math
+ 0xdc: 0x44b05, // input
+ 0xdf: 0x3ea0a, // ondragover
+ 0xe0: 0x2de02, // h2
+ 0xe2: 0x1b209, // plaintext
+ 0xe4: 0x4f30c, // onmouseenter
+ 0xe7: 0x47907, // checked
+ 0xe8: 0x47003, // pre
+ 0xea: 0x35f08, // multiple
+ 0xeb: 0xba03, // bdi
+ 0xec: 0x33d09, // maxlength
+ 0xed: 0xcf01, // q
+ 0xee: 0x61f0a, // onauxclick
+ 0xf0: 0x57c03, // wbr
+ 0xf2: 0x3b04, // base
+ 0xf3: 0x6e306, // option
+ 0xf5: 0x41310, // ondurationchange
+ 0xf7: 0x8908, // noframes
+ 0xf9: 0x40508, // dropzone
+ 0xfb: 0x67505, // scope
+ 0xfc: 0x8008, // reversed
+ 0xfd: 0x3ba0b, // ondragenter
+ 0xfe: 0x3fa05, // start
+ 0xff: 0x12f03, // xmp
+ 0x100: 0x5f907, // srclang
+ 0x101: 0x30703, // img
+ 0x104: 0x101, // b
+ 0x105: 0x25403, // for
+ 0x106: 0x10705, // aside
+ 0x107: 0x44907, // oninput
+ 0x108: 0x35604, // area
+ 0x109: 0x2a40a, // formmethod
+ 0x10a: 0x72604, // wrap
+ 0x10c: 0x23c02, // rp
+ 0x10d: 0x46b0a, // onkeypress
+ 0x10e: 0x6802, // tt
+ 0x110: 0x34702, // mi
+ 0x111: 0x36705, // muted
+ 0x112: 0xf303, // alt
+ 0x113: 0x5c504, // code
+ 0x114: 0x6e02, // em
+ 0x115: 0x3c50a, // ondragexit
+ 0x117: 0x9f04, // span
+ 0x119: 0x6d708, // manifest
+ 0x11a: 0x38708, // menuitem
+ 0x11b: 0x58b07, // content
+ 0x11d: 0x6c109, // onwaiting
+ 0x11f: 0x4c609, // onloadend
+ 0x121: 0x37e0d, // oncontextmenu
+ 0x123: 0x56d06, // onblur
+ 0x124: 0x3fc07, // article
+ 0x125: 0x9303, // dir
+ 0x126: 0xef04, // ping
+ 0x127: 0x24c08, // required
+ 0x128: 0x45509, // oninvalid
+ 0x129: 0xb105, // align
+ 0x12b: 0x58a04, // icon
+ 0x12c: 0x64d02, // h6
+ 0x12d: 0x1c404, // cols
+ 0x12e: 0x22e0a, // figcaption
+ 0x12f: 0x45e09, // onkeydown
+ 0x130: 0x66b08, // onsubmit
+ 0x131: 0x14d09, // oncanplay
+ 0x132: 0x70b03, // sup
+ 0x133: 0xc01, // p
+ 0x135: 0x40a09, // onemptied
+ 0x136: 0x39106, // oncopy
+ 0x137: 0x19c04, // cite
+ 0x138: 0x3a70a, // ondblclick
+ 0x13a: 0x50b0b, // onmousemove
+ 0x13c: 0x66d03, // sub
+ 0x13d: 0x48703, // rel
+ 0x13e: 0x5f08, // optgroup
+ 0x142: 0x9c07, // rowspan
+ 0x143: 0x37806, // source
+ 0x144: 0x21608, // noscript
+ 0x145: 0x1a304, // open
+ 0x146: 0x20403, // ins
+ 0x147: 0x2540d, // foreignObject
+ 0x148: 0x5ad0a, // onpopstate
+ 0x14a: 0x28d07, // enctype
+ 0x14b: 0x2760e, // onautocomplete
+ 0x14c: 0x35208, // textarea
+ 0x14e: 0x2780c, // autocomplete
+ 0x14f: 0x15702, // hr
+ 0x150: 0x1de08, // controls
+ 0x151: 0x10902, // id
+ 0x153: 0x2360c, // onafterprint
+ 0x155: 0x2610d, // foreignobject
+ 0x156: 0x32707, // marquee
+ 0x157: 0x59a07, // onpause
+ 0x158: 0x5e602, // dl
+ 0x159: 0x5206, // height
+ 0x15a: 0x34703, // min
+ 0x15b: 0x9307, // dirname
+ 0x15c: 0x1f209, // translate
+ 0x15d: 0x5604, // html
+ 0x15e: 0x34709, // minlength
+ 0x15f: 0x48607, // preload
+ 0x160: 0x71408, // template
+ 0x161: 0x3df0b, // ondragleave
+ 0x162: 0x3a02, // rb
+ 0x164: 0x5c003, // src
+ 0x165: 0x6dd06, // strong
+ 0x167: 0x7804, // samp
+ 0x168: 0x6f307, // address
+ 0x169: 0x55108, // ononline
+ 0x16b: 0x1310b, // placeholder
+ 0x16c: 0x2c406, // target
+ 0x16d: 0x20605, // small
+ 0x16e: 0x6ca07, // onwheel
+ 0x16f: 0x1c90a, // annotation
+ 0x170: 0x4740a, // spellcheck
+ 0x171: 0x7207, // details
+ 0x172: 0x10306, // canvas
+ 0x173: 0x12109, // autofocus
+ 0x174: 0xc05, // param
+ 0x176: 0x46308, // download
+ 0x177: 0x45203, // del
+ 0x178: 0x36c07, // onclose
+ 0x179: 0xb903, // kbd
+ 0x17a: 0x31906, // applet
+ 0x17b: 0x2e004, // href
+ 0x17c: 0x5f108, // onresize
+ 0x17e: 0x49d0c, // onloadeddata
+ 0x180: 0xcc02, // tr
+ 0x181: 0x2c00a, // formtarget
+ 0x182: 0x11005, // title
+ 0x183: 0x6ff05, // style
+ 0x184: 0xd206, // strike
+ 0x185: 0x59e06, // usemap
+ 0x186: 0x2fc06, // iframe
+ 0x187: 0x1004, // main
+ 0x189: 0x7b07, // picture
+ 0x18c: 0x31605, // ismap
+ 0x18e: 0x4a504, // data
+ 0x18f: 0x5905, // label
+ 0x191: 0x3d10e, // referrerpolicy
+ 0x192: 0x15602, // th
+ 0x194: 0x53606, // prompt
+ 0x195: 0x56807, // section
+ 0x197: 0x6d107, // optimum
+ 0x198: 0x2db04, // high
+ 0x199: 0x15c02, // h1
+ 0x19a: 0x65909, // onstalled
+ 0x19b: 0x16d03, // var
+ 0x19c: 0x4204, // time
+ 0x19e: 0x67402, // ms
+ 0x19f: 0x33106, // header
+ 0x1a0: 0x4da09, // onmessage
+ 0x1a1: 0x1a605, // nonce
+ 0x1a2: 0x26e0a, // formaction
+ 0x1a3: 0x22006, // center
+ 0x1a4: 0x3704, // nobr
+ 0x1a5: 0x59505, // table
+ 0x1a6: 0x4a907, // listing
+ 0x1a7: 0x18106, // legend
+ 0x1a9: 0x29b09, // challenge
+ 0x1aa: 0x24806, // figure
+ 0x1ab: 0xe605, // media
+ 0x1ae: 0xd904, // type
+ 0x1af: 0x3f04, // font
+ 0x1b0: 0x4da0e, // onmessageerror
+ 0x1b1: 0x37108, // seamless
+ 0x1b2: 0x8703, // dfn
+ 0x1b3: 0x5c705, // defer
+ 0x1b4: 0xc303, // low
+ 0x1b5: 0x19a03, // rtc
+ 0x1b6: 0x5230b, // onmouseover
+ 0x1b7: 0x2b20a, // novalidate
+ 0x1b8: 0x71c0a, // workertype
+ 0x1ba: 0x3cd07, // itemref
+ 0x1bd: 0x1, // a
+ 0x1be: 0x31803, // map
+ 0x1bf: 0x400c, // ontimeupdate
+ 0x1c0: 0x15e07, // bgsound
+ 0x1c1: 0x3206, // keygen
+ 0x1c2: 0x2705, // tbody
+ 0x1c5: 0x64406, // onshow
+ 0x1c7: 0x2501, // s
+ 0x1c8: 0x6607, // pattern
+ 0x1cc: 0x14d10, // oncanplaythrough
+ 0x1ce: 0x2d702, // dd
+ 0x1cf: 0x6f906, // srcset
+ 0x1d0: 0x17003, // big
+ 0x1d2: 0x65108, // sortable
+ 0x1d3: 0x48007, // onkeyup
+ 0x1d5: 0x5a406, // onplay
+ 0x1d7: 0x4b804, // meta
+ 0x1d8: 0x40306, // ondrop
+ 0x1da: 0x60008, // onscroll
+ 0x1db: 0x1fb0b, // crossorigin
+ 0x1dc: 0x5730a, // onpageshow
+ 0x1dd: 0x4, // abbr
+ 0x1de: 0x9202, // td
+ 0x1df: 0x58b0f, // contenteditable
+ 0x1e0: 0x27206, // action
+ 0x1e1: 0x1400b, // playsinline
+ 0x1e2: 0x43107, // onfocus
+ 0x1e3: 0x2e008, // hreflang
+ 0x1e5: 0x5160a, // onmouseout
+ 0x1e6: 0x5ea07, // onreset
+ 0x1e7: 0x13c08, // autoplay
+ 0x1e8: 0x63109, // onseeking
+ 0x1ea: 0x67506, // scoped
+ 0x1ec: 0x30a, // radiogroup
+ 0x1ee: 0x3800b, // contextmenu
+ 0x1ef: 0x52e09, // onmouseup
+ 0x1f1: 0x2ca06, // hgroup
+ 0x1f2: 0x2080f, // allowfullscreen
+ 0x1f3: 0x4be08, // tabindex
+ 0x1f6: 0x30f07, // isindex
+ 0x1f7: 0x1a0e, // accept-charset
+ 0x1f8: 0x2ae0e, // formnovalidate
+ 0x1fb: 0x1c90e, // annotation-xml
+ 0x1fc: 0x6e05, // embed
+ 0x1fd: 0x21806, // script
+ 0x1fe: 0xbb06, // dialog
+ 0x1ff: 0x1d707, // command
+}
+
+const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" +
+ "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" +
+ "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" +
+ "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" +
+ "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" +
+ "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" +
+ "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" +
+ "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" +
+ "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" +
+ "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" +
+ "ignObjectforeignobjectformactionautocompleteerrorformenctype" +
+ "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" +
+ "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" +
+ "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" +
+ "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" +
+ "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" +
+ "enterondragexitemreferrerpolicyondragleaveondragoverondragst" +
+ "articleondropzonemptiedondurationchangeonendedonerroronfocus" +
+ "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" +
+ "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" +
+ "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" +
+ "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" +
+ "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" +
+ "classectionbluronpageshowbronpastepublicontenteditableonpaus" +
+ "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" +
+ "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" +
+ "violationauxclickonseekedonseekingonselectedonshowidth6onsor" +
+ "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" +
+ "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" +
+ "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" +
+ "arysupsvgsystemplateworkertypewrap"
diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go
new file mode 100644
index 000000000000..ff7acf2d5b4b
--- /dev/null
+++ b/vendor/golang.org/x/net/html/const.go
@@ -0,0 +1,111 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+// Section 12.2.4.2 of the HTML5 specification says "The following elements
+// have varying levels of special parsing rules".
+// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
+var isSpecialElementMap = map[string]bool{
+ "address": true,
+ "applet": true,
+ "area": true,
+ "article": true,
+ "aside": true,
+ "base": true,
+ "basefont": true,
+ "bgsound": true,
+ "blockquote": true,
+ "body": true,
+ "br": true,
+ "button": true,
+ "caption": true,
+ "center": true,
+ "col": true,
+ "colgroup": true,
+ "dd": true,
+ "details": true,
+ "dir": true,
+ "div": true,
+ "dl": true,
+ "dt": true,
+ "embed": true,
+ "fieldset": true,
+ "figcaption": true,
+ "figure": true,
+ "footer": true,
+ "form": true,
+ "frame": true,
+ "frameset": true,
+ "h1": true,
+ "h2": true,
+ "h3": true,
+ "h4": true,
+ "h5": true,
+ "h6": true,
+ "head": true,
+ "header": true,
+ "hgroup": true,
+ "hr": true,
+ "html": true,
+ "iframe": true,
+ "img": true,
+ "input": true,
+ "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility.
+ "li": true,
+ "link": true,
+ "listing": true,
+ "main": true,
+ "marquee": true,
+ "menu": true,
+ "meta": true,
+ "nav": true,
+ "noembed": true,
+ "noframes": true,
+ "noscript": true,
+ "object": true,
+ "ol": true,
+ "p": true,
+ "param": true,
+ "plaintext": true,
+ "pre": true,
+ "script": true,
+ "section": true,
+ "select": true,
+ "source": true,
+ "style": true,
+ "summary": true,
+ "table": true,
+ "tbody": true,
+ "td": true,
+ "template": true,
+ "textarea": true,
+ "tfoot": true,
+ "th": true,
+ "thead": true,
+ "title": true,
+ "tr": true,
+ "track": true,
+ "ul": true,
+ "wbr": true,
+ "xmp": true,
+}
+
+func isSpecialElement(element *Node) bool {
+ switch element.Namespace {
+ case "", "html":
+ return isSpecialElementMap[element.Data]
+ case "math":
+ switch element.Data {
+ case "mi", "mo", "mn", "ms", "mtext", "annotation-xml":
+ return true
+ }
+ case "svg":
+ switch element.Data {
+ case "foreignObject", "desc", "title":
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go
new file mode 100644
index 000000000000..3a7e5ab1765b
--- /dev/null
+++ b/vendor/golang.org/x/net/html/doc.go
@@ -0,0 +1,127 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package html implements an HTML5-compliant tokenizer and parser.
+
+Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
+caller's responsibility to ensure that r provides UTF-8 encoded HTML.
+
+ z := html.NewTokenizer(r)
+
+Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
+which parses the next token and returns its type, or an error:
+
+ for {
+ tt := z.Next()
+ if tt == html.ErrorToken {
+ // ...
+ return ...
+ }
+ // Process the current token.
+ }
+
+There are two APIs for retrieving the current token. The high-level API is to
+call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
+allow optionally calling Raw after Next but before Token, Text, TagName, or
+TagAttr. In EBNF notation, the valid call sequence per token is:
+
+ Next {Raw} [ Token | Text | TagName {TagAttr} ]
+
+Token returns an independent data structure that completely describes a token.
+Entities (such as "&lt;") are unescaped, tag names and attribute keys are
+lower-cased, and attributes are collected into a []Attribute. For example:
+
+ for {
+ if z.Next() == html.ErrorToken {
+ // Returning io.EOF indicates success.
+ return z.Err()
+ }
+ emitToken(z.Token())
+ }
+
+The low-level API performs fewer allocations and copies, but the contents of
+the []byte values returned by Text, TagName and TagAttr may change on the next
+call to Next. For example, to extract an HTML page's anchor text:
+
+ depth := 0
+ for {
+ tt := z.Next()
+ switch tt {
+ case html.ErrorToken:
+ return z.Err()
+ case html.TextToken:
+ if depth > 0 {
+ // emitBytes should copy the []byte it receives,
+ // if it doesn't process it immediately.
+ emitBytes(z.Text())
+ }
+ case html.StartTagToken, html.EndTagToken:
+ tn, _ := z.TagName()
+ if len(tn) == 1 && tn[0] == 'a' {
+ if tt == html.StartTagToken {
+ depth++
+ } else {
+ depth--
+ }
+ }
+ }
+ }
+
+Parsing is done by calling Parse with an io.Reader, which returns the root of
+the parse tree (the document element) as a *Node. It is the caller's
+responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
+example, to process each anchor node in depth-first order:
+
+ doc, err := html.Parse(r)
+ if err != nil {
+ // ...
+ }
+ var f func(*html.Node)
+ f = func(n *html.Node) {
+ if n.Type == html.ElementNode && n.Data == "a" {
+ // Do something with n...
+ }
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ f(c)
+ }
+ }
+ f(doc)
+
+The relevant specifications include:
+https://html.spec.whatwg.org/multipage/syntax.html and
+https://html.spec.whatwg.org/multipage/syntax.html#tokenization
+
+# Security Considerations
+
+Care should be taken when parsing and interpreting HTML, whether full documents
+or fragments, within the framework of the HTML specification, especially with
+regard to untrusted inputs.
+
+This package provides both a tokenizer and a parser, which implement the
+tokenization stage, and the tokenization and tree construction stages, of the
+WHATWG HTML parsing specification respectively. While the tokenizer parses and
+normalizes individual HTML tokens, only the parser constructs the DOM tree from
+the tokenized HTML, as described in the tree construction stage of the
+specification, dynamically modifying or extending the document's DOM tree.
+
+If your use case requires semantically well-formed HTML documents, as defined by
+the WHATWG specification, the parser should be used rather than the tokenizer.
+
+In security contexts, if trust decisions are being made using the tokenized or
+parsed content, the input must be re-serialized (for instance by using Render or
+Token.String) in order for those trust decisions to hold, as the process of
+tokenization or parsing may alter the content.
+*/
+package html // import "golang.org/x/net/html"
+
+// The tokenization algorithm implemented by this package is not a line-by-line
+// transliteration of the relatively verbose state-machine in the WHATWG
+// specification. A more direct approach is used instead, where the program
+// counter implies the state, such as whether it is tokenizing a tag or a text
+// node. Specification compliance is verified by checking expected and actual
+// outputs over a test suite rather than aiming for algorithmic fidelity.
+
+// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
+// TODO(nigeltao): How does parsing interact with a JavaScript engine?
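The anchor-text loop from the doc comment above, assembled into a self-contained program (a sketch; the input HTML is illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	r := strings.NewReader(`<p><a href="/x">first</a> and <a href="/y">second</a></p>`)
	z := html.NewTokenizer(r)
	depth := 0
	for {
		tt := z.Next()
		switch tt {
		case html.ErrorToken:
			// z.Err() reports io.EOF once the input is exhausted.
			return
		case html.TextToken:
			if depth > 0 {
				fmt.Printf("anchor text: %q\n", z.Text())
			}
		case html.StartTagToken, html.EndTagToken:
			// Track how deeply we are nested inside <a> elements.
			if tn, _ := z.TagName(); len(tn) == 1 && tn[0] == 'a' {
				if tt == html.StartTagToken {
					depth++
				} else {
					depth--
				}
			}
		}
	}
}
```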
diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go
new file mode 100644
index 000000000000..c484e5a94fbf
--- /dev/null
+++ b/vendor/golang.org/x/net/html/doctype.go
@@ -0,0 +1,156 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "strings"
+)
+
+// parseDoctype parses the data from a DoctypeToken into a name,
+// public identifier, and system identifier. It returns a Node whose Type
+// is DoctypeNode, whose Data is the name, and which has attributes
+// named "system" and "public" for the two identifiers if they were present.
+// quirks is whether the document should be parsed in "quirks mode".
+func parseDoctype(s string) (n *Node, quirks bool) {
+ n = &Node{Type: DoctypeNode}
+
+ // Find the name.
+ space := strings.IndexAny(s, whitespace)
+ if space == -1 {
+ space = len(s)
+ }
+ n.Data = s[:space]
+ // The comparison to "html" is case-sensitive.
+ if n.Data != "html" {
+ quirks = true
+ }
+ n.Data = strings.ToLower(n.Data)
+ s = strings.TrimLeft(s[space:], whitespace)
+
+ if len(s) < 6 {
+ // It can't start with "PUBLIC" or "SYSTEM".
+ // Ignore the rest of the string.
+ return n, quirks || s != ""
+ }
+
+ key := strings.ToLower(s[:6])
+ s = s[6:]
+ for key == "public" || key == "system" {
+ s = strings.TrimLeft(s, whitespace)
+ if s == "" {
+ break
+ }
+ quote := s[0]
+ if quote != '"' && quote != '\'' {
+ break
+ }
+ s = s[1:]
+ q := strings.IndexRune(s, rune(quote))
+ var id string
+ if q == -1 {
+ id = s
+ s = ""
+ } else {
+ id = s[:q]
+ s = s[q+1:]
+ }
+ n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
+ if key == "public" {
+ key = "system"
+ } else {
+ key = ""
+ }
+ }
+
+ if key != "" || s != "" {
+ quirks = true
+ } else if len(n.Attr) > 0 {
+ if n.Attr[0].Key == "public" {
+ public := strings.ToLower(n.Attr[0].Val)
+ switch public {
+ case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
+ quirks = true
+ default:
+ for _, q := range quirkyIDs {
+ if strings.HasPrefix(public, q) {
+ quirks = true
+ break
+ }
+ }
+ }
+ // The following two public IDs only cause quirks mode if there is no system ID.
+ if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
+ strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
+ quirks = true
+ }
+ }
+ if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
+ strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
+ quirks = true
+ }
+ }
+
+ return n, quirks
+}
+
+// quirkyIDs is a list of public doctype identifiers that cause a document
+// to be interpreted in quirks mode. The identifiers should be in lower case.
+var quirkyIDs = []string{
+ "+//silmaril//dtd html pro v0r11 19970101//",
+ "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
+ "-//as//dtd html 3.0 aswedit + extensions//",
+ "-//ietf//dtd html 2.0 level 1//",
+ "-//ietf//dtd html 2.0 level 2//",
+ "-//ietf//dtd html 2.0 strict level 1//",
+ "-//ietf//dtd html 2.0 strict level 2//",
+ "-//ietf//dtd html 2.0 strict//",
+ "-//ietf//dtd html 2.0//",
+ "-//ietf//dtd html 2.1e//",
+ "-//ietf//dtd html 3.0//",
+ "-//ietf//dtd html 3.2 final//",
+ "-//ietf//dtd html 3.2//",
+ "-//ietf//dtd html 3//",
+ "-//ietf//dtd html level 0//",
+ "-//ietf//dtd html level 1//",
+ "-//ietf//dtd html level 2//",
+ "-//ietf//dtd html level 3//",
+ "-//ietf//dtd html strict level 0//",
+ "-//ietf//dtd html strict level 1//",
+ "-//ietf//dtd html strict level 2//",
+ "-//ietf//dtd html strict level 3//",
+ "-//ietf//dtd html strict//",
+ "-//ietf//dtd html//",
+ "-//metrius//dtd metrius presentational//",
+ "-//microsoft//dtd internet explorer 2.0 html strict//",
+ "-//microsoft//dtd internet explorer 2.0 html//",
+ "-//microsoft//dtd internet explorer 2.0 tables//",
+ "-//microsoft//dtd internet explorer 3.0 html strict//",
+ "-//microsoft//dtd internet explorer 3.0 html//",
+ "-//microsoft//dtd internet explorer 3.0 tables//",
+ "-//netscape comm. corp.//dtd html//",
+ "-//netscape comm. corp.//dtd strict html//",
+ "-//o'reilly and associates//dtd html 2.0//",
+ "-//o'reilly and associates//dtd html extended 1.0//",
+ "-//o'reilly and associates//dtd html extended relaxed 1.0//",
+ "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
+ "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
+ "-//spyglass//dtd html 2.0 extended//",
+ "-//sq//dtd html 2.0 hotmetal + extensions//",
+ "-//sun microsystems corp.//dtd hotjava html//",
+ "-//sun microsystems corp.//dtd hotjava strict html//",
+ "-//w3c//dtd html 3 1995-03-24//",
+ "-//w3c//dtd html 3.2 draft//",
+ "-//w3c//dtd html 3.2 final//",
+ "-//w3c//dtd html 3.2//",
+ "-//w3c//dtd html 3.2s draft//",
+ "-//w3c//dtd html 4.0 frameset//",
+ "-//w3c//dtd html 4.0 transitional//",
+ "-//w3c//dtd html experimental 19960712//",
+ "-//w3c//dtd html experimental 970421//",
+ "-//w3c//dtd w3 html//",
+ "-//w3o//dtd w3 html 3.0//",
+ "-//webtechs//dtd mozilla html 2.0//",
+ "-//webtechs//dtd mozilla html//",
+}
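parseDoctype is unexported, but its output is observable through html.Parse: the DoctypeNode becomes the document's first child, with the two identifiers surfaced as "public" and "system" attributes. A sketch under that assumption (the sample doctype is illustrative; HTML 4.01 Strict with a system ID is not on the quirky list above):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	const page = `<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"><p>hi</p>`
	doc, err := html.Parse(strings.NewReader(page))
	if err != nil {
		panic(err)
	}
	dt := doc.FirstChild // the DoctypeNode built by parseDoctype
	fmt.Println(dt.Type == html.DoctypeNode, dt.Data) // true html
	for _, a := range dt.Attr {
		fmt.Printf("%s=%q\n", a.Key, a.Val) // public=... then system=...
	}
}
```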
diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go
new file mode 100644
index 000000000000..b628880a014d
--- /dev/null
+++ b/vendor/golang.org/x/net/html/entity.go
@@ -0,0 +1,2253 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+// All entities that do not end with ';' are 6 or fewer bytes long.
+const longestEntityWithoutSemicolon = 6
+
+// entity is a map from HTML entity names to their values. The semicolon matters:
+// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references
+// lists both "amp" and "amp;" as two separate entries.
+//
+// Note that the HTML5 list is larger than the HTML4 list at
+// http://www.w3.org/TR/html4/sgml/entities.html
+var entity = map[string]rune{
+ "AElig;": '\U000000C6',
+ "AMP;": '\U00000026',
+ "Aacute;": '\U000000C1',
+ "Abreve;": '\U00000102',
+ "Acirc;": '\U000000C2',
+ "Acy;": '\U00000410',
+ "Afr;": '\U0001D504',
+ "Agrave;": '\U000000C0',
+ "Alpha;": '\U00000391',
+ "Amacr;": '\U00000100',
+ "And;": '\U00002A53',
+ "Aogon;": '\U00000104',
+ "Aopf;": '\U0001D538',
+ "ApplyFunction;": '\U00002061',
+ "Aring;": '\U000000C5',
+ "Ascr;": '\U0001D49C',
+ "Assign;": '\U00002254',
+ "Atilde;": '\U000000C3',
+ "Auml;": '\U000000C4',
+ "Backslash;": '\U00002216',
+ "Barv;": '\U00002AE7',
+ "Barwed;": '\U00002306',
+ "Bcy;": '\U00000411',
+ "Because;": '\U00002235',
+ "Bernoullis;": '\U0000212C',
+ "Beta;": '\U00000392',
+ "Bfr;": '\U0001D505',
+ "Bopf;": '\U0001D539',
+ "Breve;": '\U000002D8',
+ "Bscr;": '\U0000212C',
+ "Bumpeq;": '\U0000224E',
+ "CHcy;": '\U00000427',
+ "COPY;": '\U000000A9',
+ "Cacute;": '\U00000106',
+ "Cap;": '\U000022D2',
+ "CapitalDifferentialD;": '\U00002145',
+ "Cayleys;": '\U0000212D',
+ "Ccaron;": '\U0000010C',
+ "Ccedil;": '\U000000C7',
+ "Ccirc;": '\U00000108',
+ "Cconint;": '\U00002230',
+ "Cdot;": '\U0000010A',
+ "Cedilla;": '\U000000B8',
+ "CenterDot;": '\U000000B7',
+ "Cfr;": '\U0000212D',
+ "Chi;": '\U000003A7',
+ "CircleDot;": '\U00002299',
+ "CircleMinus;": '\U00002296',
+ "CirclePlus;": '\U00002295',
+ "CircleTimes;": '\U00002297',
+ "ClockwiseContourIntegral;": '\U00002232',
+ "CloseCurlyDoubleQuote;": '\U0000201D',
+ "CloseCurlyQuote;": '\U00002019',
+ "Colon;": '\U00002237',
+ "Colone;": '\U00002A74',
+ "Congruent;": '\U00002261',
+ "Conint;": '\U0000222F',
+ "ContourIntegral;": '\U0000222E',
+ "Copf;": '\U00002102',
+ "Coproduct;": '\U00002210',
+ "CounterClockwiseContourIntegral;": '\U00002233',
+ "Cross;": '\U00002A2F',
+ "Cscr;": '\U0001D49E',
+ "Cup;": '\U000022D3',
+ "CupCap;": '\U0000224D',
+ "DD;": '\U00002145',
+ "DDotrahd;": '\U00002911',
+ "DJcy;": '\U00000402',
+ "DScy;": '\U00000405',
+ "DZcy;": '\U0000040F',
+ "Dagger;": '\U00002021',
+ "Darr;": '\U000021A1',
+ "Dashv;": '\U00002AE4',
+ "Dcaron;": '\U0000010E',
+ "Dcy;": '\U00000414',
+ "Del;": '\U00002207',
+ "Delta;": '\U00000394',
+ "Dfr;": '\U0001D507',
+ "DiacriticalAcute;": '\U000000B4',
+ "DiacriticalDot;": '\U000002D9',
+ "DiacriticalDoubleAcute;": '\U000002DD',
+ "DiacriticalGrave;": '\U00000060',
+ "DiacriticalTilde;": '\U000002DC',
+ "Diamond;": '\U000022C4',
+ "DifferentialD;": '\U00002146',
+ "Dopf;": '\U0001D53B',
+ "Dot;": '\U000000A8',
+ "DotDot;": '\U000020DC',
+ "DotEqual;": '\U00002250',
+ "DoubleContourIntegral;": '\U0000222F',
+ "DoubleDot;": '\U000000A8',
+ "DoubleDownArrow;": '\U000021D3',
+ "DoubleLeftArrow;": '\U000021D0',
+ "DoubleLeftRightArrow;": '\U000021D4',
+ "DoubleLeftTee;": '\U00002AE4',
+ "DoubleLongLeftArrow;": '\U000027F8',
+ "DoubleLongLeftRightArrow;": '\U000027FA',
+ "DoubleLongRightArrow;": '\U000027F9',
+ "DoubleRightArrow;": '\U000021D2',
+ "DoubleRightTee;": '\U000022A8',
+ "DoubleUpArrow;": '\U000021D1',
+ "DoubleUpDownArrow;": '\U000021D5',
+ "DoubleVerticalBar;": '\U00002225',
+ "DownArrow;": '\U00002193',
+ "DownArrowBar;": '\U00002913',
+ "DownArrowUpArrow;": '\U000021F5',
+ "DownBreve;": '\U00000311',
+ "DownLeftRightVector;": '\U00002950',
+ "DownLeftTeeVector;": '\U0000295E',
+ "DownLeftVector;": '\U000021BD',
+ "DownLeftVectorBar;": '\U00002956',
+ "DownRightTeeVector;": '\U0000295F',
+ "DownRightVector;": '\U000021C1',
+ "DownRightVectorBar;": '\U00002957',
+ "DownTee;": '\U000022A4',
+ "DownTeeArrow;": '\U000021A7',
+ "Downarrow;": '\U000021D3',
+ "Dscr;": '\U0001D49F',
+ "Dstrok;": '\U00000110',
+ "ENG;": '\U0000014A',
+ "ETH;": '\U000000D0',
+ "Eacute;": '\U000000C9',
+ "Ecaron;": '\U0000011A',
+ "Ecirc;": '\U000000CA',
+ "Ecy;": '\U0000042D',
+ "Edot;": '\U00000116',
+ "Efr;": '\U0001D508',
+ "Egrave;": '\U000000C8',
+ "Element;": '\U00002208',
+ "Emacr;": '\U00000112',
+ "EmptySmallSquare;": '\U000025FB',
+ "EmptyVerySmallSquare;": '\U000025AB',
+ "Eogon;": '\U00000118',
+ "Eopf;": '\U0001D53C',
+ "Epsilon;": '\U00000395',
+ "Equal;": '\U00002A75',
+ "EqualTilde;": '\U00002242',
+ "Equilibrium;": '\U000021CC',
+ "Escr;": '\U00002130',
+ "Esim;": '\U00002A73',
+ "Eta;": '\U00000397',
+ "Euml;": '\U000000CB',
+ "Exists;": '\U00002203',
+ "ExponentialE;": '\U00002147',
+ "Fcy;": '\U00000424',
+ "Ffr;": '\U0001D509',
+ "FilledSmallSquare;": '\U000025FC',
+ "FilledVerySmallSquare;": '\U000025AA',
+ "Fopf;": '\U0001D53D',
+ "ForAll;": '\U00002200',
+ "Fouriertrf;": '\U00002131',
+ "Fscr;": '\U00002131',
+ "GJcy;": '\U00000403',
+ "GT;": '\U0000003E',
+ "Gamma;": '\U00000393',
+ "Gammad;": '\U000003DC',
+ "Gbreve;": '\U0000011E',
+ "Gcedil;": '\U00000122',
+ "Gcirc;": '\U0000011C',
+ "Gcy;": '\U00000413',
+ "Gdot;": '\U00000120',
+ "Gfr;": '\U0001D50A',
+ "Gg;": '\U000022D9',
+ "Gopf;": '\U0001D53E',
+ "GreaterEqual;": '\U00002265',
+ "GreaterEqualLess;": '\U000022DB',
+ "GreaterFullEqual;": '\U00002267',
+ "GreaterGreater;": '\U00002AA2',
+ "GreaterLess;": '\U00002277',
+ "GreaterSlantEqual;": '\U00002A7E',
+ "GreaterTilde;": '\U00002273',
+ "Gscr;": '\U0001D4A2',
+ "Gt;": '\U0000226B',
+ "HARDcy;": '\U0000042A',
+ "Hacek;": '\U000002C7',
+ "Hat;": '\U0000005E',
+ "Hcirc;": '\U00000124',
+ "Hfr;": '\U0000210C',
+ "HilbertSpace;": '\U0000210B',
+ "Hopf;": '\U0000210D',
+ "HorizontalLine;": '\U00002500',
+ "Hscr;": '\U0000210B',
+ "Hstrok;": '\U00000126',
+ "HumpDownHump;": '\U0000224E',
+ "HumpEqual;": '\U0000224F',
+ "IEcy;": '\U00000415',
+ "IJlig;": '\U00000132',
+ "IOcy;": '\U00000401',
+ "Iacute;": '\U000000CD',
+ "Icirc;": '\U000000CE',
+ "Icy;": '\U00000418',
+ "Idot;": '\U00000130',
+ "Ifr;": '\U00002111',
+ "Igrave;": '\U000000CC',
+ "Im;": '\U00002111',
+ "Imacr;": '\U0000012A',
+ "ImaginaryI;": '\U00002148',
+ "Implies;": '\U000021D2',
+ "Int;": '\U0000222C',
+ "Integral;": '\U0000222B',
+ "Intersection;": '\U000022C2',
+ "InvisibleComma;": '\U00002063',
+ "InvisibleTimes;": '\U00002062',
+ "Iogon;": '\U0000012E',
+ "Iopf;": '\U0001D540',
+ "Iota;": '\U00000399',
+ "Iscr;": '\U00002110',
+ "Itilde;": '\U00000128',
+ "Iukcy;": '\U00000406',
+ "Iuml;": '\U000000CF',
+ "Jcirc;": '\U00000134',
+ "Jcy;": '\U00000419',
+ "Jfr;": '\U0001D50D',
+ "Jopf;": '\U0001D541',
+ "Jscr;": '\U0001D4A5',
+ "Jsercy;": '\U00000408',
+ "Jukcy;": '\U00000404',
+ "KHcy;": '\U00000425',
+ "KJcy;": '\U0000040C',
+ "Kappa;": '\U0000039A',
+ "Kcedil;": '\U00000136',
+ "Kcy;": '\U0000041A',
+ "Kfr;": '\U0001D50E',
+ "Kopf;": '\U0001D542',
+ "Kscr;": '\U0001D4A6',
+ "LJcy;": '\U00000409',
+ "LT;": '\U0000003C',
+ "Lacute;": '\U00000139',
+ "Lambda;": '\U0000039B',
+ "Lang;": '\U000027EA',
+ "Laplacetrf;": '\U00002112',
+ "Larr;": '\U0000219E',
+ "Lcaron;": '\U0000013D',
+ "Lcedil;": '\U0000013B',
+ "Lcy;": '\U0000041B',
+ "LeftAngleBracket;": '\U000027E8',
+ "LeftArrow;": '\U00002190',
+ "LeftArrowBar;": '\U000021E4',
+ "LeftArrowRightArrow;": '\U000021C6',
+ "LeftCeiling;": '\U00002308',
+ "LeftDoubleBracket;": '\U000027E6',
+ "LeftDownTeeVector;": '\U00002961',
+ "LeftDownVector;": '\U000021C3',
+ "LeftDownVectorBar;": '\U00002959',
+ "LeftFloor;": '\U0000230A',
+ "LeftRightArrow;": '\U00002194',
+ "LeftRightVector;": '\U0000294E',
+ "LeftTee;": '\U000022A3',
+ "LeftTeeArrow;": '\U000021A4',
+ "LeftTeeVector;": '\U0000295A',
+ "LeftTriangle;": '\U000022B2',
+ "LeftTriangleBar;": '\U000029CF',
+ "LeftTriangleEqual;": '\U000022B4',
+ "LeftUpDownVector;": '\U00002951',
+ "LeftUpTeeVector;": '\U00002960',
+ "LeftUpVector;": '\U000021BF',
+ "LeftUpVectorBar;": '\U00002958',
+ "LeftVector;": '\U000021BC',
+ "LeftVectorBar;": '\U00002952',
+ "Leftarrow;": '\U000021D0',
+ "Leftrightarrow;": '\U000021D4',
+ "LessEqualGreater;": '\U000022DA',
+ "LessFullEqual;": '\U00002266',
+ "LessGreater;": '\U00002276',
+ "LessLess;": '\U00002AA1',
+ "LessSlantEqual;": '\U00002A7D',
+ "LessTilde;": '\U00002272',
+ "Lfr;": '\U0001D50F',
+ "Ll;": '\U000022D8',
+ "Lleftarrow;": '\U000021DA',
+ "Lmidot;": '\U0000013F',
+ "LongLeftArrow;": '\U000027F5',
+ "LongLeftRightArrow;": '\U000027F7',
+ "LongRightArrow;": '\U000027F6',
+ "Longleftarrow;": '\U000027F8',
+ "Longleftrightarrow;": '\U000027FA',
+ "Longrightarrow;": '\U000027F9',
+ "Lopf;": '\U0001D543',
+ "LowerLeftArrow;": '\U00002199',
+ "LowerRightArrow;": '\U00002198',
+ "Lscr;": '\U00002112',
+ "Lsh;": '\U000021B0',
+ "Lstrok;": '\U00000141',
+ "Lt;": '\U0000226A',
+ "Map;": '\U00002905',
+ "Mcy;": '\U0000041C',
+ "MediumSpace;": '\U0000205F',
+ "Mellintrf;": '\U00002133',
+ "Mfr;": '\U0001D510',
+ "MinusPlus;": '\U00002213',
+ "Mopf;": '\U0001D544',
+ "Mscr;": '\U00002133',
+ "Mu;": '\U0000039C',
+ "NJcy;": '\U0000040A',
+ "Nacute;": '\U00000143',
+ "Ncaron;": '\U00000147',
+ "Ncedil;": '\U00000145',
+ "Ncy;": '\U0000041D',
+ "NegativeMediumSpace;": '\U0000200B',
+ "NegativeThickSpace;": '\U0000200B',
+ "NegativeThinSpace;": '\U0000200B',
+ "NegativeVeryThinSpace;": '\U0000200B',
+ "NestedGreaterGreater;": '\U0000226B',
+ "NestedLessLess;": '\U0000226A',
+ "NewLine;": '\U0000000A',
+ "Nfr;": '\U0001D511',
+ "NoBreak;": '\U00002060',
+ "NonBreakingSpace;": '\U000000A0',
+ "Nopf;": '\U00002115',
+ "Not;": '\U00002AEC',
+ "NotCongruent;": '\U00002262',
+ "NotCupCap;": '\U0000226D',
+ "NotDoubleVerticalBar;": '\U00002226',
+ "NotElement;": '\U00002209',
+ "NotEqual;": '\U00002260',
+ "NotExists;": '\U00002204',
+ "NotGreater;": '\U0000226F',
+ "NotGreaterEqual;": '\U00002271',
+ "NotGreaterLess;": '\U00002279',
+ "NotGreaterTilde;": '\U00002275',
+ "NotLeftTriangle;": '\U000022EA',
+ "NotLeftTriangleEqual;": '\U000022EC',
+ "NotLess;": '\U0000226E',
+ "NotLessEqual;": '\U00002270',
+ "NotLessGreater;": '\U00002278',
+ "NotLessTilde;": '\U00002274',
+ "NotPrecedes;": '\U00002280',
+ "NotPrecedesSlantEqual;": '\U000022E0',
+ "NotReverseElement;": '\U0000220C',
+ "NotRightTriangle;": '\U000022EB',
+ "NotRightTriangleEqual;": '\U000022ED',
+ "NotSquareSubsetEqual;": '\U000022E2',
+ "NotSquareSupersetEqual;": '\U000022E3',
+ "NotSubsetEqual;": '\U00002288',
+ "NotSucceeds;": '\U00002281',
+ "NotSucceedsSlantEqual;": '\U000022E1',
+ "NotSupersetEqual;": '\U00002289',
+ "NotTilde;": '\U00002241',
+ "NotTildeEqual;": '\U00002244',
+ "NotTildeFullEqual;": '\U00002247',
+ "NotTildeTilde;": '\U00002249',
+ "NotVerticalBar;": '\U00002224',
+ "Nscr;": '\U0001D4A9',
+ "Ntilde;": '\U000000D1',
+ "Nu;": '\U0000039D',
+ "OElig;": '\U00000152',
+ "Oacute;": '\U000000D3',
+ "Ocirc;": '\U000000D4',
+ "Ocy;": '\U0000041E',
+ "Odblac;": '\U00000150',
+ "Ofr;": '\U0001D512',
+ "Ograve;": '\U000000D2',
+ "Omacr;": '\U0000014C',
+ "Omega;": '\U000003A9',
+ "Omicron;": '\U0000039F',
+ "Oopf;": '\U0001D546',
+ "OpenCurlyDoubleQuote;": '\U0000201C',
+ "OpenCurlyQuote;": '\U00002018',
+ "Or;": '\U00002A54',
+ "Oscr;": '\U0001D4AA',
+ "Oslash;": '\U000000D8',
+ "Otilde;": '\U000000D5',
+ "Otimes;": '\U00002A37',
+ "Ouml;": '\U000000D6',
+ "OverBar;": '\U0000203E',
+ "OverBrace;": '\U000023DE',
+ "OverBracket;": '\U000023B4',
+ "OverParenthesis;": '\U000023DC',
+ "PartialD;": '\U00002202',
+ "Pcy;": '\U0000041F',
+ "Pfr;": '\U0001D513',
+ "Phi;": '\U000003A6',
+ "Pi;": '\U000003A0',
+ "PlusMinus;": '\U000000B1',
+ "Poincareplane;": '\U0000210C',
+ "Popf;": '\U00002119',
+ "Pr;": '\U00002ABB',
+ "Precedes;": '\U0000227A',
+ "PrecedesEqual;": '\U00002AAF',
+ "PrecedesSlantEqual;": '\U0000227C',
+ "PrecedesTilde;": '\U0000227E',
+ "Prime;": '\U00002033',
+ "Product;": '\U0000220F',
+ "Proportion;": '\U00002237',
+ "Proportional;": '\U0000221D',
+ "Pscr;": '\U0001D4AB',
+ "Psi;": '\U000003A8',
+ "QUOT;": '\U00000022',
+ "Qfr;": '\U0001D514',
+ "Qopf;": '\U0000211A',
+ "Qscr;": '\U0001D4AC',
+ "RBarr;": '\U00002910',
+ "REG;": '\U000000AE',
+ "Racute;": '\U00000154',
+ "Rang;": '\U000027EB',
+ "Rarr;": '\U000021A0',
+ "Rarrtl;": '\U00002916',
+ "Rcaron;": '\U00000158',
+ "Rcedil;": '\U00000156',
+ "Rcy;": '\U00000420',
+ "Re;": '\U0000211C',
+ "ReverseElement;": '\U0000220B',
+ "ReverseEquilibrium;": '\U000021CB',
+ "ReverseUpEquilibrium;": '\U0000296F',
+ "Rfr;": '\U0000211C',
+ "Rho;": '\U000003A1',
+ "RightAngleBracket;": '\U000027E9',
+ "RightArrow;": '\U00002192',
+ "RightArrowBar;": '\U000021E5',
+ "RightArrowLeftArrow;": '\U000021C4',
+ "RightCeiling;": '\U00002309',
+ "RightDoubleBracket;": '\U000027E7',
+ "RightDownTeeVector;": '\U0000295D',
+ "RightDownVector;": '\U000021C2',
+ "RightDownVectorBar;": '\U00002955',
+ "RightFloor;": '\U0000230B',
+ "RightTee;": '\U000022A2',
+ "RightTeeArrow;": '\U000021A6',
+ "RightTeeVector;": '\U0000295B',
+ "RightTriangle;": '\U000022B3',
+ "RightTriangleBar;": '\U000029D0',
+ "RightTriangleEqual;": '\U000022B5',
+ "RightUpDownVector;": '\U0000294F',
+ "RightUpTeeVector;": '\U0000295C',
+ "RightUpVector;": '\U000021BE',
+ "RightUpVectorBar;": '\U00002954',
+ "RightVector;": '\U000021C0',
+ "RightVectorBar;": '\U00002953',
+ "Rightarrow;": '\U000021D2',
+ "Ropf;": '\U0000211D',
+ "RoundImplies;": '\U00002970',
+ "Rrightarrow;": '\U000021DB',
+ "Rscr;": '\U0000211B',
+ "Rsh;": '\U000021B1',
+ "RuleDelayed;": '\U000029F4',
+ "SHCHcy;": '\U00000429',
+ "SHcy;": '\U00000428',
+ "SOFTcy;": '\U0000042C',
+ "Sacute;": '\U0000015A',
+ "Sc;": '\U00002ABC',
+ "Scaron;": '\U00000160',
+ "Scedil;": '\U0000015E',
+ "Scirc;": '\U0000015C',
+ "Scy;": '\U00000421',
+ "Sfr;": '\U0001D516',
+ "ShortDownArrow;": '\U00002193',
+ "ShortLeftArrow;": '\U00002190',
+ "ShortRightArrow;": '\U00002192',
+ "ShortUpArrow;": '\U00002191',
+ "Sigma;": '\U000003A3',
+ "SmallCircle;": '\U00002218',
+ "Sopf;": '\U0001D54A',
+ "Sqrt;": '\U0000221A',
+ "Square;": '\U000025A1',
+ "SquareIntersection;": '\U00002293',
+ "SquareSubset;": '\U0000228F',
+ "SquareSubsetEqual;": '\U00002291',
+ "SquareSuperset;": '\U00002290',
+ "SquareSupersetEqual;": '\U00002292',
+ "SquareUnion;": '\U00002294',
+ "Sscr;": '\U0001D4AE',
+ "Star;": '\U000022C6',
+ "Sub;": '\U000022D0',
+ "Subset;": '\U000022D0',
+ "SubsetEqual;": '\U00002286',
+ "Succeeds;": '\U0000227B',
+ "SucceedsEqual;": '\U00002AB0',
+ "SucceedsSlantEqual;": '\U0000227D',
+ "SucceedsTilde;": '\U0000227F',
+ "SuchThat;": '\U0000220B',
+ "Sum;": '\U00002211',
+ "Sup;": '\U000022D1',
+ "Superset;": '\U00002283',
+ "SupersetEqual;": '\U00002287',
+ "Supset;": '\U000022D1',
+ "THORN;": '\U000000DE',
+ "TRADE;": '\U00002122',
+ "TSHcy;": '\U0000040B',
+ "TScy;": '\U00000426',
+ "Tab;": '\U00000009',
+ "Tau;": '\U000003A4',
+ "Tcaron;": '\U00000164',
+ "Tcedil;": '\U00000162',
+ "Tcy;": '\U00000422',
+ "Tfr;": '\U0001D517',
+ "Therefore;": '\U00002234',
+ "Theta;": '\U00000398',
+ "ThinSpace;": '\U00002009',
+ "Tilde;": '\U0000223C',
+ "TildeEqual;": '\U00002243',
+ "TildeFullEqual;": '\U00002245',
+ "TildeTilde;": '\U00002248',
+ "Topf;": '\U0001D54B',
+ "TripleDot;": '\U000020DB',
+ "Tscr;": '\U0001D4AF',
+ "Tstrok;": '\U00000166',
+ "Uacute;": '\U000000DA',
+ "Uarr;": '\U0000219F',
+ "Uarrocir;": '\U00002949',
+ "Ubrcy;": '\U0000040E',
+ "Ubreve;": '\U0000016C',
+ "Ucirc;": '\U000000DB',
+ "Ucy;": '\U00000423',
+ "Udblac;": '\U00000170',
+ "Ufr;": '\U0001D518',
+ "Ugrave;": '\U000000D9',
+ "Umacr;": '\U0000016A',
+ "UnderBar;": '\U0000005F',
+ "UnderBrace;": '\U000023DF',
+ "UnderBracket;": '\U000023B5',
+ "UnderParenthesis;": '\U000023DD',
+ "Union;": '\U000022C3',
+ "UnionPlus;": '\U0000228E',
+ "Uogon;": '\U00000172',
+ "Uopf;": '\U0001D54C',
+ "UpArrow;": '\U00002191',
+ "UpArrowBar;": '\U00002912',
+ "UpArrowDownArrow;": '\U000021C5',
+ "UpDownArrow;": '\U00002195',
+ "UpEquilibrium;": '\U0000296E',
+ "UpTee;": '\U000022A5',
+ "UpTeeArrow;": '\U000021A5',
+ "Uparrow;": '\U000021D1',
+ "Updownarrow;": '\U000021D5',
+ "UpperLeftArrow;": '\U00002196',
+ "UpperRightArrow;": '\U00002197',
+ "Upsi;": '\U000003D2',
+ "Upsilon;": '\U000003A5',
+ "Uring;": '\U0000016E',
+ "Uscr;": '\U0001D4B0',
+ "Utilde;": '\U00000168',
+ "Uuml;": '\U000000DC',
+ "VDash;": '\U000022AB',
+ "Vbar;": '\U00002AEB',
+ "Vcy;": '\U00000412',
+ "Vdash;": '\U000022A9',
+ "Vdashl;": '\U00002AE6',
+ "Vee;": '\U000022C1',
+ "Verbar;": '\U00002016',
+ "Vert;": '\U00002016',
+ "VerticalBar;": '\U00002223',
+ "VerticalLine;": '\U0000007C',
+ "VerticalSeparator;": '\U00002758',
+ "VerticalTilde;": '\U00002240',
+ "VeryThinSpace;": '\U0000200A',
+ "Vfr;": '\U0001D519',
+ "Vopf;": '\U0001D54D',
+ "Vscr;": '\U0001D4B1',
+ "Vvdash;": '\U000022AA',
+ "Wcirc;": '\U00000174',
+ "Wedge;": '\U000022C0',
+ "Wfr;": '\U0001D51A',
+ "Wopf;": '\U0001D54E',
+ "Wscr;": '\U0001D4B2',
+ "Xfr;": '\U0001D51B',
+ "Xi;": '\U0000039E',
+ "Xopf;": '\U0001D54F',
+ "Xscr;": '\U0001D4B3',
+ "YAcy;": '\U0000042F',
+ "YIcy;": '\U00000407',
+ "YUcy;": '\U0000042E',
+ "Yacute;": '\U000000DD',
+ "Ycirc;": '\U00000176',
+ "Ycy;": '\U0000042B',
+ "Yfr;": '\U0001D51C',
+ "Yopf;": '\U0001D550',
+ "Yscr;": '\U0001D4B4',
+ "Yuml;": '\U00000178',
+ "ZHcy;": '\U00000416',
+ "Zacute;": '\U00000179',
+ "Zcaron;": '\U0000017D',
+ "Zcy;": '\U00000417',
+ "Zdot;": '\U0000017B',
+ "ZeroWidthSpace;": '\U0000200B',
+ "Zeta;": '\U00000396',
+ "Zfr;": '\U00002128',
+ "Zopf;": '\U00002124',
+ "Zscr;": '\U0001D4B5',
+ "aacute;": '\U000000E1',
+ "abreve;": '\U00000103',
+ "ac;": '\U0000223E',
+ "acd;": '\U0000223F',
+ "acirc;": '\U000000E2',
+ "acute;": '\U000000B4',
+ "acy;": '\U00000430',
+ "aelig;": '\U000000E6',
+ "af;": '\U00002061',
+ "afr;": '\U0001D51E',
+ "agrave;": '\U000000E0',
+ "alefsym;": '\U00002135',
+ "aleph;": '\U00002135',
+ "alpha;": '\U000003B1',
+ "amacr;": '\U00000101',
+ "amalg;": '\U00002A3F',
+ "amp;": '\U00000026',
+ "and;": '\U00002227',
+ "andand;": '\U00002A55',
+ "andd;": '\U00002A5C',
+ "andslope;": '\U00002A58',
+ "andv;": '\U00002A5A',
+ "ang;": '\U00002220',
+ "ange;": '\U000029A4',
+ "angle;": '\U00002220',
+ "angmsd;": '\U00002221',
+ "angmsdaa;": '\U000029A8',
+ "angmsdab;": '\U000029A9',
+ "angmsdac;": '\U000029AA',
+ "angmsdad;": '\U000029AB',
+ "angmsdae;": '\U000029AC',
+ "angmsdaf;": '\U000029AD',
+ "angmsdag;": '\U000029AE',
+ "angmsdah;": '\U000029AF',
+ "angrt;": '\U0000221F',
+ "angrtvb;": '\U000022BE',
+ "angrtvbd;": '\U0000299D',
+ "angsph;": '\U00002222',
+ "angst;": '\U000000C5',
+ "angzarr;": '\U0000237C',
+ "aogon;": '\U00000105',
+ "aopf;": '\U0001D552',
+ "ap;": '\U00002248',
+ "apE;": '\U00002A70',
+ "apacir;": '\U00002A6F',
+ "ape;": '\U0000224A',
+ "apid;": '\U0000224B',
+ "apos;": '\U00000027',
+ "approx;": '\U00002248',
+ "approxeq;": '\U0000224A',
+ "aring;": '\U000000E5',
+ "ascr;": '\U0001D4B6',
+ "ast;": '\U0000002A',
+ "asymp;": '\U00002248',
+ "asympeq;": '\U0000224D',
+ "atilde;": '\U000000E3',
+ "auml;": '\U000000E4',
+ "awconint;": '\U00002233',
+ "awint;": '\U00002A11',
+ "bNot;": '\U00002AED',
+ "backcong;": '\U0000224C',
+ "backepsilon;": '\U000003F6',
+ "backprime;": '\U00002035',
+ "backsim;": '\U0000223D',
+ "backsimeq;": '\U000022CD',
+ "barvee;": '\U000022BD',
+ "barwed;": '\U00002305',
+ "barwedge;": '\U00002305',
+ "bbrk;": '\U000023B5',
+ "bbrktbrk;": '\U000023B6',
+ "bcong;": '\U0000224C',
+ "bcy;": '\U00000431',
+ "bdquo;": '\U0000201E',
+ "becaus;": '\U00002235',
+ "because;": '\U00002235',
+ "bemptyv;": '\U000029B0',
+ "bepsi;": '\U000003F6',
+ "bernou;": '\U0000212C',
+ "beta;": '\U000003B2',
+ "beth;": '\U00002136',
+ "between;": '\U0000226C',
+ "bfr;": '\U0001D51F',
+ "bigcap;": '\U000022C2',
+ "bigcirc;": '\U000025EF',
+ "bigcup;": '\U000022C3',
+ "bigodot;": '\U00002A00',
+ "bigoplus;": '\U00002A01',
+ "bigotimes;": '\U00002A02',
+ "bigsqcup;": '\U00002A06',
+ "bigstar;": '\U00002605',
+ "bigtriangledown;": '\U000025BD',
+ "bigtriangleup;": '\U000025B3',
+ "biguplus;": '\U00002A04',
+ "bigvee;": '\U000022C1',
+ "bigwedge;": '\U000022C0',
+ "bkarow;": '\U0000290D',
+ "blacklozenge;": '\U000029EB',
+ "blacksquare;": '\U000025AA',
+ "blacktriangle;": '\U000025B4',
+ "blacktriangledown;": '\U000025BE',
+ "blacktriangleleft;": '\U000025C2',
+ "blacktriangleright;": '\U000025B8',
+ "blank;": '\U00002423',
+ "blk12;": '\U00002592',
+ "blk14;": '\U00002591',
+ "blk34;": '\U00002593',
+ "block;": '\U00002588',
+ "bnot;": '\U00002310',
+ "bopf;": '\U0001D553',
+ "bot;": '\U000022A5',
+ "bottom;": '\U000022A5',
+ "bowtie;": '\U000022C8',
+ "boxDL;": '\U00002557',
+ "boxDR;": '\U00002554',
+ "boxDl;": '\U00002556',
+ "boxDr;": '\U00002553',
+ "boxH;": '\U00002550',
+ "boxHD;": '\U00002566',
+ "boxHU;": '\U00002569',
+ "boxHd;": '\U00002564',
+ "boxHu;": '\U00002567',
+ "boxUL;": '\U0000255D',
+ "boxUR;": '\U0000255A',
+ "boxUl;": '\U0000255C',
+ "boxUr;": '\U00002559',
+ "boxV;": '\U00002551',
+ "boxVH;": '\U0000256C',
+ "boxVL;": '\U00002563',
+ "boxVR;": '\U00002560',
+ "boxVh;": '\U0000256B',
+ "boxVl;": '\U00002562',
+ "boxVr;": '\U0000255F',
+ "boxbox;": '\U000029C9',
+ "boxdL;": '\U00002555',
+ "boxdR;": '\U00002552',
+ "boxdl;": '\U00002510',
+ "boxdr;": '\U0000250C',
+ "boxh;": '\U00002500',
+ "boxhD;": '\U00002565',
+ "boxhU;": '\U00002568',
+ "boxhd;": '\U0000252C',
+ "boxhu;": '\U00002534',
+ "boxminus;": '\U0000229F',
+ "boxplus;": '\U0000229E',
+ "boxtimes;": '\U000022A0',
+ "boxuL;": '\U0000255B',
+ "boxuR;": '\U00002558',
+ "boxul;": '\U00002518',
+ "boxur;": '\U00002514',
+ "boxv;": '\U00002502',
+ "boxvH;": '\U0000256A',
+ "boxvL;": '\U00002561',
+ "boxvR;": '\U0000255E',
+ "boxvh;": '\U0000253C',
+ "boxvl;": '\U00002524',
+ "boxvr;": '\U0000251C',
+ "bprime;": '\U00002035',
+ "breve;": '\U000002D8',
+ "brvbar;": '\U000000A6',
+ "bscr;": '\U0001D4B7',
+ "bsemi;": '\U0000204F',
+ "bsim;": '\U0000223D',
+ "bsime;": '\U000022CD',
+ "bsol;": '\U0000005C',
+ "bsolb;": '\U000029C5',
+ "bsolhsub;": '\U000027C8',
+ "bull;": '\U00002022',
+ "bullet;": '\U00002022',
+ "bump;": '\U0000224E',
+ "bumpE;": '\U00002AAE',
+ "bumpe;": '\U0000224F',
+ "bumpeq;": '\U0000224F',
+ "cacute;": '\U00000107',
+ "cap;": '\U00002229',
+ "capand;": '\U00002A44',
+ "capbrcup;": '\U00002A49',
+ "capcap;": '\U00002A4B',
+ "capcup;": '\U00002A47',
+ "capdot;": '\U00002A40',
+ "caret;": '\U00002041',
+ "caron;": '\U000002C7',
+ "ccaps;": '\U00002A4D',
+ "ccaron;": '\U0000010D',
+ "ccedil;": '\U000000E7',
+ "ccirc;": '\U00000109',
+ "ccups;": '\U00002A4C',
+ "ccupssm;": '\U00002A50',
+ "cdot;": '\U0000010B',
+ "cedil;": '\U000000B8',
+ "cemptyv;": '\U000029B2',
+ "cent;": '\U000000A2',
+ "centerdot;": '\U000000B7',
+ "cfr;": '\U0001D520',
+ "chcy;": '\U00000447',
+ "check;": '\U00002713',
+ "checkmark;": '\U00002713',
+ "chi;": '\U000003C7',
+ "cir;": '\U000025CB',
+ "cirE;": '\U000029C3',
+ "circ;": '\U000002C6',
+ "circeq;": '\U00002257',
+ "circlearrowleft;": '\U000021BA',
+ "circlearrowright;": '\U000021BB',
+ "circledR;": '\U000000AE',
+ "circledS;": '\U000024C8',
+ "circledast;": '\U0000229B',
+ "circledcirc;": '\U0000229A',
+ "circleddash;": '\U0000229D',
+ "cire;": '\U00002257',
+ "cirfnint;": '\U00002A10',
+ "cirmid;": '\U00002AEF',
+ "cirscir;": '\U000029C2',
+ "clubs;": '\U00002663',
+ "clubsuit;": '\U00002663',
+ "colon;": '\U0000003A',
+ "colone;": '\U00002254',
+ "coloneq;": '\U00002254',
+ "comma;": '\U0000002C',
+ "commat;": '\U00000040',
+ "comp;": '\U00002201',
+ "compfn;": '\U00002218',
+ "complement;": '\U00002201',
+ "complexes;": '\U00002102',
+ "cong;": '\U00002245',
+ "congdot;": '\U00002A6D',
+ "conint;": '\U0000222E',
+ "copf;": '\U0001D554',
+ "coprod;": '\U00002210',
+ "copy;": '\U000000A9',
+ "copysr;": '\U00002117',
+ "crarr;": '\U000021B5',
+ "cross;": '\U00002717',
+ "cscr;": '\U0001D4B8',
+ "csub;": '\U00002ACF',
+ "csube;": '\U00002AD1',
+ "csup;": '\U00002AD0',
+ "csupe;": '\U00002AD2',
+ "ctdot;": '\U000022EF',
+ "cudarrl;": '\U00002938',
+ "cudarrr;": '\U00002935',
+ "cuepr;": '\U000022DE',
+ "cuesc;": '\U000022DF',
+ "cularr;": '\U000021B6',
+ "cularrp;": '\U0000293D',
+ "cup;": '\U0000222A',
+ "cupbrcap;": '\U00002A48',
+ "cupcap;": '\U00002A46',
+ "cupcup;": '\U00002A4A',
+ "cupdot;": '\U0000228D',
+ "cupor;": '\U00002A45',
+ "curarr;": '\U000021B7',
+ "curarrm;": '\U0000293C',
+ "curlyeqprec;": '\U000022DE',
+ "curlyeqsucc;": '\U000022DF',
+ "curlyvee;": '\U000022CE',
+ "curlywedge;": '\U000022CF',
+ "curren;": '\U000000A4',
+ "curvearrowleft;": '\U000021B6',
+ "curvearrowright;": '\U000021B7',
+ "cuvee;": '\U000022CE',
+ "cuwed;": '\U000022CF',
+ "cwconint;": '\U00002232',
+ "cwint;": '\U00002231',
+ "cylcty;": '\U0000232D',
+ "dArr;": '\U000021D3',
+ "dHar;": '\U00002965',
+ "dagger;": '\U00002020',
+ "daleth;": '\U00002138',
+ "darr;": '\U00002193',
+ "dash;": '\U00002010',
+ "dashv;": '\U000022A3',
+ "dbkarow;": '\U0000290F',
+ "dblac;": '\U000002DD',
+ "dcaron;": '\U0000010F',
+ "dcy;": '\U00000434',
+ "dd;": '\U00002146',
+ "ddagger;": '\U00002021',
+ "ddarr;": '\U000021CA',
+ "ddotseq;": '\U00002A77',
+ "deg;": '\U000000B0',
+ "delta;": '\U000003B4',
+ "demptyv;": '\U000029B1',
+ "dfisht;": '\U0000297F',
+ "dfr;": '\U0001D521',
+ "dharl;": '\U000021C3',
+ "dharr;": '\U000021C2',
+ "diam;": '\U000022C4',
+ "diamond;": '\U000022C4',
+ "diamondsuit;": '\U00002666',
+ "diams;": '\U00002666',
+ "die;": '\U000000A8',
+ "digamma;": '\U000003DD',
+ "disin;": '\U000022F2',
+ "div;": '\U000000F7',
+ "divide;": '\U000000F7',
+ "divideontimes;": '\U000022C7',
+ "divonx;": '\U000022C7',
+ "djcy;": '\U00000452',
+ "dlcorn;": '\U0000231E',
+ "dlcrop;": '\U0000230D',
+ "dollar;": '\U00000024',
+ "dopf;": '\U0001D555',
+ "dot;": '\U000002D9',
+ "doteq;": '\U00002250',
+ "doteqdot;": '\U00002251',
+ "dotminus;": '\U00002238',
+ "dotplus;": '\U00002214',
+ "dotsquare;": '\U000022A1',
+ "doublebarwedge;": '\U00002306',
+ "downarrow;": '\U00002193',
+ "downdownarrows;": '\U000021CA',
+ "downharpoonleft;": '\U000021C3',
+ "downharpoonright;": '\U000021C2',
+ "drbkarow;": '\U00002910',
+ "drcorn;": '\U0000231F',
+ "drcrop;": '\U0000230C',
+ "dscr;": '\U0001D4B9',
+ "dscy;": '\U00000455',
+ "dsol;": '\U000029F6',
+ "dstrok;": '\U00000111',
+ "dtdot;": '\U000022F1',
+ "dtri;": '\U000025BF',
+ "dtrif;": '\U000025BE',
+ "duarr;": '\U000021F5',
+ "duhar;": '\U0000296F',
+ "dwangle;": '\U000029A6',
+ "dzcy;": '\U0000045F',
+ "dzigrarr;": '\U000027FF',
+ "eDDot;": '\U00002A77',
+ "eDot;": '\U00002251',
+ "eacute;": '\U000000E9',
+ "easter;": '\U00002A6E',
+ "ecaron;": '\U0000011B',
+ "ecir;": '\U00002256',
+ "ecirc;": '\U000000EA',
+ "ecolon;": '\U00002255',
+ "ecy;": '\U0000044D',
+ "edot;": '\U00000117',
+ "ee;": '\U00002147',
+ "efDot;": '\U00002252',
+ "efr;": '\U0001D522',
+ "eg;": '\U00002A9A',
+ "egrave;": '\U000000E8',
+ "egs;": '\U00002A96',
+ "egsdot;": '\U00002A98',
+ "el;": '\U00002A99',
+ "elinters;": '\U000023E7',
+ "ell;": '\U00002113',
+ "els;": '\U00002A95',
+ "elsdot;": '\U00002A97',
+ "emacr;": '\U00000113',
+ "empty;": '\U00002205',
+ "emptyset;": '\U00002205',
+ "emptyv;": '\U00002205',
+ "emsp;": '\U00002003',
+ "emsp13;": '\U00002004',
+ "emsp14;": '\U00002005',
+ "eng;": '\U0000014B',
+ "ensp;": '\U00002002',
+ "eogon;": '\U00000119',
+ "eopf;": '\U0001D556',
+ "epar;": '\U000022D5',
+ "eparsl;": '\U000029E3',
+ "eplus;": '\U00002A71',
+ "epsi;": '\U000003B5',
+ "epsilon;": '\U000003B5',
+ "epsiv;": '\U000003F5',
+ "eqcirc;": '\U00002256',
+ "eqcolon;": '\U00002255',
+ "eqsim;": '\U00002242',
+ "eqslantgtr;": '\U00002A96',
+ "eqslantless;": '\U00002A95',
+ "equals;": '\U0000003D',
+ "equest;": '\U0000225F',
+ "equiv;": '\U00002261',
+ "equivDD;": '\U00002A78',
+ "eqvparsl;": '\U000029E5',
+ "erDot;": '\U00002253',
+ "erarr;": '\U00002971',
+ "escr;": '\U0000212F',
+ "esdot;": '\U00002250',
+ "esim;": '\U00002242',
+ "eta;": '\U000003B7',
+ "eth;": '\U000000F0',
+ "euml;": '\U000000EB',
+ "euro;": '\U000020AC',
+ "excl;": '\U00000021',
+ "exist;": '\U00002203',
+ "expectation;": '\U00002130',
+ "exponentiale;": '\U00002147',
+ "fallingdotseq;": '\U00002252',
+ "fcy;": '\U00000444',
+ "female;": '\U00002640',
+ "ffilig;": '\U0000FB03',
+ "fflig;": '\U0000FB00',
+ "ffllig;": '\U0000FB04',
+ "ffr;": '\U0001D523',
+ "filig;": '\U0000FB01',
+ "flat;": '\U0000266D',
+ "fllig;": '\U0000FB02',
+ "fltns;": '\U000025B1',
+ "fnof;": '\U00000192',
+ "fopf;": '\U0001D557',
+ "forall;": '\U00002200',
+ "fork;": '\U000022D4',
+ "forkv;": '\U00002AD9',
+ "fpartint;": '\U00002A0D',
+ "frac12;": '\U000000BD',
+ "frac13;": '\U00002153',
+ "frac14;": '\U000000BC',
+ "frac15;": '\U00002155',
+ "frac16;": '\U00002159',
+ "frac18;": '\U0000215B',
+ "frac23;": '\U00002154',
+ "frac25;": '\U00002156',
+ "frac34;": '\U000000BE',
+ "frac35;": '\U00002157',
+ "frac38;": '\U0000215C',
+ "frac45;": '\U00002158',
+ "frac56;": '\U0000215A',
+ "frac58;": '\U0000215D',
+ "frac78;": '\U0000215E',
+ "frasl;": '\U00002044',
+ "frown;": '\U00002322',
+ "fscr;": '\U0001D4BB',
+ "gE;": '\U00002267',
+ "gEl;": '\U00002A8C',
+ "gacute;": '\U000001F5',
+ "gamma;": '\U000003B3',
+ "gammad;": '\U000003DD',
+ "gap;": '\U00002A86',
+ "gbreve;": '\U0000011F',
+ "gcirc;": '\U0000011D',
+ "gcy;": '\U00000433',
+ "gdot;": '\U00000121',
+ "ge;": '\U00002265',
+ "gel;": '\U000022DB',
+ "geq;": '\U00002265',
+ "geqq;": '\U00002267',
+ "geqslant;": '\U00002A7E',
+ "ges;": '\U00002A7E',
+ "gescc;": '\U00002AA9',
+ "gesdot;": '\U00002A80',
+ "gesdoto;": '\U00002A82',
+ "gesdotol;": '\U00002A84',
+ "gesles;": '\U00002A94',
+ "gfr;": '\U0001D524',
+ "gg;": '\U0000226B',
+ "ggg;": '\U000022D9',
+ "gimel;": '\U00002137',
+ "gjcy;": '\U00000453',
+ "gl;": '\U00002277',
+ "glE;": '\U00002A92',
+ "gla;": '\U00002AA5',
+ "glj;": '\U00002AA4',
+ "gnE;": '\U00002269',
+ "gnap;": '\U00002A8A',
+ "gnapprox;": '\U00002A8A',
+ "gne;": '\U00002A88',
+ "gneq;": '\U00002A88',
+ "gneqq;": '\U00002269',
+ "gnsim;": '\U000022E7',
+ "gopf;": '\U0001D558',
+ "grave;": '\U00000060',
+ "gscr;": '\U0000210A',
+ "gsim;": '\U00002273',
+ "gsime;": '\U00002A8E',
+ "gsiml;": '\U00002A90',
+ "gt;": '\U0000003E',
+ "gtcc;": '\U00002AA7',
+ "gtcir;": '\U00002A7A',
+ "gtdot;": '\U000022D7',
+ "gtlPar;": '\U00002995',
+ "gtquest;": '\U00002A7C',
+ "gtrapprox;": '\U00002A86',
+ "gtrarr;": '\U00002978',
+ "gtrdot;": '\U000022D7',
+ "gtreqless;": '\U000022DB',
+ "gtreqqless;": '\U00002A8C',
+ "gtrless;": '\U00002277',
+ "gtrsim;": '\U00002273',
+ "hArr;": '\U000021D4',
+ "hairsp;": '\U0000200A',
+ "half;": '\U000000BD',
+ "hamilt;": '\U0000210B',
+ "hardcy;": '\U0000044A',
+ "harr;": '\U00002194',
+ "harrcir;": '\U00002948',
+ "harrw;": '\U000021AD',
+ "hbar;": '\U0000210F',
+ "hcirc;": '\U00000125',
+ "hearts;": '\U00002665',
+ "heartsuit;": '\U00002665',
+ "hellip;": '\U00002026',
+ "hercon;": '\U000022B9',
+ "hfr;": '\U0001D525',
+ "hksearow;": '\U00002925',
+ "hkswarow;": '\U00002926',
+ "hoarr;": '\U000021FF',
+ "homtht;": '\U0000223B',
+ "hookleftarrow;": '\U000021A9',
+ "hookrightarrow;": '\U000021AA',
+ "hopf;": '\U0001D559',
+ "horbar;": '\U00002015',
+ "hscr;": '\U0001D4BD',
+ "hslash;": '\U0000210F',
+ "hstrok;": '\U00000127',
+ "hybull;": '\U00002043',
+ "hyphen;": '\U00002010',
+ "iacute;": '\U000000ED',
+ "ic;": '\U00002063',
+ "icirc;": '\U000000EE',
+ "icy;": '\U00000438',
+ "iecy;": '\U00000435',
+ "iexcl;": '\U000000A1',
+ "iff;": '\U000021D4',
+ "ifr;": '\U0001D526',
+ "igrave;": '\U000000EC',
+ "ii;": '\U00002148',
+ "iiiint;": '\U00002A0C',
+ "iiint;": '\U0000222D',
+ "iinfin;": '\U000029DC',
+ "iiota;": '\U00002129',
+ "ijlig;": '\U00000133',
+ "imacr;": '\U0000012B',
+ "image;": '\U00002111',
+ "imagline;": '\U00002110',
+ "imagpart;": '\U00002111',
+ "imath;": '\U00000131',
+ "imof;": '\U000022B7',
+ "imped;": '\U000001B5',
+ "in;": '\U00002208',
+ "incare;": '\U00002105',
+ "infin;": '\U0000221E',
+ "infintie;": '\U000029DD',
+ "inodot;": '\U00000131',
+ "int;": '\U0000222B',
+ "intcal;": '\U000022BA',
+ "integers;": '\U00002124',
+ "intercal;": '\U000022BA',
+ "intlarhk;": '\U00002A17',
+ "intprod;": '\U00002A3C',
+ "iocy;": '\U00000451',
+ "iogon;": '\U0000012F',
+ "iopf;": '\U0001D55A',
+ "iota;": '\U000003B9',
+ "iprod;": '\U00002A3C',
+ "iquest;": '\U000000BF',
+ "iscr;": '\U0001D4BE',
+ "isin;": '\U00002208',
+ "isinE;": '\U000022F9',
+ "isindot;": '\U000022F5',
+ "isins;": '\U000022F4',
+ "isinsv;": '\U000022F3',
+ "isinv;": '\U00002208',
+ "it;": '\U00002062',
+ "itilde;": '\U00000129',
+ "iukcy;": '\U00000456',
+ "iuml;": '\U000000EF',
+ "jcirc;": '\U00000135',
+ "jcy;": '\U00000439',
+ "jfr;": '\U0001D527',
+ "jmath;": '\U00000237',
+ "jopf;": '\U0001D55B',
+ "jscr;": '\U0001D4BF',
+ "jsercy;": '\U00000458',
+ "jukcy;": '\U00000454',
+ "kappa;": '\U000003BA',
+ "kappav;": '\U000003F0',
+ "kcedil;": '\U00000137',
+ "kcy;": '\U0000043A',
+ "kfr;": '\U0001D528',
+ "kgreen;": '\U00000138',
+ "khcy;": '\U00000445',
+ "kjcy;": '\U0000045C',
+ "kopf;": '\U0001D55C',
+ "kscr;": '\U0001D4C0',
+ "lAarr;": '\U000021DA',
+ "lArr;": '\U000021D0',
+ "lAtail;": '\U0000291B',
+ "lBarr;": '\U0000290E',
+ "lE;": '\U00002266',
+ "lEg;": '\U00002A8B',
+ "lHar;": '\U00002962',
+ "lacute;": '\U0000013A',
+ "laemptyv;": '\U000029B4',
+ "lagran;": '\U00002112',
+ "lambda;": '\U000003BB',
+ "lang;": '\U000027E8',
+ "langd;": '\U00002991',
+ "langle;": '\U000027E8',
+ "lap;": '\U00002A85',
+ "laquo;": '\U000000AB',
+ "larr;": '\U00002190',
+ "larrb;": '\U000021E4',
+ "larrbfs;": '\U0000291F',
+ "larrfs;": '\U0000291D',
+ "larrhk;": '\U000021A9',
+ "larrlp;": '\U000021AB',
+ "larrpl;": '\U00002939',
+ "larrsim;": '\U00002973',
+ "larrtl;": '\U000021A2',
+ "lat;": '\U00002AAB',
+ "latail;": '\U00002919',
+ "late;": '\U00002AAD',
+ "lbarr;": '\U0000290C',
+ "lbbrk;": '\U00002772',
+ "lbrace;": '\U0000007B',
+ "lbrack;": '\U0000005B',
+ "lbrke;": '\U0000298B',
+ "lbrksld;": '\U0000298F',
+ "lbrkslu;": '\U0000298D',
+ "lcaron;": '\U0000013E',
+ "lcedil;": '\U0000013C',
+ "lceil;": '\U00002308',
+ "lcub;": '\U0000007B',
+ "lcy;": '\U0000043B',
+ "ldca;": '\U00002936',
+ "ldquo;": '\U0000201C',
+ "ldquor;": '\U0000201E',
+ "ldrdhar;": '\U00002967',
+ "ldrushar;": '\U0000294B',
+ "ldsh;": '\U000021B2',
+ "le;": '\U00002264',
+ "leftarrow;": '\U00002190',
+ "leftarrowtail;": '\U000021A2',
+ "leftharpoondown;": '\U000021BD',
+ "leftharpoonup;": '\U000021BC',
+ "leftleftarrows;": '\U000021C7',
+ "leftrightarrow;": '\U00002194',
+ "leftrightarrows;": '\U000021C6',
+ "leftrightharpoons;": '\U000021CB',
+ "leftrightsquigarrow;": '\U000021AD',
+ "leftthreetimes;": '\U000022CB',
+ "leg;": '\U000022DA',
+ "leq;": '\U00002264',
+ "leqq;": '\U00002266',
+ "leqslant;": '\U00002A7D',
+ "les;": '\U00002A7D',
+ "lescc;": '\U00002AA8',
+ "lesdot;": '\U00002A7F',
+ "lesdoto;": '\U00002A81',
+ "lesdotor;": '\U00002A83',
+ "lesges;": '\U00002A93',
+ "lessapprox;": '\U00002A85',
+ "lessdot;": '\U000022D6',
+ "lesseqgtr;": '\U000022DA',
+ "lesseqqgtr;": '\U00002A8B',
+ "lessgtr;": '\U00002276',
+ "lesssim;": '\U00002272',
+ "lfisht;": '\U0000297C',
+ "lfloor;": '\U0000230A',
+ "lfr;": '\U0001D529',
+ "lg;": '\U00002276',
+ "lgE;": '\U00002A91',
+ "lhard;": '\U000021BD',
+ "lharu;": '\U000021BC',
+ "lharul;": '\U0000296A',
+ "lhblk;": '\U00002584',
+ "ljcy;": '\U00000459',
+ "ll;": '\U0000226A',
+ "llarr;": '\U000021C7',
+ "llcorner;": '\U0000231E',
+ "llhard;": '\U0000296B',
+ "lltri;": '\U000025FA',
+ "lmidot;": '\U00000140',
+ "lmoust;": '\U000023B0',
+ "lmoustache;": '\U000023B0',
+ "lnE;": '\U00002268',
+ "lnap;": '\U00002A89',
+ "lnapprox;": '\U00002A89',
+ "lne;": '\U00002A87',
+ "lneq;": '\U00002A87',
+ "lneqq;": '\U00002268',
+ "lnsim;": '\U000022E6',
+ "loang;": '\U000027EC',
+ "loarr;": '\U000021FD',
+ "lobrk;": '\U000027E6',
+ "longleftarrow;": '\U000027F5',
+ "longleftrightarrow;": '\U000027F7',
+ "longmapsto;": '\U000027FC',
+ "longrightarrow;": '\U000027F6',
+ "looparrowleft;": '\U000021AB',
+ "looparrowright;": '\U000021AC',
+ "lopar;": '\U00002985',
+ "lopf;": '\U0001D55D',
+ "loplus;": '\U00002A2D',
+ "lotimes;": '\U00002A34',
+ "lowast;": '\U00002217',
+ "lowbar;": '\U0000005F',
+ "loz;": '\U000025CA',
+ "lozenge;": '\U000025CA',
+ "lozf;": '\U000029EB',
+ "lpar;": '\U00000028',
+ "lparlt;": '\U00002993',
+ "lrarr;": '\U000021C6',
+ "lrcorner;": '\U0000231F',
+ "lrhar;": '\U000021CB',
+ "lrhard;": '\U0000296D',
+ "lrm;": '\U0000200E',
+ "lrtri;": '\U000022BF',
+ "lsaquo;": '\U00002039',
+ "lscr;": '\U0001D4C1',
+ "lsh;": '\U000021B0',
+ "lsim;": '\U00002272',
+ "lsime;": '\U00002A8D',
+ "lsimg;": '\U00002A8F',
+ "lsqb;": '\U0000005B',
+ "lsquo;": '\U00002018',
+ "lsquor;": '\U0000201A',
+ "lstrok;": '\U00000142',
+ "lt;": '\U0000003C',
+ "ltcc;": '\U00002AA6',
+ "ltcir;": '\U00002A79',
+ "ltdot;": '\U000022D6',
+ "lthree;": '\U000022CB',
+ "ltimes;": '\U000022C9',
+ "ltlarr;": '\U00002976',
+ "ltquest;": '\U00002A7B',
+ "ltrPar;": '\U00002996',
+ "ltri;": '\U000025C3',
+ "ltrie;": '\U000022B4',
+ "ltrif;": '\U000025C2',
+ "lurdshar;": '\U0000294A',
+ "luruhar;": '\U00002966',
+ "mDDot;": '\U0000223A',
+ "macr;": '\U000000AF',
+ "male;": '\U00002642',
+ "malt;": '\U00002720',
+ "maltese;": '\U00002720',
+ "map;": '\U000021A6',
+ "mapsto;": '\U000021A6',
+ "mapstodown;": '\U000021A7',
+ "mapstoleft;": '\U000021A4',
+ "mapstoup;": '\U000021A5',
+ "marker;": '\U000025AE',
+ "mcomma;": '\U00002A29',
+ "mcy;": '\U0000043C',
+ "mdash;": '\U00002014',
+ "measuredangle;": '\U00002221',
+ "mfr;": '\U0001D52A',
+ "mho;": '\U00002127',
+ "micro;": '\U000000B5',
+ "mid;": '\U00002223',
+ "midast;": '\U0000002A',
+ "midcir;": '\U00002AF0',
+ "middot;": '\U000000B7',
+ "minus;": '\U00002212',
+ "minusb;": '\U0000229F',
+ "minusd;": '\U00002238',
+ "minusdu;": '\U00002A2A',
+ "mlcp;": '\U00002ADB',
+ "mldr;": '\U00002026',
+ "mnplus;": '\U00002213',
+ "models;": '\U000022A7',
+ "mopf;": '\U0001D55E',
+ "mp;": '\U00002213',
+ "mscr;": '\U0001D4C2',
+ "mstpos;": '\U0000223E',
+ "mu;": '\U000003BC',
+ "multimap;": '\U000022B8',
+ "mumap;": '\U000022B8',
+ "nLeftarrow;": '\U000021CD',
+ "nLeftrightarrow;": '\U000021CE',
+ "nRightarrow;": '\U000021CF',
+ "nVDash;": '\U000022AF',
+ "nVdash;": '\U000022AE',
+ "nabla;": '\U00002207',
+ "nacute;": '\U00000144',
+ "nap;": '\U00002249',
+ "napos;": '\U00000149',
+ "napprox;": '\U00002249',
+ "natur;": '\U0000266E',
+ "natural;": '\U0000266E',
+ "naturals;": '\U00002115',
+ "nbsp;": '\U000000A0',
+ "ncap;": '\U00002A43',
+ "ncaron;": '\U00000148',
+ "ncedil;": '\U00000146',
+ "ncong;": '\U00002247',
+ "ncup;": '\U00002A42',
+ "ncy;": '\U0000043D',
+ "ndash;": '\U00002013',
+ "ne;": '\U00002260',
+ "neArr;": '\U000021D7',
+ "nearhk;": '\U00002924',
+ "nearr;": '\U00002197',
+ "nearrow;": '\U00002197',
+ "nequiv;": '\U00002262',
+ "nesear;": '\U00002928',
+ "nexist;": '\U00002204',
+ "nexists;": '\U00002204',
+ "nfr;": '\U0001D52B',
+ "nge;": '\U00002271',
+ "ngeq;": '\U00002271',
+ "ngsim;": '\U00002275',
+ "ngt;": '\U0000226F',
+ "ngtr;": '\U0000226F',
+ "nhArr;": '\U000021CE',
+ "nharr;": '\U000021AE',
+ "nhpar;": '\U00002AF2',
+ "ni;": '\U0000220B',
+ "nis;": '\U000022FC',
+ "nisd;": '\U000022FA',
+ "niv;": '\U0000220B',
+ "njcy;": '\U0000045A',
+ "nlArr;": '\U000021CD',
+ "nlarr;": '\U0000219A',
+ "nldr;": '\U00002025',
+ "nle;": '\U00002270',
+ "nleftarrow;": '\U0000219A',
+ "nleftrightarrow;": '\U000021AE',
+ "nleq;": '\U00002270',
+ "nless;": '\U0000226E',
+ "nlsim;": '\U00002274',
+ "nlt;": '\U0000226E',
+ "nltri;": '\U000022EA',
+ "nltrie;": '\U000022EC',
+ "nmid;": '\U00002224',
+ "nopf;": '\U0001D55F',
+ "not;": '\U000000AC',
+ "notin;": '\U00002209',
+ "notinva;": '\U00002209',
+ "notinvb;": '\U000022F7',
+ "notinvc;": '\U000022F6',
+ "notni;": '\U0000220C',
+ "notniva;": '\U0000220C',
+ "notnivb;": '\U000022FE',
+ "notnivc;": '\U000022FD',
+ "npar;": '\U00002226',
+ "nparallel;": '\U00002226',
+ "npolint;": '\U00002A14',
+ "npr;": '\U00002280',
+ "nprcue;": '\U000022E0',
+ "nprec;": '\U00002280',
+ "nrArr;": '\U000021CF',
+ "nrarr;": '\U0000219B',
+ "nrightarrow;": '\U0000219B',
+ "nrtri;": '\U000022EB',
+ "nrtrie;": '\U000022ED',
+ "nsc;": '\U00002281',
+ "nsccue;": '\U000022E1',
+ "nscr;": '\U0001D4C3',
+ "nshortmid;": '\U00002224',
+ "nshortparallel;": '\U00002226',
+ "nsim;": '\U00002241',
+ "nsime;": '\U00002244',
+ "nsimeq;": '\U00002244',
+ "nsmid;": '\U00002224',
+ "nspar;": '\U00002226',
+ "nsqsube;": '\U000022E2',
+ "nsqsupe;": '\U000022E3',
+ "nsub;": '\U00002284',
+ "nsube;": '\U00002288',
+ "nsubseteq;": '\U00002288',
+ "nsucc;": '\U00002281',
+ "nsup;": '\U00002285',
+ "nsupe;": '\U00002289',
+ "nsupseteq;": '\U00002289',
+ "ntgl;": '\U00002279',
+ "ntilde;": '\U000000F1',
+ "ntlg;": '\U00002278',
+ "ntriangleleft;": '\U000022EA',
+ "ntrianglelefteq;": '\U000022EC',
+ "ntriangleright;": '\U000022EB',
+ "ntrianglerighteq;": '\U000022ED',
+ "nu;": '\U000003BD',
+ "num;": '\U00000023',
+ "numero;": '\U00002116',
+ "numsp;": '\U00002007',
+ "nvDash;": '\U000022AD',
+ "nvHarr;": '\U00002904',
+ "nvdash;": '\U000022AC',
+ "nvinfin;": '\U000029DE',
+ "nvlArr;": '\U00002902',
+ "nvrArr;": '\U00002903',
+ "nwArr;": '\U000021D6',
+ "nwarhk;": '\U00002923',
+ "nwarr;": '\U00002196',
+ "nwarrow;": '\U00002196',
+ "nwnear;": '\U00002927',
+ "oS;": '\U000024C8',
+ "oacute;": '\U000000F3',
+ "oast;": '\U0000229B',
+ "ocir;": '\U0000229A',
+ "ocirc;": '\U000000F4',
+ "ocy;": '\U0000043E',
+ "odash;": '\U0000229D',
+ "odblac;": '\U00000151',
+ "odiv;": '\U00002A38',
+ "odot;": '\U00002299',
+ "odsold;": '\U000029BC',
+ "oelig;": '\U00000153',
+ "ofcir;": '\U000029BF',
+ "ofr;": '\U0001D52C',
+ "ogon;": '\U000002DB',
+ "ograve;": '\U000000F2',
+ "ogt;": '\U000029C1',
+ "ohbar;": '\U000029B5',
+ "ohm;": '\U000003A9',
+ "oint;": '\U0000222E',
+ "olarr;": '\U000021BA',
+ "olcir;": '\U000029BE',
+ "olcross;": '\U000029BB',
+ "oline;": '\U0000203E',
+ "olt;": '\U000029C0',
+ "omacr;": '\U0000014D',
+ "omega;": '\U000003C9',
+ "omicron;": '\U000003BF',
+ "omid;": '\U000029B6',
+ "ominus;": '\U00002296',
+ "oopf;": '\U0001D560',
+ "opar;": '\U000029B7',
+ "operp;": '\U000029B9',
+ "oplus;": '\U00002295',
+ "or;": '\U00002228',
+ "orarr;": '\U000021BB',
+ "ord;": '\U00002A5D',
+ "order;": '\U00002134',
+ "orderof;": '\U00002134',
+ "ordf;": '\U000000AA',
+ "ordm;": '\U000000BA',
+ "origof;": '\U000022B6',
+ "oror;": '\U00002A56',
+ "orslope;": '\U00002A57',
+ "orv;": '\U00002A5B',
+ "oscr;": '\U00002134',
+ "oslash;": '\U000000F8',
+ "osol;": '\U00002298',
+ "otilde;": '\U000000F5',
+ "otimes;": '\U00002297',
+ "otimesas;": '\U00002A36',
+ "ouml;": '\U000000F6',
+ "ovbar;": '\U0000233D',
+ "par;": '\U00002225',
+ "para;": '\U000000B6',
+ "parallel;": '\U00002225',
+ "parsim;": '\U00002AF3',
+ "parsl;": '\U00002AFD',
+ "part;": '\U00002202',
+ "pcy;": '\U0000043F',
+ "percnt;": '\U00000025',
+ "period;": '\U0000002E',
+ "permil;": '\U00002030',
+ "perp;": '\U000022A5',
+ "pertenk;": '\U00002031',
+ "pfr;": '\U0001D52D',
+ "phi;": '\U000003C6',
+ "phiv;": '\U000003D5',
+ "phmmat;": '\U00002133',
+ "phone;": '\U0000260E',
+ "pi;": '\U000003C0',
+ "pitchfork;": '\U000022D4',
+ "piv;": '\U000003D6',
+ "planck;": '\U0000210F',
+ "planckh;": '\U0000210E',
+ "plankv;": '\U0000210F',
+ "plus;": '\U0000002B',
+ "plusacir;": '\U00002A23',
+ "plusb;": '\U0000229E',
+ "pluscir;": '\U00002A22',
+ "plusdo;": '\U00002214',
+ "plusdu;": '\U00002A25',
+ "pluse;": '\U00002A72',
+ "plusmn;": '\U000000B1',
+ "plussim;": '\U00002A26',
+ "plustwo;": '\U00002A27',
+ "pm;": '\U000000B1',
+ "pointint;": '\U00002A15',
+ "popf;": '\U0001D561',
+ "pound;": '\U000000A3',
+ "pr;": '\U0000227A',
+ "prE;": '\U00002AB3',
+ "prap;": '\U00002AB7',
+ "prcue;": '\U0000227C',
+ "pre;": '\U00002AAF',
+ "prec;": '\U0000227A',
+ "precapprox;": '\U00002AB7',
+ "preccurlyeq;": '\U0000227C',
+ "preceq;": '\U00002AAF',
+ "precnapprox;": '\U00002AB9',
+ "precneqq;": '\U00002AB5',
+ "precnsim;": '\U000022E8',
+ "precsim;": '\U0000227E',
+ "prime;": '\U00002032',
+ "primes;": '\U00002119',
+ "prnE;": '\U00002AB5',
+ "prnap;": '\U00002AB9',
+ "prnsim;": '\U000022E8',
+ "prod;": '\U0000220F',
+ "profalar;": '\U0000232E',
+ "profline;": '\U00002312',
+ "profsurf;": '\U00002313',
+ "prop;": '\U0000221D',
+ "propto;": '\U0000221D',
+ "prsim;": '\U0000227E',
+ "prurel;": '\U000022B0',
+ "pscr;": '\U0001D4C5',
+ "psi;": '\U000003C8',
+ "puncsp;": '\U00002008',
+ "qfr;": '\U0001D52E',
+ "qint;": '\U00002A0C',
+ "qopf;": '\U0001D562',
+ "qprime;": '\U00002057',
+ "qscr;": '\U0001D4C6',
+ "quaternions;": '\U0000210D',
+ "quatint;": '\U00002A16',
+ "quest;": '\U0000003F',
+ "questeq;": '\U0000225F',
+ "quot;": '\U00000022',
+ "rAarr;": '\U000021DB',
+ "rArr;": '\U000021D2',
+ "rAtail;": '\U0000291C',
+ "rBarr;": '\U0000290F',
+ "rHar;": '\U00002964',
+ "racute;": '\U00000155',
+ "radic;": '\U0000221A',
+ "raemptyv;": '\U000029B3',
+ "rang;": '\U000027E9',
+ "rangd;": '\U00002992',
+ "range;": '\U000029A5',
+ "rangle;": '\U000027E9',
+ "raquo;": '\U000000BB',
+ "rarr;": '\U00002192',
+ "rarrap;": '\U00002975',
+ "rarrb;": '\U000021E5',
+ "rarrbfs;": '\U00002920',
+ "rarrc;": '\U00002933',
+ "rarrfs;": '\U0000291E',
+ "rarrhk;": '\U000021AA',
+ "rarrlp;": '\U000021AC',
+ "rarrpl;": '\U00002945',
+ "rarrsim;": '\U00002974',
+ "rarrtl;": '\U000021A3',
+ "rarrw;": '\U0000219D',
+ "ratail;": '\U0000291A',
+ "ratio;": '\U00002236',
+ "rationals;": '\U0000211A',
+ "rbarr;": '\U0000290D',
+ "rbbrk;": '\U00002773',
+ "rbrace;": '\U0000007D',
+ "rbrack;": '\U0000005D',
+ "rbrke;": '\U0000298C',
+ "rbrksld;": '\U0000298E',
+ "rbrkslu;": '\U00002990',
+ "rcaron;": '\U00000159',
+ "rcedil;": '\U00000157',
+ "rceil;": '\U00002309',
+ "rcub;": '\U0000007D',
+ "rcy;": '\U00000440',
+ "rdca;": '\U00002937',
+ "rdldhar;": '\U00002969',
+ "rdquo;": '\U0000201D',
+ "rdquor;": '\U0000201D',
+ "rdsh;": '\U000021B3',
+ "real;": '\U0000211C',
+ "realine;": '\U0000211B',
+ "realpart;": '\U0000211C',
+ "reals;": '\U0000211D',
+ "rect;": '\U000025AD',
+ "reg;": '\U000000AE',
+ "rfisht;": '\U0000297D',
+ "rfloor;": '\U0000230B',
+ "rfr;": '\U0001D52F',
+ "rhard;": '\U000021C1',
+ "rharu;": '\U000021C0',
+ "rharul;": '\U0000296C',
+ "rho;": '\U000003C1',
+ "rhov;": '\U000003F1',
+ "rightarrow;": '\U00002192',
+ "rightarrowtail;": '\U000021A3',
+ "rightharpoondown;": '\U000021C1',
+ "rightharpoonup;": '\U000021C0',
+ "rightleftarrows;": '\U000021C4',
+ "rightleftharpoons;": '\U000021CC',
+ "rightrightarrows;": '\U000021C9',
+ "rightsquigarrow;": '\U0000219D',
+ "rightthreetimes;": '\U000022CC',
+ "ring;": '\U000002DA',
+ "risingdotseq;": '\U00002253',
+ "rlarr;": '\U000021C4',
+ "rlhar;": '\U000021CC',
+ "rlm;": '\U0000200F',
+ "rmoust;": '\U000023B1',
+ "rmoustache;": '\U000023B1',
+ "rnmid;": '\U00002AEE',
+ "roang;": '\U000027ED',
+ "roarr;": '\U000021FE',
+ "robrk;": '\U000027E7',
+ "ropar;": '\U00002986',
+ "ropf;": '\U0001D563',
+ "roplus;": '\U00002A2E',
+ "rotimes;": '\U00002A35',
+ "rpar;": '\U00000029',
+ "rpargt;": '\U00002994',
+ "rppolint;": '\U00002A12',
+ "rrarr;": '\U000021C9',
+ "rsaquo;": '\U0000203A',
+ "rscr;": '\U0001D4C7',
+ "rsh;": '\U000021B1',
+ "rsqb;": '\U0000005D',
+ "rsquo;": '\U00002019',
+ "rsquor;": '\U00002019',
+ "rthree;": '\U000022CC',
+ "rtimes;": '\U000022CA',
+ "rtri;": '\U000025B9',
+ "rtrie;": '\U000022B5',
+ "rtrif;": '\U000025B8',
+ "rtriltri;": '\U000029CE',
+ "ruluhar;": '\U00002968',
+ "rx;": '\U0000211E',
+ "sacute;": '\U0000015B',
+ "sbquo;": '\U0000201A',
+ "sc;": '\U0000227B',
+ "scE;": '\U00002AB4',
+ "scap;": '\U00002AB8',
+ "scaron;": '\U00000161',
+ "sccue;": '\U0000227D',
+ "sce;": '\U00002AB0',
+ "scedil;": '\U0000015F',
+ "scirc;": '\U0000015D',
+ "scnE;": '\U00002AB6',
+ "scnap;": '\U00002ABA',
+ "scnsim;": '\U000022E9',
+ "scpolint;": '\U00002A13',
+ "scsim;": '\U0000227F',
+ "scy;": '\U00000441',
+ "sdot;": '\U000022C5',
+ "sdotb;": '\U000022A1',
+ "sdote;": '\U00002A66',
+ "seArr;": '\U000021D8',
+ "searhk;": '\U00002925',
+ "searr;": '\U00002198',
+ "searrow;": '\U00002198',
+ "sect;": '\U000000A7',
+ "semi;": '\U0000003B',
+ "seswar;": '\U00002929',
+ "setminus;": '\U00002216',
+ "setmn;": '\U00002216',
+ "sext;": '\U00002736',
+ "sfr;": '\U0001D530',
+ "sfrown;": '\U00002322',
+ "sharp;": '\U0000266F',
+ "shchcy;": '\U00000449',
+ "shcy;": '\U00000448',
+ "shortmid;": '\U00002223',
+ "shortparallel;": '\U00002225',
+ "shy;": '\U000000AD',
+ "sigma;": '\U000003C3',
+ "sigmaf;": '\U000003C2',
+ "sigmav;": '\U000003C2',
+ "sim;": '\U0000223C',
+ "simdot;": '\U00002A6A',
+ "sime;": '\U00002243',
+ "simeq;": '\U00002243',
+ "simg;": '\U00002A9E',
+ "simgE;": '\U00002AA0',
+ "siml;": '\U00002A9D',
+ "simlE;": '\U00002A9F',
+ "simne;": '\U00002246',
+ "simplus;": '\U00002A24',
+ "simrarr;": '\U00002972',
+ "slarr;": '\U00002190',
+ "smallsetminus;": '\U00002216',
+ "smashp;": '\U00002A33',
+ "smeparsl;": '\U000029E4',
+ "smid;": '\U00002223',
+ "smile;": '\U00002323',
+ "smt;": '\U00002AAA',
+ "smte;": '\U00002AAC',
+ "softcy;": '\U0000044C',
+ "sol;": '\U0000002F',
+ "solb;": '\U000029C4',
+ "solbar;": '\U0000233F',
+ "sopf;": '\U0001D564',
+ "spades;": '\U00002660',
+ "spadesuit;": '\U00002660',
+ "spar;": '\U00002225',
+ "sqcap;": '\U00002293',
+ "sqcup;": '\U00002294',
+ "sqsub;": '\U0000228F',
+ "sqsube;": '\U00002291',
+ "sqsubset;": '\U0000228F',
+ "sqsubseteq;": '\U00002291',
+ "sqsup;": '\U00002290',
+ "sqsupe;": '\U00002292',
+ "sqsupset;": '\U00002290',
+ "sqsupseteq;": '\U00002292',
+ "squ;": '\U000025A1',
+ "square;": '\U000025A1',
+ "squarf;": '\U000025AA',
+ "squf;": '\U000025AA',
+ "srarr;": '\U00002192',
+ "sscr;": '\U0001D4C8',
+ "ssetmn;": '\U00002216',
+ "ssmile;": '\U00002323',
+ "sstarf;": '\U000022C6',
+ "star;": '\U00002606',
+ "starf;": '\U00002605',
+ "straightepsilon;": '\U000003F5',
+ "straightphi;": '\U000003D5',
+ "strns;": '\U000000AF',
+ "sub;": '\U00002282',
+ "subE;": '\U00002AC5',
+ "subdot;": '\U00002ABD',
+ "sube;": '\U00002286',
+ "subedot;": '\U00002AC3',
+ "submult;": '\U00002AC1',
+ "subnE;": '\U00002ACB',
+ "subne;": '\U0000228A',
+ "subplus;": '\U00002ABF',
+ "subrarr;": '\U00002979',
+ "subset;": '\U00002282',
+ "subseteq;": '\U00002286',
+ "subseteqq;": '\U00002AC5',
+ "subsetneq;": '\U0000228A',
+ "subsetneqq;": '\U00002ACB',
+ "subsim;": '\U00002AC7',
+ "subsub;": '\U00002AD5',
+ "subsup;": '\U00002AD3',
+ "succ;": '\U0000227B',
+ "succapprox;": '\U00002AB8',
+ "succcurlyeq;": '\U0000227D',
+ "succeq;": '\U00002AB0',
+ "succnapprox;": '\U00002ABA',
+ "succneqq;": '\U00002AB6',
+ "succnsim;": '\U000022E9',
+ "succsim;": '\U0000227F',
+ "sum;": '\U00002211',
+ "sung;": '\U0000266A',
+ "sup;": '\U00002283',
+ "sup1;": '\U000000B9',
+ "sup2;": '\U000000B2',
+ "sup3;": '\U000000B3',
+ "supE;": '\U00002AC6',
+ "supdot;": '\U00002ABE',
+ "supdsub;": '\U00002AD8',
+ "supe;": '\U00002287',
+ "supedot;": '\U00002AC4',
+ "suphsol;": '\U000027C9',
+ "suphsub;": '\U00002AD7',
+ "suplarr;": '\U0000297B',
+ "supmult;": '\U00002AC2',
+ "supnE;": '\U00002ACC',
+ "supne;": '\U0000228B',
+ "supplus;": '\U00002AC0',
+ "supset;": '\U00002283',
+ "supseteq;": '\U00002287',
+ "supseteqq;": '\U00002AC6',
+ "supsetneq;": '\U0000228B',
+ "supsetneqq;": '\U00002ACC',
+ "supsim;": '\U00002AC8',
+ "supsub;": '\U00002AD4',
+ "supsup;": '\U00002AD6',
+ "swArr;": '\U000021D9',
+ "swarhk;": '\U00002926',
+ "swarr;": '\U00002199',
+ "swarrow;": '\U00002199',
+ "swnwar;": '\U0000292A',
+ "szlig;": '\U000000DF',
+ "target;": '\U00002316',
+ "tau;": '\U000003C4',
+ "tbrk;": '\U000023B4',
+ "tcaron;": '\U00000165',
+ "tcedil;": '\U00000163',
+ "tcy;": '\U00000442',
+ "tdot;": '\U000020DB',
+ "telrec;": '\U00002315',
+ "tfr;": '\U0001D531',
+ "there4;": '\U00002234',
+ "therefore;": '\U00002234',
+ "theta;": '\U000003B8',
+ "thetasym;": '\U000003D1',
+ "thetav;": '\U000003D1',
+ "thickapprox;": '\U00002248',
+ "thicksim;": '\U0000223C',
+ "thinsp;": '\U00002009',
+ "thkap;": '\U00002248',
+ "thksim;": '\U0000223C',
+ "thorn;": '\U000000FE',
+ "tilde;": '\U000002DC',
+ "times;": '\U000000D7',
+ "timesb;": '\U000022A0',
+ "timesbar;": '\U00002A31',
+ "timesd;": '\U00002A30',
+ "tint;": '\U0000222D',
+ "toea;": '\U00002928',
+ "top;": '\U000022A4',
+ "topbot;": '\U00002336',
+ "topcir;": '\U00002AF1',
+ "topf;": '\U0001D565',
+ "topfork;": '\U00002ADA',
+ "tosa;": '\U00002929',
+ "tprime;": '\U00002034',
+ "trade;": '\U00002122',
+ "triangle;": '\U000025B5',
+ "triangledown;": '\U000025BF',
+ "triangleleft;": '\U000025C3',
+ "trianglelefteq;": '\U000022B4',
+ "triangleq;": '\U0000225C',
+ "triangleright;": '\U000025B9',
+ "trianglerighteq;": '\U000022B5',
+ "tridot;": '\U000025EC',
+ "trie;": '\U0000225C',
+ "triminus;": '\U00002A3A',
+ "triplus;": '\U00002A39',
+ "trisb;": '\U000029CD',
+ "tritime;": '\U00002A3B',
+ "trpezium;": '\U000023E2',
+ "tscr;": '\U0001D4C9',
+ "tscy;": '\U00000446',
+ "tshcy;": '\U0000045B',
+ "tstrok;": '\U00000167',
+ "twixt;": '\U0000226C',
+ "twoheadleftarrow;": '\U0000219E',
+ "twoheadrightarrow;": '\U000021A0',
+ "uArr;": '\U000021D1',
+ "uHar;": '\U00002963',
+ "uacute;": '\U000000FA',
+ "uarr;": '\U00002191',
+ "ubrcy;": '\U0000045E',
+ "ubreve;": '\U0000016D',
+ "ucirc;": '\U000000FB',
+ "ucy;": '\U00000443',
+ "udarr;": '\U000021C5',
+ "udblac;": '\U00000171',
+ "udhar;": '\U0000296E',
+ "ufisht;": '\U0000297E',
+ "ufr;": '\U0001D532',
+ "ugrave;": '\U000000F9',
+ "uharl;": '\U000021BF',
+ "uharr;": '\U000021BE',
+ "uhblk;": '\U00002580',
+ "ulcorn;": '\U0000231C',
+ "ulcorner;": '\U0000231C',
+ "ulcrop;": '\U0000230F',
+ "ultri;": '\U000025F8',
+ "umacr;": '\U0000016B',
+ "uml;": '\U000000A8',
+ "uogon;": '\U00000173',
+ "uopf;": '\U0001D566',
+ "uparrow;": '\U00002191',
+ "updownarrow;": '\U00002195',
+ "upharpoonleft;": '\U000021BF',
+ "upharpoonright;": '\U000021BE',
+ "uplus;": '\U0000228E',
+ "upsi;": '\U000003C5',
+ "upsih;": '\U000003D2',
+ "upsilon;": '\U000003C5',
+ "upuparrows;": '\U000021C8',
+ "urcorn;": '\U0000231D',
+ "urcorner;": '\U0000231D',
+ "urcrop;": '\U0000230E',
+ "uring;": '\U0000016F',
+ "urtri;": '\U000025F9',
+ "uscr;": '\U0001D4CA',
+ "utdot;": '\U000022F0',
+ "utilde;": '\U00000169',
+ "utri;": '\U000025B5',
+ "utrif;": '\U000025B4',
+ "uuarr;": '\U000021C8',
+ "uuml;": '\U000000FC',
+ "uwangle;": '\U000029A7',
+ "vArr;": '\U000021D5',
+ "vBar;": '\U00002AE8',
+ "vBarv;": '\U00002AE9',
+ "vDash;": '\U000022A8',
+ "vangrt;": '\U0000299C',
+ "varepsilon;": '\U000003F5',
+ "varkappa;": '\U000003F0',
+ "varnothing;": '\U00002205',
+ "varphi;": '\U000003D5',
+ "varpi;": '\U000003D6',
+ "varpropto;": '\U0000221D',
+ "varr;": '\U00002195',
+ "varrho;": '\U000003F1',
+ "varsigma;": '\U000003C2',
+ "vartheta;": '\U000003D1',
+ "vartriangleleft;": '\U000022B2',
+ "vartriangleright;": '\U000022B3',
+ "vcy;": '\U00000432',
+ "vdash;": '\U000022A2',
+ "vee;": '\U00002228',
+ "veebar;": '\U000022BB',
+ "veeeq;": '\U0000225A',
+ "vellip;": '\U000022EE',
+ "verbar;": '\U0000007C',
+ "vert;": '\U0000007C',
+ "vfr;": '\U0001D533',
+ "vltri;": '\U000022B2',
+ "vopf;": '\U0001D567',
+ "vprop;": '\U0000221D',
+ "vrtri;": '\U000022B3',
+ "vscr;": '\U0001D4CB',
+ "vzigzag;": '\U0000299A',
+ "wcirc;": '\U00000175',
+ "wedbar;": '\U00002A5F',
+ "wedge;": '\U00002227',
+ "wedgeq;": '\U00002259',
+ "weierp;": '\U00002118',
+ "wfr;": '\U0001D534',
+ "wopf;": '\U0001D568',
+ "wp;": '\U00002118',
+ "wr;": '\U00002240',
+ "wreath;": '\U00002240',
+ "wscr;": '\U0001D4CC',
+ "xcap;": '\U000022C2',
+ "xcirc;": '\U000025EF',
+ "xcup;": '\U000022C3',
+ "xdtri;": '\U000025BD',
+ "xfr;": '\U0001D535',
+ "xhArr;": '\U000027FA',
+ "xharr;": '\U000027F7',
+ "xi;": '\U000003BE',
+ "xlArr;": '\U000027F8',
+ "xlarr;": '\U000027F5',
+ "xmap;": '\U000027FC',
+ "xnis;": '\U000022FB',
+ "xodot;": '\U00002A00',
+ "xopf;": '\U0001D569',
+ "xoplus;": '\U00002A01',
+ "xotime;": '\U00002A02',
+ "xrArr;": '\U000027F9',
+ "xrarr;": '\U000027F6',
+ "xscr;": '\U0001D4CD',
+ "xsqcup;": '\U00002A06',
+ "xuplus;": '\U00002A04',
+ "xutri;": '\U000025B3',
+ "xvee;": '\U000022C1',
+ "xwedge;": '\U000022C0',
+ "yacute;": '\U000000FD',
+ "yacy;": '\U0000044F',
+ "ycirc;": '\U00000177',
+ "ycy;": '\U0000044B',
+ "yen;": '\U000000A5',
+ "yfr;": '\U0001D536',
+ "yicy;": '\U00000457',
+ "yopf;": '\U0001D56A',
+ "yscr;": '\U0001D4CE',
+ "yucy;": '\U0000044E',
+ "yuml;": '\U000000FF',
+ "zacute;": '\U0000017A',
+ "zcaron;": '\U0000017E',
+ "zcy;": '\U00000437',
+ "zdot;": '\U0000017C',
+ "zeetrf;": '\U00002128',
+ "zeta;": '\U000003B6',
+ "zfr;": '\U0001D537',
+ "zhcy;": '\U00000436',
+ "zigrarr;": '\U000021DD',
+ "zopf;": '\U0001D56B',
+ "zscr;": '\U0001D4CF',
+ "zwj;": '\U0000200D',
+ "zwnj;": '\U0000200C',
+ "AElig": '\U000000C6',
+ "AMP": '\U00000026',
+ "Aacute": '\U000000C1',
+ "Acirc": '\U000000C2',
+ "Agrave": '\U000000C0',
+ "Aring": '\U000000C5',
+ "Atilde": '\U000000C3',
+ "Auml": '\U000000C4',
+ "COPY": '\U000000A9',
+ "Ccedil": '\U000000C7',
+ "ETH": '\U000000D0',
+ "Eacute": '\U000000C9',
+ "Ecirc": '\U000000CA',
+ "Egrave": '\U000000C8',
+ "Euml": '\U000000CB',
+ "GT": '\U0000003E',
+ "Iacute": '\U000000CD',
+ "Icirc": '\U000000CE',
+ "Igrave": '\U000000CC',
+ "Iuml": '\U000000CF',
+ "LT": '\U0000003C',
+ "Ntilde": '\U000000D1',
+ "Oacute": '\U000000D3',
+ "Ocirc": '\U000000D4',
+ "Ograve": '\U000000D2',
+ "Oslash": '\U000000D8',
+ "Otilde": '\U000000D5',
+ "Ouml": '\U000000D6',
+ "QUOT": '\U00000022',
+ "REG": '\U000000AE',
+ "THORN": '\U000000DE',
+ "Uacute": '\U000000DA',
+ "Ucirc": '\U000000DB',
+ "Ugrave": '\U000000D9',
+ "Uuml": '\U000000DC',
+ "Yacute": '\U000000DD',
+ "aacute": '\U000000E1',
+ "acirc": '\U000000E2',
+ "acute": '\U000000B4',
+ "aelig": '\U000000E6',
+ "agrave": '\U000000E0',
+ "amp": '\U00000026',
+ "aring": '\U000000E5',
+ "atilde": '\U000000E3',
+ "auml": '\U000000E4',
+ "brvbar": '\U000000A6',
+ "ccedil": '\U000000E7',
+ "cedil": '\U000000B8',
+ "cent": '\U000000A2',
+ "copy": '\U000000A9',
+ "curren": '\U000000A4',
+ "deg": '\U000000B0',
+ "divide": '\U000000F7',
+ "eacute": '\U000000E9',
+ "ecirc": '\U000000EA',
+ "egrave": '\U000000E8',
+ "eth": '\U000000F0',
+ "euml": '\U000000EB',
+ "frac12": '\U000000BD',
+ "frac14": '\U000000BC',
+ "frac34": '\U000000BE',
+ "gt": '\U0000003E',
+ "iacute": '\U000000ED',
+ "icirc": '\U000000EE',
+ "iexcl": '\U000000A1',
+ "igrave": '\U000000EC',
+ "iquest": '\U000000BF',
+ "iuml": '\U000000EF',
+ "laquo": '\U000000AB',
+ "lt": '\U0000003C',
+ "macr": '\U000000AF',
+ "micro": '\U000000B5',
+ "middot": '\U000000B7',
+ "nbsp": '\U000000A0',
+ "not": '\U000000AC',
+ "ntilde": '\U000000F1',
+ "oacute": '\U000000F3',
+ "ocirc": '\U000000F4',
+ "ograve": '\U000000F2',
+ "ordf": '\U000000AA',
+ "ordm": '\U000000BA',
+ "oslash": '\U000000F8',
+ "otilde": '\U000000F5',
+ "ouml": '\U000000F6',
+ "para": '\U000000B6',
+ "plusmn": '\U000000B1',
+ "pound": '\U000000A3',
+ "quot": '\U00000022',
+ "raquo": '\U000000BB',
+ "reg": '\U000000AE',
+ "sect": '\U000000A7',
+ "shy": '\U000000AD',
+ "sup1": '\U000000B9',
+ "sup2": '\U000000B2',
+ "sup3": '\U000000B3',
+ "szlig": '\U000000DF',
+ "thorn": '\U000000FE',
+ "times": '\U000000D7',
+ "uacute": '\U000000FA',
+ "ucirc": '\U000000FB',
+ "ugrave": '\U000000F9',
+ "uml": '\U000000A8',
+ "uuml": '\U000000FC',
+ "yacute": '\U000000FD',
+ "yen": '\U000000A5',
+ "yuml": '\U000000FF',
+}
+
+// HTML entities that are two unicode codepoints.
+var entity2 = map[string][2]rune{
+ // TODO(nigeltao): Handle replacements that are wider than their names.
+ // "nLt;": {'\u226A', '\u20D2'},
+ // "nGt;": {'\u226B', '\u20D2'},
+ "NotEqualTilde;": {'\u2242', '\u0338'},
+ "NotGreaterFullEqual;": {'\u2267', '\u0338'},
+ "NotGreaterGreater;": {'\u226B', '\u0338'},
+ "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'},
+ "NotHumpDownHump;": {'\u224E', '\u0338'},
+ "NotHumpEqual;": {'\u224F', '\u0338'},
+ "NotLeftTriangleBar;": {'\u29CF', '\u0338'},
+ "NotLessLess;": {'\u226A', '\u0338'},
+ "NotLessSlantEqual;": {'\u2A7D', '\u0338'},
+ "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'},
+ "NotNestedLessLess;": {'\u2AA1', '\u0338'},
+ "NotPrecedesEqual;": {'\u2AAF', '\u0338'},
+ "NotRightTriangleBar;": {'\u29D0', '\u0338'},
+ "NotSquareSubset;": {'\u228F', '\u0338'},
+ "NotSquareSuperset;": {'\u2290', '\u0338'},
+ "NotSubset;": {'\u2282', '\u20D2'},
+ "NotSucceedsEqual;": {'\u2AB0', '\u0338'},
+ "NotSucceedsTilde;": {'\u227F', '\u0338'},
+ "NotSuperset;": {'\u2283', '\u20D2'},
+ "ThickSpace;": {'\u205F', '\u200A'},
+ "acE;": {'\u223E', '\u0333'},
+ "bne;": {'\u003D', '\u20E5'},
+ "bnequiv;": {'\u2261', '\u20E5'},
+ "caps;": {'\u2229', '\uFE00'},
+ "cups;": {'\u222A', '\uFE00'},
+ "fjlig;": {'\u0066', '\u006A'},
+ "gesl;": {'\u22DB', '\uFE00'},
+ "gvertneqq;": {'\u2269', '\uFE00'},
+ "gvnE;": {'\u2269', '\uFE00'},
+ "lates;": {'\u2AAD', '\uFE00'},
+ "lesg;": {'\u22DA', '\uFE00'},
+ "lvertneqq;": {'\u2268', '\uFE00'},
+ "lvnE;": {'\u2268', '\uFE00'},
+ "nGg;": {'\u22D9', '\u0338'},
+ "nGtv;": {'\u226B', '\u0338'},
+ "nLl;": {'\u22D8', '\u0338'},
+ "nLtv;": {'\u226A', '\u0338'},
+ "nang;": {'\u2220', '\u20D2'},
+ "napE;": {'\u2A70', '\u0338'},
+ "napid;": {'\u224B', '\u0338'},
+ "nbump;": {'\u224E', '\u0338'},
+ "nbumpe;": {'\u224F', '\u0338'},
+ "ncongdot;": {'\u2A6D', '\u0338'},
+ "nedot;": {'\u2250', '\u0338'},
+ "nesim;": {'\u2242', '\u0338'},
+ "ngE;": {'\u2267', '\u0338'},
+ "ngeqq;": {'\u2267', '\u0338'},
+ "ngeqslant;": {'\u2A7E', '\u0338'},
+ "nges;": {'\u2A7E', '\u0338'},
+ "nlE;": {'\u2266', '\u0338'},
+ "nleqq;": {'\u2266', '\u0338'},
+ "nleqslant;": {'\u2A7D', '\u0338'},
+ "nles;": {'\u2A7D', '\u0338'},
+ "notinE;": {'\u22F9', '\u0338'},
+ "notindot;": {'\u22F5', '\u0338'},
+ "nparsl;": {'\u2AFD', '\u20E5'},
+ "npart;": {'\u2202', '\u0338'},
+ "npre;": {'\u2AAF', '\u0338'},
+ "npreceq;": {'\u2AAF', '\u0338'},
+ "nrarrc;": {'\u2933', '\u0338'},
+ "nrarrw;": {'\u219D', '\u0338'},
+ "nsce;": {'\u2AB0', '\u0338'},
+ "nsubE;": {'\u2AC5', '\u0338'},
+ "nsubset;": {'\u2282', '\u20D2'},
+ "nsubseteqq;": {'\u2AC5', '\u0338'},
+ "nsucceq;": {'\u2AB0', '\u0338'},
+ "nsupE;": {'\u2AC6', '\u0338'},
+ "nsupset;": {'\u2283', '\u20D2'},
+ "nsupseteqq;": {'\u2AC6', '\u0338'},
+ "nvap;": {'\u224D', '\u20D2'},
+ "nvge;": {'\u2265', '\u20D2'},
+ "nvgt;": {'\u003E', '\u20D2'},
+ "nvle;": {'\u2264', '\u20D2'},
+ "nvlt;": {'\u003C', '\u20D2'},
+ "nvltrie;": {'\u22B4', '\u20D2'},
+ "nvrtrie;": {'\u22B5', '\u20D2'},
+ "nvsim;": {'\u223C', '\u20D2'},
+ "race;": {'\u223D', '\u0331'},
+ "smtes;": {'\u2AAC', '\uFE00'},
+ "sqcaps;": {'\u2293', '\uFE00'},
+ "sqcups;": {'\u2294', '\uFE00'},
+ "varsubsetneq;": {'\u228A', '\uFE00'},
+ "varsubsetneqq;": {'\u2ACB', '\uFE00'},
+ "varsupsetneq;": {'\u228B', '\uFE00'},
+ "varsupsetneqq;": {'\u2ACC', '\uFE00'},
+ "vnsub;": {'\u2282', '\u20D2'},
+ "vnsup;": {'\u2283', '\u20D2'},
+ "vsubnE;": {'\u2ACB', '\uFE00'},
+ "vsubne;": {'\u228A', '\uFE00'},
+ "vsupnE;": {'\u2ACC', '\uFE00'},
+ "vsupne;": {'\u228B', '\uFE00'},
+}
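+
+// Note that some named references have no single-codepoint form: for example,
+// "&NotEqualTilde;" decodes to U+2242 (minus tilde) followed by U+0338
+// (combining long solidus overlay), so decoders must emit two runes for one
+// reference.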
diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go
new file mode 100644
index 000000000000..04c6bec21073
--- /dev/null
+++ b/vendor/golang.org/x/net/html/escape.go
@@ -0,0 +1,339 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bytes"
+ "strings"
+ "unicode/utf8"
+)
+
+// These replacements permit compatibility with old numeric entities that
+// assumed Windows-1252 encoding.
+// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
+var replacementTable = [...]rune{
+ '\u20AC', // First entry is what 0x80 should be replaced with.
+ '\u0081',
+ '\u201A',
+ '\u0192',
+ '\u201E',
+ '\u2026',
+ '\u2020',
+ '\u2021',
+ '\u02C6',
+ '\u2030',
+ '\u0160',
+ '\u2039',
+ '\u0152',
+ '\u008D',
+ '\u017D',
+ '\u008F',
+ '\u0090',
+ '\u2018',
+ '\u2019',
+ '\u201C',
+ '\u201D',
+ '\u2022',
+ '\u2013',
+ '\u2014',
+ '\u02DC',
+ '\u2122',
+ '\u0161',
+ '\u203A',
+ '\u0153',
+ '\u009D',
+ '\u017E',
+ '\u0178', // Last entry is 0x9F.
+ // 0x00->'\uFFFD' is handled programmatically.
+ // 0x0D->'\u000D' is a no-op.
+}
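+
+// For example, the numeric reference "&#x80;" nominally names the C1 control
+// character U+0080, but replacementTable[0] decodes it to '\u20AC' (the euro
+// sign), matching how legacy Windows-1252 content used that byte.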
+
+// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
+// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
+// Precondition: b[src] == '&' && dst <= src.
+// attribute should be true if parsing an attribute value.
+func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
+ // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
+
+ // i starts at 1 because we already know that s[0] == '&'.
+ i, s := 1, b[src:]
+
+ if len(s) <= 1 {
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+
+ if s[i] == '#' {
+ if len(s) <= 3 { // We need to have at least "&#.".
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+ i++
+ c := s[i]
+ hex := false
+ if c == 'x' || c == 'X' {
+ hex = true
+ i++
+ }
+
+ x := '\x00'
+ for i < len(s) {
+ c = s[i]
+ i++
+ if hex {
+ if '0' <= c && c <= '9' {
+ x = 16*x + rune(c) - '0'
+ continue
+ } else if 'a' <= c && c <= 'f' {
+ x = 16*x + rune(c) - 'a' + 10
+ continue
+ } else if 'A' <= c && c <= 'F' {
+ x = 16*x + rune(c) - 'A' + 10
+ continue
+ }
+ } else if '0' <= c && c <= '9' {
+ x = 10*x + rune(c) - '0'
+ continue
+ }
+ if c != ';' {
+ i--
+ }
+ break
+ }
+
+ if i <= 3 { // No characters matched.
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+
+ if 0x80 <= x && x <= 0x9F {
+ // Replace characters from Windows-1252 with UTF-8 equivalents.
+ x = replacementTable[x-0x80]
+ } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
+ // Replace invalid characters with the replacement character.
+ x = '\uFFFD'
+ }
+
+ return dst + utf8.EncodeRune(b[dst:], x), src + i
+ }
+
+ // Consume the maximum number of characters possible, with the
+ // consumed characters matching one of the named references.
+
+ for i < len(s) {
+ c := s[i]
+ i++
+ // Lower-cased characters are more common in entities, so we check for them first.
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+ continue
+ }
+ if c != ';' {
+ i--
+ }
+ break
+ }
+
+ entityName := string(s[1:i])
+ if entityName == "" {
+ // No-op.
+ } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
+ // No-op.
+ } else if x := entity[entityName]; x != 0 {
+ return dst + utf8.EncodeRune(b[dst:], x), src + i
+ } else if x := entity2[entityName]; x[0] != 0 {
+ dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
+ return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
+ } else if !attribute {
+ maxLen := len(entityName) - 1
+ if maxLen > longestEntityWithoutSemicolon {
+ maxLen = longestEntityWithoutSemicolon
+ }
+ for j := maxLen; j > 1; j-- {
+ if x := entity[entityName[:j]]; x != 0 {
+ return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
+ }
+ }
+ }
+
+ dst1, src1 = dst+i, src+i
+ copy(b[dst:dst1], b[src:src1])
+ return dst1, src1
+}
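+
+// For example, in text content "&notit;" unescapes to "¬it;": the longest
+// matching prefix of a known entity name ("not") is used even without a
+// trailing semicolon. In an attribute value the same input is left as-is,
+// because the prefix-matching branch above only runs when attribute is false.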
+
+// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
+// attribute should be true if parsing an attribute value.
+func unescape(b []byte, attribute bool) []byte {
+ for i, c := range b {
+ if c == '&' {
+ dst, src := unescapeEntity(b, i, i, attribute)
+ for src < len(b) {
+ c := b[src]
+ if c == '&' {
+ dst, src = unescapeEntity(b, dst, src, attribute)
+ } else {
+ b[dst] = c
+ dst, src = dst+1, src+1
+ }
+ }
+ return b[0:dst]
+ }
+ }
+ return b
+}
+
+// escapeComment is like func escape but escapes its input bytes less often.
+// Per https://github.com/golang/go/issues/58246 some HTML comments are (1)
+// meaningful and (2) contain angle brackets: it is the '>' byte that, per
+// above, we'd like to avoid escaping unless we have to.
+//
+// Studying the summary table (and T actions in its '>' column) closely, we
+// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the
+// start of the comment data. State 52 is after a '!'. The other three states
+// are after a '-'.
+//
+// Our algorithm is thus to escape every '&' and to escape '>' if and only if:
+//   - The '>' is after a '!' or '-' (in the unescaped data) or
+//   - The '>' is at the start of the comment data (after the opening "<!--").
+func escapeComment(w writer, s string) error {
diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go
new file mode 100644
--- /dev/null
+++ b/vendor/golang.org/x/net/html/render.go
+ if _, err := w.WriteString("-->"); err != nil {
+ return err
+ }
+ return nil
+ case DoctypeNode:
+ if _, err := w.WriteString("<!DOCTYPE "); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ if n.Attr != nil {
+ var p, s string
+ for _, a := range n.Attr {
+ switch a.Key {
+ case "public":
+ p = a.Val
+ case "system":
+ s = a.Val
+ }
+ }
+ if p != "" {
+ if _, err := w.WriteString(" PUBLIC "); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, p); err != nil {
+ return err
+ }
+ if s != "" {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, s); err != nil {
+ return err
+ }
+ }
+ } else if s != "" {
+ if _, err := w.WriteString(" SYSTEM "); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, s); err != nil {
+ return err
+ }
+ }
+ }
+ return w.WriteByte('>')
+ case RawNode:
+ _, err := w.WriteString(n.Data)
+ return err
+ default:
+ return errors.New("html: unknown node type")
+ }
+
+ // Render the opening tag.
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ for _, a := range n.Attr {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ if a.Namespace != "" {
+ if _, err := w.WriteString(a.Namespace); err != nil {
+ return err
+ }
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if _, err := w.WriteString(a.Key); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(`="`); err != nil {
+ return err
+ }
+ if err := escape(w, a.Val); err != nil {
+ return err
+ }
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ }
+ if voidElements[n.Data] {
+ if n.FirstChild != nil {
+ return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
+ }
+ _, err := w.WriteString("/>")
+ return err
+ }
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+
+ // Add initial newline where there is danger of a newline being ignored.
+ if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
+ switch n.Data {
+ case "pre", "listing", "textarea":
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Render any child nodes
+ if childTextNodesAreLiteral(n) {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type == TextNode {
+ if _, err := w.WriteString(c.Data); err != nil {
+ return err
+ }
+ } else {
+ if err := render1(w, c); err != nil {
+ return err
+ }
+ }
+ }
+ if n.Data == "plaintext" {
+ // Don't render anything else. <plaintext> must be the
+ // last element in the file, with no closing tag.
+ return plaintextAbort
+ }
+ } else {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if err := render1(w, c); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Render the closing tag.
+ if _, err := w.WriteString("</"); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ return w.WriteByte('>')
+}
+
+func childTextNodesAreLiteral(n *Node) bool {
+ // Per WHATWG HTML 13.3, if the parent of the current node is a style,
+ // script, xmp, iframe, noembed, noframes, or plaintext element, and the
+ // current node is a text node, append the value of the node's data
+ // literally. The specification is not explicit about it, but we only
+ // enforce this if we are in the HTML namespace (i.e. when the namespace is
+ // "").
+ // NOTE: we also always include noscript elements, although the
+ // specification states that they should only be rendered as such if
+ // scripting is enabled for the node (which is not something we track).
+ if n.Namespace != "" {
+ return false
+ }
+ switch n.Data {
+ case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
+ return true
+ default:
+ return false
+ }
+}
+
+// writeQuoted writes s to w surrounded by quotes. Normally it will use double
+// quotes, but if s contains a double quote, it will use single quotes.
+// It is used for writing the identifiers in a doctype declaration.
+// In valid HTML, they can't contain both types of quotes.
+func writeQuoted(w writer, s string) error {
+ var q byte = '"'
+ if strings.Contains(s, `"`) {
+ q = '\''
+ }
+ if err := w.WriteByte(q); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(s); err != nil {
+ return err
+ }
+ if err := w.WriteByte(q); err != nil {
+ return err
+ }
+ return nil
+}
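+
+// For example, writeQuoted(w, `x"y`) emits 'x"y' inside single quotes, while
+// an identifier without a double quote is emitted inside double quotes.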
+
+// Section 12.1.2, "Elements", gives this list of void elements. Void elements
+// are those that can't have any contents.
+var voidElements = map[string]bool{
+ "area": true,
+ "base": true,
+ "br": true,
+ "col": true,
+ "embed": true,
+ "hr": true,
+ "img": true,
+ "input": true,
+ "keygen": true, // "keygen" has been removed from the spec, but is kept here for backwards compatibility.
+ "link": true,
+ "meta": true,
+ "param": true,
+ "source": true,
+ "track": true,
+ "wbr": true,
+}
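+
+// Accordingly, render1 above emits "<br/>" for a br element, and reports an
+// error, rather than silently dropping them, if a void element has children.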
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
new file mode 100644
index 000000000000..3c57880d6979
--- /dev/null
+++ b/vendor/golang.org/x/net/html/token.go
@@ -0,0 +1,1272 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/html/atom"
+)
+
+// A TokenType is the type of a Token.
+type TokenType uint32
+
+const (
+ // ErrorToken means that an error occurred during tokenization.
+ ErrorToken TokenType = iota
+ // TextToken means a text node.
+ TextToken
+ // A StartTagToken looks like <a>.
+ StartTagToken
+ // An EndTagToken looks like </a>.
+ EndTagToken
+ // A SelfClosingTagToken tag looks like <br/>.
+ SelfClosingTagToken
+ // A CommentToken looks like <!--x-->.
+ CommentToken
+ // A DoctypeToken looks like <!DOCTYPE x>
+ DoctypeToken
+)
+
+// ErrBufferExceeded means that the buffering limit was exceeded.
+var ErrBufferExceeded = errors.New("max buffer exceeded")
+
+// String returns a string representation of the TokenType.
+func (t TokenType) String() string {
+ switch t {
+ case ErrorToken:
+ return "Error"
+ case TextToken:
+ return "Text"
+ case StartTagToken:
+ return "StartTag"
+ case EndTagToken:
+ return "EndTag"
+ case SelfClosingTagToken:
+ return "SelfClosingTag"
+ case CommentToken:
+ return "Comment"
+ case DoctypeToken:
+ return "Doctype"
+ }
+ return "Invalid(" + strconv.Itoa(int(t)) + ")"
+}
+
+// An Attribute is an attribute namespace-key-value triple. Namespace is
+// non-empty for foreign attributes like xlink, Key is alphabetic (and hence
+// does not contain escapable characters like '&', '<' or '>'), and Val is
+// unescaped (it looks like "a<b" rather than "a&lt;b").
+//
+// Namespace is only used by the parser, not the tokenizer.
+type Attribute struct {
+ Namespace, Key, Val string
+}
+
+// A Token consists of a TokenType and some Data (tag name for start and end
+// tags, content for text, comments and doctypes). A tag Token may also
+// contain a slice of Attributes. Data is unescaped for all Tokens (it looks
+// like "a<b" rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for
+// Data, or zero if Data is not a known tag name.
+type Token struct {
+ Type     TokenType
+ DataAtom atom.Atom
+ Data     string
+ Attr     []Attribute
+}
+
+// tagString returns a string representation of a tag Token's Data and Attr.
+func (t Token) tagString() string {
+ if len(t.Attr) == 0 {
+ return t.Data
+ }
+ buf := bytes.NewBufferString(t.Data)
+ for _, a := range t.Attr {
+ buf.WriteByte(' ')
+ buf.WriteString(a.Key)
+ buf.WriteString(`="`)
+ escape(buf, a.Val)
+ buf.WriteByte('"')
+ }
+ return buf.String()
+}
+
+// String returns a string representation of the Token.
+func (t Token) String() string {
+ switch t.Type {
+ case ErrorToken:
+ return ""
+ case TextToken:
+ return EscapeString(t.Data)
+ case StartTagToken:
+ return "<" + t.tagString() + ">"
+ case EndTagToken:
+ return "</" + t.tagString() + ">"
+ case SelfClosingTagToken:
+ return "<" + t.tagString() + "/>"
+ case CommentToken:
+ return "<!--" + escapeCommentString(t.Data) + "-->"
+ case DoctypeToken:
+ return "<!DOCTYPE " + t.Data + ">"
+ }
+ return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
+}
+
+// span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
+// the end is exclusive.
+type span struct {
+ start, end int
+}
+
+// A Tokenizer returns a stream of HTML Tokens.
+type Tokenizer struct {
+ // r is the source of the HTML text.
+ r io.Reader
+ // tt is the TokenType of the current token.
+ tt TokenType
+ // err is the first error encountered during tokenization. It is possible
+ // for tt != Error && err != nil to hold: this means that Next returned a
+ // valid token but the subsequent Next call will return an error token.
+ // For example, if the HTML text input was just "plain", then the first
+ // Next call would set z.err to io.EOF but return a TextToken, and all
+ // subsequent Next calls would return an ErrorToken.
+ // err is never reset. Once it becomes non-nil, it stays non-nil.
+ err error
+ // readErr is the error returned by the io.Reader r. It is separate from
+ // err because it is valid for an io.Reader to return (n int, err1 error)
+ // such that n > 0 && err1 != nil, and callers should always process the
+ // n > 0 bytes before considering the error err1.
+ readErr error
+ // buf[raw.start:raw.end] holds the raw bytes of the current token.
+ // buf[raw.end:] is buffered input that will yield future tokens.
+ raw span
+ buf []byte
+ // maxBuf limits the data buffered in buf. A value of 0 means unlimited.
+ maxBuf int
+ // buf[data.start:data.end] holds the raw bytes of the current token's data:
+ // a text token's text, a tag token's tag name, etc.
+ data span
+ // pendingAttr is the attribute key and value currently being tokenized.
+ // When complete, pendingAttr is pushed onto attr. nAttrReturned is
+ // incremented on each call to TagAttr.
+ pendingAttr [2]span
+ attr [][2]span
+ nAttrReturned int
+ // rawTag is the "script" in "</script>" that closes the next token. If
+ // non-empty, the subsequent call to Next will return a raw or RCDATA text
+ // token: one that treats "<p>" as text instead of an element.
+ // rawTag's contents are lower-cased.
+ rawTag string
+ // textIsRaw is whether the current text token's data is not escaped.
+ textIsRaw bool
+ // convertNUL is whether NUL bytes in the current token's data should
+ // be converted into \ufffd replacement characters.
+ convertNUL bool
+ // allowCDATA is whether CDATA sections are allowed in the current context.
+ allowCDATA bool
+}
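+
+// A minimal tokenization loop looks like this (NewTokenizer, Next, Token and
+// Err are defined later in this file):
+//
+//	z := NewTokenizer(r)
+//	for z.Next() != ErrorToken {
+//		t := z.Token()
+//		_ = t // inspect t.Type, t.Data, t.Attr
+//	}
+//	// z.Err() reports io.EOF at the normal end of input.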
+
+// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as
+// the text "foo". The default value is false, which means to recognize it as
+// a bogus comment "<!-- [CDATA[foo]] -->" instead.
+//
+// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
+// only if tokenizing foreign content, such as MathML and SVG. However,
+// tracking foreign-contentness is difficult to do purely in the tokenizer,
+// as opposed to the parser, due to HTML integration points: an
", where "foo" is z.rawTag and
+// is typically something like "script" or "textarea".
+func (z *Tokenizer) readRawOrRCDATA() {
+ if z.rawTag == "script" {
+ z.readScript()
+ z.textIsRaw = true
+ z.rawTag = ""
+ return
+ }
+loop:
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ if c != '<' {
+ continue loop
+ }
+ c = z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ if c != '/' {
+ z.raw.end--
+ continue loop
+ }
+ if z.readRawEndTag() || z.err != nil {
+ break loop
+ }
+ }
+ z.data.end = z.raw.end
+ // A textarea's or title's RCDATA can contain escaped entities.
+ z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title"
+ z.rawTag = ""
+}
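+
+// For example, after "<title>a&lt;b</title>" the title's text token still
+// holds the raw bytes "a&lt;b" at this point; since textIsRaw is false for
+// title and textarea, the entity is unescaped only when the caller asks for
+// the token's text.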
+
+// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag.
+// If it succeeds, it backs up the input position to reconsume the tag and
+// returns true. Otherwise it returns false. The opening "</" has already been
+// consumed.
+func (z *Tokenizer) readRawEndTag() bool {
+ for i := 0; i < len(z.rawTag); i++ {
+ c := z.readByte()
+ if z.err != nil {
+ return false
+ }
+ if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') {
+ z.raw.end--
+ return false
+ }
+ }
+ c := z.readByte()
+ if z.err != nil {
+ return false
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f', '/', '>':
+ // The 3 is 2 for the leading "</" plus 1 for the trailing character c.
+ z.raw.end -= 3 + len(z.rawTag)
+ return true
+ }
+ z.raw.end--
+ return false
+}
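+
+// For example, with z.rawTag == "script", "</SCRIPT >" is accepted (the name
+// match is case-insensitive and ' ' is an allowed terminator), whereas
+// "</scripts" is rejected because 's' is not in the terminator set above.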
+
+// readScript reads until the next </script> tag, following the byzantine
+// rules for escaping/hiding the closing tag.
+func (z *Tokenizer) readScript() {
+ defer func() {
+ z.data.end = z.raw.end
+ }()
+ var c byte
+
+scriptData:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '<' {
+ goto scriptDataLessThanSign
+ }
+ goto scriptData
+
+scriptDataLessThanSign:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '/':
+ goto scriptDataEndTagOpen
+ case '!':
+ goto scriptDataEscapeStart
+ }
+ z.raw.end--
+ goto scriptData
+
+scriptDataEndTagOpen:
+ if z.readRawEndTag() || z.err != nil {
+ return
+ }
+ goto scriptData
+
+scriptDataEscapeStart:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '-' {
+ goto scriptDataEscapeStartDash
+ }
+ z.raw.end--
+ goto scriptData
+
+scriptDataEscapeStartDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '-' {
+ goto scriptDataEscapedDashDash
+ }
+ z.raw.end--
+ goto scriptData
+
+scriptDataEscaped:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataEscapedDash
+ case '<':
+ goto scriptDataEscapedLessThanSign
+ }
+ goto scriptDataEscaped
+
+scriptDataEscapedDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataEscapedDashDash
+ case '<':
+ goto scriptDataEscapedLessThanSign
+ }
+ goto scriptDataEscaped
+
+scriptDataEscapedDashDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataEscapedDashDash
+ case '<':
+ goto scriptDataEscapedLessThanSign
+ case '>':
+ goto scriptData
+ }
+ goto scriptDataEscaped
+
+scriptDataEscapedLessThanSign:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '/' {
+ goto scriptDataEscapedEndTagOpen
+ }
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
+ goto scriptDataDoubleEscapeStart
+ }
+ z.raw.end--
+ goto scriptData
+
+scriptDataEscapedEndTagOpen:
+ if z.readRawEndTag() || z.err != nil {
+ return
+ }
+ goto scriptDataEscaped
+
+scriptDataDoubleEscapeStart:
+ z.raw.end--
+ for i := 0; i < len("script"); i++ {
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c != "script"[i] && c != "SCRIPT"[i] {
+ z.raw.end--
+ goto scriptDataEscaped
+ }
+ }
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f', '/', '>':
+ goto scriptDataDoubleEscaped
+ }
+ z.raw.end--
+ goto scriptDataEscaped
+
+scriptDataDoubleEscaped:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataDoubleEscapedDash
+ case '<':
+ goto scriptDataDoubleEscapedLessThanSign
+ }
+ goto scriptDataDoubleEscaped
+
+scriptDataDoubleEscapedDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataDoubleEscapedDashDash
+ case '<':
+ goto scriptDataDoubleEscapedLessThanSign
+ }
+ goto scriptDataDoubleEscaped
+
+scriptDataDoubleEscapedDashDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataDoubleEscapedDashDash
+ case '<':
+ goto scriptDataDoubleEscapedLessThanSign
+ case '>':
+ goto scriptData
+ }
+ goto scriptDataDoubleEscaped
+
+scriptDataDoubleEscapedLessThanSign:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '/' {
+ goto scriptDataDoubleEscapeEnd
+ }
+ z.raw.end--
+ goto scriptDataDoubleEscaped
+
+scriptDataDoubleEscapeEnd:
+ if z.readRawEndTag() {
+ z.raw.end += len("</script>")
+ goto scriptDataEscaped
+ }
+ if z.err != nil {
+ return
+ }
+ goto scriptDataDoubleEscaped
+}
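+
+// For example, in "<script><!--<script>x</script>--></script>" the first
+// "</script>" sits in a double-escaped region, so it does not end the script
+// data: the element's text runs on until the second "</script>".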
+
+// readComment reads the next comment token starting with "<!--". The opening
+// "<!--" has already been consumed.
+func (z *Tokenizer) readComment() {
+ z.data.start = z.raw.end
+ defer func() {
+ if z.data.end < z.data.start {
+ // It's a comment with no data, like <!-->.
+ z.data.end = z.data.start
+ }
+ }()
+
+ var dashCount int
+ beginning := true
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.calculateAbruptCommentDataEnd()
+ return
+ }
+ switch c {
+ case '-':
+ dashCount++
+ continue
+ case '>':
+ if dashCount >= 2 || beginning {
+ z.data.end = z.raw.end - len("-->")
+ return
+ }
+ case '!':
+ if dashCount >= 2 {
+ c = z.readByte()
+ if z.err != nil {
+ z.data.end = z.calculateAbruptCommentDataEnd()
+ return
+ } else if c == '>' {
+ z.data.end = z.raw.end - len("--!>")
+ return
+ } else if c == '-' {
+ dashCount = 1
+ beginning = false
+ continue
+ }
+ }
+ }
+ dashCount = 0
+ beginning = false
+ }
+}
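+
+// For example, "<!--x--!>" ends via the '!' branch with data "x", and for
+// "<!-->" the '>' arrives while beginning is still true, so the deferred
+// clamp in readComment yields a comment with empty data.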
+
+func (z *Tokenizer) calculateAbruptCommentDataEnd() int {
+ raw := z.Raw()
+ const prefixLen = len("<!--")
+ if len(raw) >= prefixLen {
+ raw = raw[prefixLen:]
+ if hasSuffix(raw, "--!") {
+ return z.raw.end - 3
+ } else if hasSuffix(raw, "--") {
+ return z.raw.end - 2
+ } else if hasSuffix(raw, "-") {
+ return z.raw.end - 1
+ }
+ }
+ return z.raw.end
+}
+
+func hasSuffix(b []byte, suffix string) bool {
+ if len(b) < len(suffix) {
+ return false
+ }
+ b = b[len(b)-len(suffix):]
+ for i := range b {
+ if b[i] != suffix[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// readUntilCloseAngle reads until the next ">".
+func (z *Tokenizer) readUntilCloseAngle() {
+ z.data.start = z.raw.end
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return
+ }
+ if c == '>' {
+ z.data.end = z.raw.end - len(">")
+ return
+ }
+ }
+}
+
+// readMarkupDeclaration reads the next token starting with "<!". It might be
+// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or
+// "<!a bogus comment". The opening "<!" has already been consumed.
+func (z *Tokenizer) readMarkupDeclaration() TokenType {
+ z.data.start = z.raw.end
+ var c [2]byte
+ for i := 0; i < 2; i++ {
+ c[i] = z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return CommentToken
+ }
+ }
+ if c[0] == '-' && c[1] == '-' {
+ z.readComment()
+ return CommentToken
+ }
+ z.raw.end -= 2
+ if z.readDoctype() {
+ return DoctypeToken
+ }
+ if z.allowCDATA && z.readCDATA() {
+ z.convertNUL = true
+ return TextToken
+ }
+ // It's a bogus comment.
+ z.readUntilCloseAngle()
+ return CommentToken
+}
+
+// readDoctype attempts to read a doctype declaration and returns true if
+// successful. The opening "<!" has already been consumed.
+func (z *Tokenizer) readDoctype() bool {
+ const s = "DOCTYPE"
+ for i := 0; i < len(s); i++ {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return false
+ }
+ if c != s[i] && c != s[i]+('a'-'A') {
+ // Back up to read the fragment of "DOCTYPE" again.
+ z.raw.end = z.data.start
+ return false
+ }
+ }
+ if z.skipWhiteSpace(); z.err != nil {
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ return true
+ }
+ z.readUntilCloseAngle()
+ return true
+}
+
+// readCDATA attempts to read a CDATA section and returns true if
+// successful. The opening "<![CDATA[" has already been consumed.
+func (z *Tokenizer) readCDATA() bool {
+ const s = "CDATA["
+ for i := 0; i < len(s); i++ {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return false
+ }
+ if c != s[i] {
+ // Back up to read the fragment of "[CDATA[" again.
+ z.raw.end = z.data.start
+ return false
+ }
+ }
+ z.data.start = z.raw.end
+ brackets := 0
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return true
+ }
+ switch c {
+ case ']':
+ brackets++
+ case '>':
+ if brackets >= 2 {
+ z.data.end = z.raw.end - len("]]>")
+ return true
+ }
+ brackets = 0
+ default:
+ brackets = 0
+ }
+ }
+}
+
+// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end]
+// case-insensitively matches any element of ss.
+func (z *Tokenizer) startTagIn(ss ...string) bool {
+loop:
+ for _, s := range ss {
+ if z.data.end-z.data.start != len(s) {
+ continue loop
+ }
+ for i := 0; i < len(s); i++ {
+ c := z.buf[z.data.start+i]
+ if 'A' <= c && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ if c != s[i] {
+ continue loop
+ }
+ }
+ return true
+ }
+ return false
+}
+
+// readStartTag reads the next start tag token. The opening "<a" has already
+// been consumed, where 'a' means anything in [A-Za-z].
+func (z *Tokenizer) readStartTag() TokenType {
+ z.readTag(true)
+ if z.err != nil {
+ return ErrorToken
+ }
+ // Several tags flag the tokenizer's next token as raw.
+ c, raw := z.buf[z.data.start], false
+ if 'A' <= c && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ switch c {
+ case 'i':
+ raw = z.startTagIn("iframe")
+ case 'n':
+ raw = z.startTagIn("noembed", "noframes", "noscript")
+ case 'p':
+ raw = z.startTagIn("plaintext")
+ case 's':
+ raw = z.startTagIn("script", "style")
+ case 't':
+ raw = z.startTagIn("textarea", "title")
+ case 'x':
+ raw = z.startTagIn("xmp")
+ }
+ if raw {
+ z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))
+ }
+ // Look for a self-closing token like "<br/>".
+ if z.err == nil && z.buf[z.raw.end-2] == '/' {
+ return SelfClosingTagToken
+ }
+ return StartTagToken
+}
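+
+// For example, "<textarea rows=3>" sets z.rawTag to "textarea", so the text
+// that follows is tokenized as RCDATA up to the matching "</textarea>" rather
+// than as markup.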
+
+// readTag reads the next tag token and its attributes. If saveAttr, those
+// attributes are saved in z.attr, otherwise z.attr is set to an empty slice.
+// The opening "' {
+ break
+ }
+ z.raw.end--
+ z.readTagAttrKey()
+ z.readTagAttrVal()
+ // Save pendingAttr if saveAttr and that attribute has a non-empty key.
+ if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end {
+ z.attr = append(z.attr, z.pendingAttr)
+ }
+ if z.skipWhiteSpace(); z.err != nil {
+ break
+ }
+ }
+}
+
+// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end)