From f0a2e26c1fd74fd75f10121d26626203a06da810 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Apr 2026 08:29:36 +0000 Subject: [PATCH] Bump github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring Bumps [github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring](https://github.com/prometheus-operator/prometheus-operator) from 0.87.0 to 0.90.1. - [Release notes](https://github.com/prometheus-operator/prometheus-operator/releases) - [Changelog](https://github.com/prometheus-operator/prometheus-operator/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus-operator/prometheus-operator/compare/v0.87.0...v0.90.1) --- updated-dependencies: - dependency-name: github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring dependency-version: 0.90.1 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 24 +- go.sum | 48 +- .../apis/monitoring/v1/alertmanager_types.go | 138 ++++-- .../pkg/apis/monitoring/v1/http_config.go | 105 ++++- .../apis/monitoring/v1/podmonitor_types.go | 11 +- .../pkg/apis/monitoring/v1/probe_types.go | 27 +- .../apis/monitoring/v1/prometheus_types.go | 227 +++++---- .../monitoring/v1/prometheusrule_types.go | 3 + .../pkg/apis/monitoring/v1/thanos_types.go | 83 +++- .../pkg/apis/monitoring/v1/tls_types.go | 173 +++++++ .../pkg/apis/monitoring/v1/types.go | 386 +++++++-------- .../monitoring/v1/zz_generated.deepcopy.go | 382 ++++++++++----- vendor/golang.org/x/net/http2/http2.go | 16 +- vendor/golang.org/x/net/http2/server.go | 2 + vendor/golang.org/x/net/http2/transport.go | 8 - vendor/golang.org/x/net/http2/writesched.go | 6 + .../net/http2/writesched_priority_rfc7540.go | 5 + .../x/net/http2/writesched_random.go | 2 + .../x/sync/singleflight/singleflight.go | 14 +- .../x/tools/go/ast/inspector/cursor.go | 34 +- .../x/tools/go/ast/inspector/inspector.go | 4 +- 
.../x/tools/go/ast/inspector/iter.go | 36 +- .../x/tools/go/packages/packages.go | 15 +- .../x/tools/go/types/objectpath/objectpath.go | 16 +- .../x/tools/internal/aliases/aliases.go | 30 +- .../x/tools/internal/aliases/aliases_go122.go | 80 ---- .../x/tools/internal/event/core/event.go | 23 +- .../x/tools/internal/event/keys/keys.go | 439 +++--------------- .../x/tools/internal/event/label/label.go | 11 +- .../x/tools/internal/gcimporter/iexport.go | 11 +- .../x/tools/internal/gcimporter/iimport.go | 4 +- .../tools/internal/gcimporter/ureader_yes.go | 4 +- .../x/tools/internal/stdlib/deps.go | 2 +- .../x/tools/internal/typeparams/free.go | 4 +- .../x/tools/internal/typesinternal/types.go | 3 +- vendor/k8s.io/klog/v2/README.md | 2 - .../klog/v2/internal/serialize/keyvalues.go | 232 ++++----- .../internal/serialize/keyvalues_no_slog.go | 10 +- .../v2/internal/serialize/keyvalues_slog.go | 12 +- vendor/k8s.io/klog/v2/klog.go | 87 +++- vendor/k8s.io/klog/v2/klogr.go | 4 +- vendor/k8s.io/klog/v2/klogr_slog.go | 11 +- .../kube-openapi/pkg/validation/spec/ref.go | 50 -- vendor/k8s.io/utils/buffer/ring_fixed.go | 120 +++++ vendor/k8s.io/utils/strings/slices/slices.go | 51 +- vendor/modules.txt | 36 +- 46 files changed, 1630 insertions(+), 1361 deletions(-) create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/tls_types.go delete mode 100644 vendor/golang.org/x/tools/internal/aliases/aliases_go122.go create mode 100644 vendor/k8s.io/utils/buffer/ring_fixed.go diff --git a/go.mod b/go.mod index 561f257d9f..6c807e9852 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/onsi/ginkgo/v2 v2.28.1 github.com/onsi/gomega v1.39.1 github.com/openshift/api v0.0.0-20250707164913-2cd5821c9080 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.87.0 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.90.1 github.com/prometheus/common v0.67.5 github.com/sirupsen/logrus v1.9.4 
github.com/stretchr/testify v1.11.1 @@ -23,7 +23,7 @@ require ( k8s.io/apimachinery v0.35.3 k8s.io/client-go v0.35.3 k8s.io/kube-aggregator v0.35.3 - k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 + k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 sigs.k8s.io/controller-runtime v0.23.3 sigs.k8s.io/yaml v1.6.0 ) @@ -86,18 +86,18 @@ require ( go.opentelemetry.io/otel/trace v1.42.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v2 v2.4.4 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect - golang.org/x/mod v0.32.0 // indirect - golang.org/x/net v0.51.0 // indirect + golang.org/x/mod v0.33.0 // indirect + golang.org/x/net v0.52.0 // indirect golang.org/x/oauth2 v0.35.0 // indirect - golang.org/x/sync v0.19.0 // indirect + golang.org/x/sync v0.20.0 // indirect golang.org/x/sys v0.42.0 // indirect - golang.org/x/term v0.40.0 // indirect - golang.org/x/text v0.34.0 // indirect + golang.org/x/term v0.41.0 // indirect + golang.org/x/text v0.35.0 // indirect golang.org/x/time v0.12.0 // indirect - golang.org/x/tools v0.41.0 // indirect + golang.org/x/tools v0.42.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect @@ -108,10 +108,10 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiserver v0.35.3 // indirect k8s.io/component-base v0.35.3 // indirect - k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect + k8s.io/klog/v2 v2.140.0 // indirect + k8s.io/kube-openapi v0.0.0-20260317180543-43fb72c5454a // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect 
sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.2 // indirect ) diff --git a/go.sum b/go.sum index 51a5b83f35..e31904ad73 100644 --- a/go.sum +++ b/go.sum @@ -136,8 +136,8 @@ github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.87.0 h1:QK37j5ZUtBwbyZkF4BBAs3bQQ1gYKG8e+g1BdNZBr/M= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.87.0/go.mod h1:WHiLZmOWVop/MoYvRD58LfnPeyE+dcITby/jQjg83Hw= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.90.1 h1:URbjn501/IBFTzPtGXrYDXHi+ZcbP2W60o6JeTrY3vQ= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.90.1/go.mod h1:Gfzi4500QCMnptFIQc8YdDi8YZ4QA0vs22LROWZ3+YU= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= @@ -219,8 +219,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= -go.yaml.in/yaml/v2 v2.4.3/go.mod 
h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ= +go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -230,40 +230,40 @@ golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/y golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= -golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= +golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= +golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo= -golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ= golang.org/x/oauth2 v0.35.0/go.mod 
h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= -golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= -golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= -golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU= +golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= -golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= +golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= golang.org/x/time v0.12.0 
h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= -golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= +golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= +golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -304,14 +304,14 @@ k8s.io/client-go v0.35.3 h1:s1lZbpN4uI6IxeTM2cpdtrwHcSOBML1ODNTCCfsP1pg= k8s.io/client-go v0.35.3/go.mod h1:RzoXkc0mzpWIDvBrRnD+VlfXP+lRzqQjCmKtiwZ8Q9c= k8s.io/component-base v0.35.3 h1:mbKbzoIMy7JDWS/wqZobYW1JDVRn/RKRaoMQHP9c4P0= k8s.io/component-base v0.35.3/go.mod h1:IZ8LEG30kPN4Et5NeC7vjNv5aU73ku5MS15iZyvyMYk= -k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/klog/v2 v2.140.0 h1:Tf+J3AH7xnUzZyVVXhTgGhEKnFqye14aadWv7bzXdzc= +k8s.io/klog/v2 v2.140.0/go.mod h1:o+/RWfJ6PwpnFn7OyAG3QnO47BFsymfEfrz6XyYSSp0= k8s.io/kube-aggregator v0.35.3 h1:erIo8Dfapd0Fg44XAbgCNioJMtr3Z5mI/G1PSpj9B7Q= k8s.io/kube-aggregator v0.35.3/go.mod 
h1:lOLyWTEuiKT2kS/Wkj0foq+P+Xt4gs/xkrhz2r33lAQ= -k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= -k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kube-openapi v0.0.0-20260317180543-43fb72c5454a h1:xCeOEAOoGYl2jnJoHkC3hkbPJgdATINPMAxaynU2Ovg= +k8s.io/kube-openapi v0.0.0-20260317180543-43fb72c5454a/go.mod h1:uGBT7iTA6c6MvqUvSXIaYZo9ukscABYi2btjhvgKGZ0= +k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 h1:AZYQSJemyQB5eRxqcPky+/7EdBj0xi3g0ZcxxJ7vbWU= +k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.23.3 h1:VjB/vhoPoA9l1kEKZHBMnQF33tdCLQKJtydy4iqwZ80= @@ -320,7 +320,7 @@ sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5E sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 h1:2WOzJpHUBVrrkDjU4KBT8n5LDcj824eX0I5UKcgeRUs= -sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2 h1:kwVWMx5yS1CrnFWA/2QHyRVJ8jM6dBA80uLmm0wJkk8= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2/go.mod 
h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go index 29de797287..d80946fd4d 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go @@ -198,10 +198,15 @@ type AlertmanagerSpec struct { // paused if set to true all actions on the underlying managed objects are not // going to be performed, except for delete actions. // +optional - Paused bool `json:"paused,omitempty"` + Paused bool `json:"paused,omitempty"` // nolint:kubeapilinter // nodeSelector defines which Nodes the Pods are scheduled on. // +optional + //nolint:kubeapilinter // standard Kubernetes node selector format NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // schedulerName defines the scheduler to use for Pod scheduling. If not specified, the default scheduler is used. + // +optional + // +kubebuilder:validation:MinLength=1 + SchedulerName string `json:"schedulerName,omitempty"` // resources defines the resource requests and limits of the Pods. 
// +optional Resources v1.ResourceRequirements `json:"resources,omitempty"` @@ -228,7 +233,7 @@ type AlertmanagerSpec struct { DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty"` // enableServiceLinks defines whether information about services should be injected into pod's environment variables // +optional - EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` // nolint:kubeapilinter // serviceName defines the service name used by the underlying StatefulSet(s) as the governing service. // If defined, the Service must be created before the Alertmanager resource in the same namespace and it must define a selector that matches the pod labels. // If empty, the operator will create and manage a headless service named `alertmanager-operated` for Alertmanager resources. @@ -245,28 +250,67 @@ type AlertmanagerSpec struct { // does not bind against the Pod IP. Note this is only for the Alertmanager // UI, not the gossip communication. // +optional - ListenLocal bool `json:"listenLocal,omitempty"` - // containers allows injecting additional containers. This is meant to - // allow adding an authentication proxy to an Alertmanager pod. - // Containers described here modify an operator generated container if they - // share the same name and modifications are done via a strategic merge - // patch. The current container names are: `alertmanager` and - // `config-reloader`. Overriding containers is entirely outside the scope - // of what the maintainers will support and by doing so, you accept that - // this behaviour may break at any time without notice. + ListenLocal bool `json:"listenLocal,omitempty"` // nolint:kubeapilinter + + // podManagementPolicy defines the policy for creating/deleting pods when + // scaling up and down. + // + // Unlike the default StatefulSet behavior, the default policy is + // `Parallel` to avoid manual intervention in case a pod gets stuck during + // a rollout. 
+ // + // Note that updating this value implies the recreation of the StatefulSet + // which incurs a service outage. + // + // +optional + PodManagementPolicy *PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + + // updateStrategy indicates the strategy that will be employed to update + // Pods in the StatefulSet when a revision is made to statefulset's Pod + // Template. + // + // The default strategy is RollingUpdate. + // + // +optional + UpdateStrategy *StatefulSetUpdateStrategy `json:"updateStrategy,omitempty"` + + // containers allows injecting additional containers or modifying operator + // generated containers. This can be used to allow adding an authentication + // proxy to the Pods or to change the behavior of an operator generated + // container. Containers described here modify an operator generated + // container if they share the same name and modifications are done via a + // strategic merge patch. + // + // The names of containers managed by the operator are: + // * `alertmanager` + // * `config-reloader` + // * `thanos-sidecar` + // + // Overriding containers which are managed by the operator require careful + // testing, especially when upgrading to a new version of the operator. + // // +optional Containers []v1.Container `json:"containers,omitempty"` - // initContainers allows adding initContainers to the pod definition. Those can be used to e.g. - // fetch secrets for injection into the Alertmanager configuration from external sources. Any - // errors during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - // InitContainers described here modify an operator - // generated init containers if they share the same name and modifications are - // done via a strategic merge patch. The current init container name is: - // `init-config-reloader`. 
Overriding init containers is entirely outside the - // scope of what the maintainers will support and by doing so, you accept that - // this behaviour may break at any time without notice. + + // initContainers allows injecting initContainers to the Pod definition. Those + // can be used to e.g. fetch secrets for injection into the Prometheus + // configuration from external sources. Any errors during the execution of + // an initContainer will lead to a restart of the Pod. More info: + // https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // InitContainers described here modify an operator generated init + // containers if they share the same name and modifications are done via a + // strategic merge patch. + // + // The names of init container name managed by the operator are: + // * `init-config-reloader`. + // + // Overriding init containers which are managed by the operator require + // careful testing, especially when upgrading to a new version of the + // operator. + // // +optional InitContainers []v1.Container `json:"initContainers,omitempty"` + // priorityClassName assigned to the Pods // +optional PriorityClassName string `json:"priorityClassName,omitempty"` @@ -299,7 +343,7 @@ type AlertmanagerSpec struct { // forceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica. // Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each. // +optional - ForceEnableClusterMode bool `json:"forceEnableClusterMode,omitempty"` + ForceEnableClusterMode bool `json:"forceEnableClusterMode,omitempty"` // nolint:kubeapilinter // alertmanagerConfigSelector defines the selector to be used for to merge and configure Alertmanager with. 
// +optional AlertmanagerConfigSelector *metav1.LabelSelector `json:"alertmanagerConfigSelector,omitempty"` @@ -313,11 +357,19 @@ type AlertmanagerSpec struct { // +optional AlertmanagerConfigMatcherStrategy AlertmanagerConfigMatcherStrategy `json:"alertmanagerConfigMatcherStrategy,omitempty"` - // minReadySeconds defines the minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing for it to be considered available. + // minReadySeconds defines the minimum number of seconds for which a newly + // created pod should be ready without any of its container crashing for it + // to be considered available. // // If unset, pods will be considered available as soon as they are ready. // + // When the Alertmanager version is greater than or equal to v0.30.0, the + // duration is also used to delay the first flush of the aggregation + // groups. This delay helps ensuring that all alerts have been resent by + // the Prometheus instances to Alertmanager after a roll-out. It is + // possible to override this behavior passing a custom value via + // `.spec.additionalArgs`. + // // +kubebuilder:validation:Minimum:=0 // +optional MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` @@ -326,12 +378,24 @@ type AlertmanagerSpec struct { // +listMapKey=ip // +optional HostAliases []HostAlias `json:"hostAliases,omitempty"` + // hostNetwork controls whether the pod may use the node network namespace. + // + // Make sure to understand the security implications if you want to enable + // it (https://kubernetes.io/docs/concepts/configuration/overview/). + // + // When hostNetwork is enabled, this will set the DNS policy to + // `ClusterFirstWithHostNet` automatically (unless `.spec.dnsPolicy` is set + // to a different value). + // + // +optional + HostNetwork bool `json:"hostNetwork,omitempty"` // nolint:kubeapilinter // web defines the web command line flags when starting Alertmanager. 
// +optional Web *AlertmanagerWebSpec `json:"web,omitempty"` // limits defines the limits command line flags when starting Alertmanager. // +optional Limits *AlertmanagerLimitsSpec `json:"limits,omitempty"` + // clusterTLS defines the mutual TLS configuration for the Alertmanager cluster's gossip protocol. // // It requires Alertmanager >= 0.24.0. @@ -349,7 +413,7 @@ type AlertmanagerSpec struct { // automountServiceAccountToken defines whether a service account token should be automatically mounted in the pod. // If the service account has `automountServiceAccountToken: true`, set the field to `false` to opt out of automounting API credentials. // +optional - AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` // nolint:kubeapilinter // enableFeatures defines the Alertmanager's feature flags. By default, no features are enabled. // Enabling features which are disabled by default is entirely outside the // scope of what the maintainers will support and by doing so, you accept @@ -385,7 +449,7 @@ type AlertmanagerSpec struct { // Starting Kubernetes 1.33, the feature is enabled by default. // // +optional - HostUsers *bool `json:"hostUsers,omitempty"` + HostUsers *bool `json:"hostUsers,omitempty"` // nolint:kubeapilinter } type AlertmanagerConfigMatcherStrategy struct { @@ -452,7 +516,7 @@ type AlertmanagerGlobalConfig struct { // httpConfig defines the default HTTP configuration. // +optional - HTTPConfig *HTTPConfig `json:"httpConfig,omitempty"` + HTTPConfigWithProxy *HTTPConfigWithProxy `json:"httpConfig,omitempty"` // slackApiUrl defines the default Slack API URL. // +optional @@ -486,7 +550,7 @@ type AlertmanagerGlobalConfig struct { // +optional RocketChatConfig *GlobalRocketChatConfig `json:"rocketChat,omitempty"` - // webex defines the default configuration for Jira. + // webex defines the default configuration for Webex. 
// +optional WebexConfig *GlobalWebexConfig `json:"webex,omitempty"` @@ -503,7 +567,7 @@ type AlertmanagerStatus struct { // paused defines whether any actions on the underlying managed objects are // being performed. Only delete actions will be performed. // +optional - Paused bool `json:"paused"` + Paused bool `json:"paused"` // nolint:kubeapilinter // replicas defines the total number of non-terminated pods targeted by this Alertmanager // object (their labels match the selector). // +optional @@ -536,6 +600,10 @@ func (a *Alertmanager) ExpectedReplicas() int { return int(*a.Spec.Replicas) } +func (a *Alertmanager) GetAvailableReplicas() int { return int(a.Status.AvailableReplicas) } +func (a *Alertmanager) GetUpdatedReplicas() int { return int(a.Status.UpdatedReplicas) } +func (a *Alertmanager) GetConditions() []Condition { return a.Status.Conditions } + func (a *Alertmanager) SetReplicas(i int) { a.Status.Replicas = int32(i) } func (a *Alertmanager) SetUpdatedReplicas(i int) { a.Status.UpdatedReplicas = int32(i) } func (a *Alertmanager) SetAvailableReplicas(i int) { a.Status.AvailableReplicas = int32(i) } @@ -607,11 +675,19 @@ type GlobalSMTPConfig struct { // requireTLS defines the default SMTP TLS requirement. // Note that Go does not support unencrypted connections to remote SMTP endpoints. // +optional - RequireTLS *bool `json:"requireTLS,omitempty"` + RequireTLS *bool `json:"requireTLS,omitempty"` // nolint:kubeapilinter // tlsConfig defines the default TLS configuration for SMTP receivers // +optional TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` + + // forceImplicitTLS defines whether to force use of implicit TLS (direct TLS connection) for better security. 
+ // true: force use of implicit TLS (direct TLS connection on any port) + // false: force disable implicit TLS (use explicit TLS/STARTTLS if required) + // nil (default): auto-detect based on port (465=implicit, other=explicit) for backward compatibility + // It requires Alertmanager >= v0.31.0. + // +optional + ForceImplicitTLS *bool `json:"forceImplicitTLS,omitempty"` // nolint:kubeapilinter } // GlobalTelegramConfig configures global Telegram parameters. @@ -734,7 +810,3 @@ type ClusterTLSConfig struct { // +required ClientTLS SafeTLSConfig `json:"client"` } - -// URL represents a valid URL -// +kubebuilder:validation:Pattern:="^(http|https)://.+$" -type URL string diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/http_config.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/http_config.go index 3d7a0b8230..1590d23e09 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/http_config.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/http_config.go @@ -21,8 +21,49 @@ import ( v1 "k8s.io/api/core/v1" ) -// HTTPConfig defines the configuration for the HTTP client. -type HTTPConfig struct { +// HTTPConfigWithProxyAndTLSFiles defines the configuration for the HTTP client +// with proxy configuration and TLS configuration. It is used for +// ServiceMonitor endpoints. +type HTTPConfigWithProxyAndTLSFiles struct { + HTTPConfigWithTLSFiles `json:",inline"` + ProxyConfig `json:",inline"` +} + +// Validate semantically validates the given TLSConfig. +func (c *HTTPConfigWithProxyAndTLSFiles) Validate() error { + if err := c.HTTPConfigWithTLSFiles.Validate(); err != nil { + return err + } + + if err := c.ProxyConfig.Validate(); err != nil { + return err + } + + return nil +} + +// HTTPConfigWithProxy defines the configuration for the HTTP client with proxy +// configuration. 
It is used for PodMonitor endpoints and Probes. +type HTTPConfigWithProxy struct { + HTTPConfig `json:",inline"` + ProxyConfig `json:",inline"` +} + +// Validate semantically validates the given HTTPConfigWithProxy. +func (hc *HTTPConfigWithProxy) Validate() error { + if hc == nil { + return nil + } + + if err := hc.HTTPConfig.Validate(); err != nil { + return err + } + + return hc.ProxyConfig.Validate() +} + +// HTTPConfigWithoutTLS defines the configuration for the HTTP client. +type HTTPConfigWithoutTLS struct { // authorization configures the Authorization header credentials used by // the client. // @@ -60,27 +101,20 @@ type HTTPConfig struct { // Deprecated: use `authorization` instead. BearerTokenSecret *v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` - // tlsConfig defines the TLS configuration used by the client. - // - // +optional - TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` - - ProxyConfig `json:",inline"` - // followRedirects defines whether the client should follow HTTP 3xx // redirects. // // +optional - FollowRedirects *bool `json:"followRedirects,omitempty"` + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter // enableHttp2 can be used to disable HTTP2. // // +optional - EnableHTTP2 *bool `json:"enableHttp2,omitempty"` + EnableHTTP2 *bool `json:"enableHttp2,omitempty"` // nolint:kubeapilinter } -// Validate semantically validates the given HTTPConfig. -func (hc *HTTPConfig) Validate() error { +// Validate semantically validates the given HTTPConfigWithoutTLS. +func (hc *HTTPConfigWithoutTLS) Validate() error { if hc == nil { return nil } @@ -118,13 +152,56 @@ func (hc *HTTPConfig) Validate() error { return fmt.Errorf("oauth2: %w", err) } + return nil +} + +// HTTPConfig defines the HTTP configuration + TLS configuration (only from +// secret/configmap references). +type HTTPConfig struct { + HTTPConfigWithoutTLS `json:",inline"` + + // tlsConfig defines the TLS configuration used by the client. 
+ // + // +optional + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` +} + +// Validate semantically validates the given HTTPConfig. +func (hc *HTTPConfig) Validate() error { + if hc == nil { + return nil + } + + if err := hc.HTTPConfigWithoutTLS.Validate(); err != nil { + return err + } if err := hc.TLSConfig.Validate(); err != nil { return fmt.Errorf("tlsConfig: %w", err) } - if err := hc.ProxyConfig.Validate(); err != nil { + return nil +} + +// HTTPConfigWithTLSFiles defines HTTP configuration + TLS configuration +// (from secret/configmap references as well as files). +type HTTPConfigWithTLSFiles struct { + HTTPConfigWithoutTLS `json:",inline"` + + // tlsConfig defines TLS configuration used by the client. + // + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` +} + +// Validate semantically validates the given HTTPConfigWithTLSFiles. +func (c *HTTPConfigWithTLSFiles) Validate() error { + if err := c.HTTPConfigWithoutTLS.Validate(); err != nil { return err } + if err := c.TLSConfig.Validate(); err != nil { + return fmt.Errorf("tlsConfig: %w", err) + } + return nil } diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go index 8f439fd11c..d7da9bdcaa 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go @@ -267,6 +267,7 @@ type PodMetricsEndpoint struct { // params define optional HTTP URL parameters. // +optional + //nolint:kubeapilinter Params map[string][]string `json:"params,omitempty"` // interval at which Prometheus scrapes the metrics from the target. @@ -286,13 +287,13 @@ type PodMetricsEndpoint struct { // honorLabels when true preserves the metric's labels when they collide // with the target's labels. 
// +optional - HonorLabels bool `json:"honorLabels,omitempty"` + HonorLabels bool `json:"honorLabels,omitempty"` // nolint:kubeapilinter // honorTimestamps defines whether Prometheus preserves the timestamps // when exposed by the target. // // +optional - HonorTimestamps *bool `json:"honorTimestamps,omitempty"` + HonorTimestamps *bool `json:"honorTimestamps,omitempty"` // nolint:kubeapilinter // trackTimestampsStaleness defines whether Prometheus tracks staleness of // the metrics that have an explicit timestamp present in scraped data. @@ -301,7 +302,7 @@ type PodMetricsEndpoint struct { // It requires Prometheus >= v2.48.0. // // +optional - TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` // nolint:kubeapilinter // metricRelabelings defines the relabeling rules to apply to the // samples before ingestion. @@ -329,7 +330,7 @@ type PodMetricsEndpoint struct { // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase // // +optional - FilterRunning *bool `json:"filterRunning,omitempty"` + FilterRunning *bool `json:"filterRunning,omitempty"` // nolint:kubeapilinter - HTTPConfig `json:",inline"` + HTTPConfigWithProxy `json:",inline"` } diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go index 34218f4ca5..dc7ad44ee9 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go @@ -17,7 +17,6 @@ package v1 import ( "errors" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -75,10 +74,12 @@ type ProbeSpec struct { // jobName assigned to scraped metrics by default. 
// +optional JobName string `json:"jobName,omitempty"` + // prober defines the specification for the prober to use for probing targets. // The prober.URL parameter is required. Targets cannot be probed if left empty. // +optional ProberSpec ProberSpec `json:"prober,omitempty"` + // module to use for probing specifying how to probe the target. // Example module configuring in the blackbox exporter: // https://github.com/prometheus/blackbox_exporter/blob/master/example.yml @@ -96,21 +97,7 @@ type ProbeSpec struct { // The value cannot be greater than the scrape interval otherwise the operator will reject the resource. // +optional ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` - // tlsConfig defines the TLS configuration to use when scraping the endpoint. - // +optional - TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` - // bearerTokenSecret defines the secret to mount to read bearer token for scraping targets. The secret - // needs to be in the same namespace as the probe and accessible by - // the Prometheus Operator. - // +optional - BearerTokenSecret v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` - // basicAuth allow an endpoint to authenticate over basic authentication. - // More info: https://prometheus.io/docs/operating/configuration/#endpoint - // +optional - BasicAuth *BasicAuth `json:"basicAuth,omitempty"` - // oauth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer. - // +optional - OAuth2 *OAuth2 `json:"oauth2,omitempty"` + // metricRelabelings defines the RelabelConfig to apply to samples before ingestion. // +optional MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"` @@ -133,19 +120,23 @@ type ProbeSpec struct { // +listType=set // +optional ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + // fallbackScrapeProtocol defines the protocol to use if a scrape returns blank, unparseable, or otherwise invalid Content-Type. // // It requires Prometheus >= v3.0.0. 
// +optional FallbackScrapeProtocol *ScrapeProtocol `json:"fallbackScrapeProtocol,omitempty"` + // labelLimit defines the per-scrape limit on number of labels that will be accepted for a sample. // Only valid in Prometheus versions 2.27.0 and newer. // +optional LabelLimit *uint64 `json:"labelLimit,omitempty"` + // labelNameLengthLimit defines the per-scrape limit on length of labels name that will be accepted for a sample. // Only valid in Prometheus versions 2.27.0 and newer. // +optional LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` + // labelValueLengthLimit defines the per-scrape limit on length of labels value that will be accepted for a sample. // Only valid in Prometheus versions 2.27.0 and newer. // +optional @@ -153,6 +144,7 @@ type ProbeSpec struct { // +optional NativeHistogramConfig `json:",inline"` + // keepDroppedTargets defines the per-scrape limit on the number of targets dropped by relabeling // that will be kept in memory. 0 means no limit. // @@ -174,6 +166,8 @@ type ProbeSpec struct { // +listType=map // +listMapKey=name Params []ProbeParam `json:"params,omitempty"` + + HTTPConfig `json:",inline"` } // ProbeParam defines specification of extra parameters for a Probe. @@ -225,6 +219,7 @@ type ProbeTargetStaticConfig struct { Targets []string `json:"static,omitempty"` // labels defines all labels assigned to all metrics scraped from the targets. // +optional + //nolint:kubeapilinter Labels map[string]string `json:"labels,omitempty"` // relabelingConfigs defines relabelings to be apply to the label set of the targets before it gets // scraped. 
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go index 2122906ac9..e0e62c2f34 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go @@ -21,7 +21,6 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -87,6 +86,12 @@ func (l *Prometheus) GetStatus() PrometheusStatus { return l.Status } +func (p *Prometheus) ExpectedReplicas() int { return p.Spec.CommonPrometheusFields.ExpectedReplicas() } + +func (p *Prometheus) GetAvailableReplicas() int { return int(p.Status.AvailableReplicas) } +func (p *Prometheus) GetUpdatedReplicas() int { return int(p.Status.UpdatedReplicas) } +func (p *Prometheus) GetConditions() []Condition { return p.Status.Conditions } + // +kubebuilder:validation:Enum=OnResource;OnShard type AdditionalLabelSelectors string @@ -222,7 +227,7 @@ type CommonPrometheusFields struct { // paused defines when a Prometheus deployment is paused, no actions except for deletion // will be performed on the underlying objects. // +optional - Paused bool `json:"paused,omitempty"` + Paused bool `json:"paused,omitempty"` // nolint:kubeapilinter // image defines the container image name for Prometheus. If specified, it takes precedence // over the `spec.baseImage`, `spec.tag` and `spec.sha` fields. @@ -337,6 +342,7 @@ type CommonPrometheusFields struct { // Labels defined by `spec.replicaExternalLabelName` and // `spec.prometheusExternalLabelName` take precedence over this list. 
// +optional + //nolint:kubeapilinter ExternalLabels map[string]string `json:"externalLabels,omitempty"` // enableRemoteWriteReceiver defines the Prometheus to be used as a receiver for the Prometheus remote @@ -350,7 +356,7 @@ type CommonPrometheusFields struct { // // It requires Prometheus >= v2.33.0. // +optional - EnableRemoteWriteReceiver bool `json:"enableRemoteWriteReceiver,omitempty"` + EnableRemoteWriteReceiver bool `json:"enableRemoteWriteReceiver,omitempty"` // nolint:kubeapilinter // enableOTLPReceiver defines the Prometheus to be used as a receiver for the OTLP Metrics protocol. // @@ -358,7 +364,7 @@ type CommonPrometheusFields struct { // // It requires Prometheus >= v2.47.0. // +optional - EnableOTLPReceiver *bool `json:"enableOTLPReceiver,omitempty"` + EnableOTLPReceiver *bool `json:"enableOTLPReceiver,omitempty"` // nolint:kubeapilinter // remoteWriteReceiverMessageVersions list of the protobuf message versions to accept when receiving the // remote writes. @@ -430,8 +436,14 @@ type CommonPrometheusFields struct { // nodeSelector defines on which Nodes the Pods are scheduled. // +optional + //nolint:kubeapilinter NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // schedulerName defines the scheduler to use for Pod scheduling. If not specified, the default scheduler is used. + // +optional + // +kubebuilder:validation:MinLength=1 + SchedulerName string `json:"schedulerName,omitempty"` + // serviceAccountName is the name of the ServiceAccount to use to run the // Prometheus Pods. // +optional @@ -443,7 +455,7 @@ type CommonPrometheusFields struct { // **Warning:** be aware that by default, Prometheus requires the service account token for Kubernetes service discovery. // It is possible to use strategic merge patch to project the service account token into the 'prometheus' container. 
// +optional - AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` // nolint:kubeapilinter // secrets defines a list of Secrets in the same namespace as the Prometheus // object, which shall be mounted into the Prometheus Pods. @@ -497,11 +509,33 @@ type CommonPrometheusFields struct { // instead of the Pod IP's address. // // +optional - ListenLocal bool `json:"listenLocal,omitempty"` + ListenLocal bool `json:"listenLocal,omitempty"` // nolint:kubeapilinter + + // podManagementPolicy defines the policy for creating/deleting pods when + // scaling up and down. + // + // Unlike the default StatefulSet behavior, the default policy is + // `Parallel` to avoid manual intervention in case a pod gets stuck during + // a rollout. + // + // Note that updating this value implies the recreation of the StatefulSet + // which incurs a service outage. + // + // +optional + PodManagementPolicy *PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + + // updateStrategy indicates the strategy that will be employed to update + // Pods in the StatefulSet when a revision is made to statefulset's Pod + // Template. + // + // The default strategy is RollingUpdate. + // + // +optional + UpdateStrategy *StatefulSetUpdateStrategy `json:"updateStrategy,omitempty"` // enableServiceLinks defines whether information about services should be injected into pod's environment variables // +optional - EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` // nolint:kubeapilinter // containers allows injecting additional containers or modifying operator // generated containers. 
This can be used to allow adding an authentication @@ -515,13 +549,14 @@ type CommonPrometheusFields struct { // * `config-reloader` // * `thanos-sidecar` // - // Overriding containers is entirely outside the scope of what the - // maintainers will support and by doing so, you accept that this behaviour - // may break at any time without notice. + // Overriding containers which are managed by the operator require careful + // testing, especially when upgrading to a new version of the operator. + // // +optional Containers []v1.Container `json:"containers,omitempty"` + // initContainers allows injecting initContainers to the Pod definition. Those - // can be used to e.g. fetch secrets for injection into the Prometheus + // can be used to e.g. fetch secrets for injection into the Prometheus // configuration from external sources. Any errors during the execution of // an initContainer will lead to a restart of the Pod. More info: // https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ @@ -532,9 +567,10 @@ type CommonPrometheusFields struct { // The names of init container name managed by the operator are: // * `init-config-reloader`. // - // Overriding init containers is entirely outside the scope of what the - // maintainers will support and by doing so, you accept that this behaviour - // may break at any time without notice. + // Overriding init containers which are managed by the operator require + // careful testing, especially when upgrading to a new version of the + // operator. + // // +optional InitContainers []v1.Container `json:"initContainers,omitempty"` @@ -588,19 +624,19 @@ type CommonPrometheusFields struct { // In practice,`OverrideHonorLabels:true` enforces `honorLabels:false` // for all ServiceMonitor, PodMonitor and ScrapeConfig objects. 
// +optional - OverrideHonorLabels bool `json:"overrideHonorLabels,omitempty"` + OverrideHonorLabels bool `json:"overrideHonorLabels,omitempty"` // nolint:kubeapilinter // overrideHonorTimestamps when true, Prometheus ignores the timestamps for all the targets created // from service and pod monitors. // Otherwise the HonorTimestamps field of the service or pod monitor applies. // +optional - OverrideHonorTimestamps bool `json:"overrideHonorTimestamps,omitempty"` + OverrideHonorTimestamps bool `json:"overrideHonorTimestamps,omitempty"` // nolint:kubeapilinter // ignoreNamespaceSelectors when true, `spec.namespaceSelector` from all PodMonitor, ServiceMonitor // and Probe objects will be ignored. They will only discover targets // within the namespace of the PodMonitor, ServiceMonitor and Probe // object. // +optional - IgnoreNamespaceSelectors bool `json:"ignoreNamespaceSelectors,omitempty"` + IgnoreNamespaceSelectors bool `json:"ignoreNamespaceSelectors,omitempty"` // nolint:kubeapilinter // enforcedNamespaceLabel when not empty, a label will be added to: // @@ -755,7 +791,13 @@ type CommonPrometheusFields struct { // It requires Prometheus >= v3.4.0. // // +optional - ConvertClassicHistogramsToNHCB *bool `json:"convertClassicHistogramsToNHCB,omitempty"` + ConvertClassicHistogramsToNHCB *bool `json:"convertClassicHistogramsToNHCB,omitempty"` // nolint:kubeapilinter + + // scrapeNativeHistograms defines whether to enable scraping of native histograms. + // It requires Prometheus >= v3.8.0. + // + // +optional + ScrapeNativeHistograms *bool `json:"scrapeNativeHistograms,omitempty"` // nolint:kubeapilinter // scrapeClassicHistograms defines whether to scrape a classic histogram that is also exposed as a native histogram. // @@ -764,7 +806,7 @@ type CommonPrometheusFields struct { // It requires Prometheus >= v3.5.0. 
// // +optional - ScrapeClassicHistograms *bool `json:"scrapeClassicHistograms,omitempty"` + ScrapeClassicHistograms *bool `json:"scrapeClassicHistograms,omitempty"` // nolint:kubeapilinter // minReadySeconds defines the minimum number of seconds for which a newly created Pod should be ready // without any of its container crashing for it to be considered available. @@ -804,7 +846,7 @@ type CommonPrometheusFields struct { // Requires Prometheus v2.11.0 and above. // // +optional - WALCompression *bool `json:"walCompression,omitempty"` + WALCompression *bool `json:"walCompression,omitempty"` // nolint:kubeapilinter // excludedFromEnforcement defines the list of references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects // to be excluded from enforcing a namespace label of origin. @@ -824,7 +866,7 @@ type CommonPrometheusFields struct { // to a different value). // // +optional - HostNetwork bool `json:"hostNetwork,omitempty"` + HostNetwork bool `json:"hostNetwork,omitempty"` // nolint:kubeapilinter // podTargetLabels are appended to the `spec.podTargetLabels` field of all // PodMonitor and ServiceMonitor objects. @@ -838,7 +880,7 @@ type CommonPrometheusFields struct { // in a breaking way. // // +optional - TracingConfig *PrometheusTracingConfig `json:"tracingConfig,omitempty"` + TracingConfig *TracingConfig `json:"tracingConfig,omitempty"` // bodySizeLimit defines per-scrape on response body size. // Only valid in Prometheus versions 2.45.0 and newer. // @@ -981,7 +1023,19 @@ type CommonPrometheusFields struct { // Starting Kubernetes 1.33, the feature is enabled by default. 
// // +optional - HostUsers *bool `json:"hostUsers,omitempty"` + HostUsers *bool `json:"hostUsers,omitempty"` // nolint:kubeapilinter +} + +func (cpf CommonPrometheusFields) ExpectedReplicas() int { + replicas := 1 + if cpf.Replicas != nil { + replicas = int(*cpf.Replicas) + } + shards := 1 + if cpf.Shards != nil { + shards = int(*cpf.Shards) + } + return replicas * shards } // Specifies the validation scheme for metric and label names. @@ -1162,7 +1216,7 @@ type PrometheusSpec struct { // When `spec.thanos.objectStorageConfig` or `spec.objectStorageConfigFile` are defined, the operator automatically // disables block compaction to avoid race conditions during block uploads (as the Thanos documentation recommends). // +optional - DisableCompaction bool `json:"disableCompaction,omitempty"` + DisableCompaction bool `json:"disableCompaction,omitempty"` // nolint:kubeapilinter // rules defines the configuration of the Prometheus rules' engine. // +optional @@ -1252,7 +1306,7 @@ type PrometheusSpec struct { // // Deprecated: this flag has no effect for Prometheus >= 2.39.0 where overlapping blocks are enabled by default. // +optional - AllowOverlappingBlocks bool `json:"allowOverlappingBlocks,omitempty"` + AllowOverlappingBlocks bool `json:"allowOverlappingBlocks,omitempty"` // nolint:kubeapilinter // exemplars related settings that are runtime reloadable. // It requires to enable the `exemplar-storage` feature flag to be effective. 
@@ -1280,7 +1334,7 @@ type PrometheusSpec struct { // For more information: // https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis // +optional - EnableAdminAPI bool `json:"enableAdminAPI,omitempty"` + EnableAdminAPI bool `json:"enableAdminAPI,omitempty"` // nolint:kubeapilinter } type WhenScaledRetentionType string @@ -1311,43 +1365,6 @@ type ShardRetentionPolicy struct { Retain *RetainConfig `json:"retain,omitempty"` } -type PrometheusTracingConfig struct { - // clientType defines the client used to export the traces. Supported values are `http` or `grpc`. - // +kubebuilder:validation:Enum=http;grpc - // +optional - ClientType *string `json:"clientType"` - - // endpoint to send the traces to. Should be provided in format :. - // +kubebuilder:validation:MinLength:=1 - // +required - Endpoint string `json:"endpoint"` - - // samplingFraction defines the probability a given trace will be sampled. Must be a float from 0 through 1. - // +optional - SamplingFraction *resource.Quantity `json:"samplingFraction"` - - // insecure if disabled, the client will use a secure connection. - // +optional - Insecure *bool `json:"insecure"` - - // headers defines the key-value pairs to be used as headers associated with gRPC or HTTP requests. - // +optional - Headers map[string]string `json:"headers"` - - // compression key for supported compression types. The only supported value is `gzip`. - // +kubebuilder:validation:Enum=gzip - // +optional - Compression *string `json:"compression"` - - // timeout defines the maximum time the exporter will wait for each batch export. - // +optional - Timeout *Duration `json:"timeout"` - - // tlsConfig to use when sending traces. - // +optional - TLSConfig *TLSConfig `json:"tlsConfig"` -} - // PrometheusStatus is the most recent observed status of the Prometheus cluster. 
// More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -1356,7 +1373,7 @@ type PrometheusStatus struct { // paused defines whether any actions on the underlying managed objects are // being performed. Only delete actions will be performed. // +optional - Paused bool `json:"paused"` + Paused bool `json:"paused"` // nolint:kubeapilinter // replicas defines the total number of non-terminated pods targeted by this Prometheus deployment // (their labels match the selector). // +optional @@ -1410,7 +1427,7 @@ type AlertingSpec struct { type StorageSpec struct { // disableMountSubPath deprecated: subPath usage will be removed in a future release. // +optional - DisableMountSubPath bool `json:"disableMountSubPath,omitempty"` + DisableMountSubPath bool `json:"disableMountSubPath,omitempty"` // nolint:kubeapilinter // emptyDir to be used by the StatefulSet. // If specified, it takes precedence over `ephemeral` and `volumeClaimTemplate`. // More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir @@ -1522,21 +1539,21 @@ type ThanosSpec struct { // listenLocal is deprecated: use `grpcListenLocal` and `httpListenLocal` instead. // +optional - ListenLocal bool `json:"listenLocal,omitempty"` + ListenLocal bool `json:"listenLocal,omitempty"` // nolint:kubeapilinter // grpcListenLocal defines when true, the Thanos sidecar listens on the loopback interface instead // of the Pod IP's address for the gRPC endpoints. // // It has no effect if `listenLocal` is true. // +optional - GRPCListenLocal bool `json:"grpcListenLocal,omitempty"` + GRPCListenLocal bool `json:"grpcListenLocal,omitempty"` // nolint:kubeapilinter // httpListenLocal when true, the Thanos sidecar listens on the loopback interface instead // of the Pod IP's address for the HTTP endpoints. // // It has no effect if `listenLocal` is true. 
// +optional - HTTPListenLocal bool `json:"httpListenLocal,omitempty"` + HTTPListenLocal bool `json:"httpListenLocal,omitempty"` // nolint:kubeapilinter // tracingConfig defines the tracing configuration for the Thanos sidecar. // @@ -1562,7 +1579,7 @@ type ThanosSpec struct { // grpcServerTlsConfig defines the TLS parameters for the gRPC server providing the StoreAPI. // - // Note: Currently only the `caFile`, `certFile`, and `keyFile` fields are supported. + // Note: Currently only the `minVersion`, `caFile`, `certFile`, and `keyFile` fields are supported. // // +optional GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"` @@ -1628,9 +1645,10 @@ type ThanosSpec struct { // +k8s:openapi-gen=true type RemoteWriteSpec struct { // url defines the URL of the endpoint to send samples to. - // +kubebuilder:validation:MinLength=1 + // + // It must use the HTTP or HTTPS scheme. // +required - URL string `json:"url"` + URL URL `json:"url"` // name of the remote write queue, it must be unique if specified. The // name is used in metrics and logging in order to differentiate queues. @@ -1663,7 +1681,7 @@ type RemoteWriteSpec struct { // It requires Prometheus >= v2.27.0 or Thanos >= v0.24.0. // // +optional - SendExemplars *bool `json:"sendExemplars,omitempty"` + SendExemplars *bool `json:"sendExemplars,omitempty"` // nolint:kubeapilinter // sendNativeHistograms enables sending of native histograms, also known as sparse histograms // over remote write. @@ -1671,7 +1689,7 @@ type RemoteWriteSpec struct { // It requires Prometheus >= v2.40.0 or Thanos >= v0.30.0. // // +optional - SendNativeHistograms *bool `json:"sendNativeHistograms,omitempty"` + SendNativeHistograms *bool `json:"sendNativeHistograms,omitempty"` // nolint:kubeapilinter // remoteTimeout defines the timeout for requests to the remote write endpoint. // +optional @@ -1683,6 +1701,7 @@ type RemoteWriteSpec struct { // It requires Prometheus >= v2.25.0 or Thanos >= v0.24.0. 
// // +optional + //nolint:kubeapilinter Headers map[string]string `json:"headers,omitempty"` // writeRelabelConfigs defines the list of remote write relabel configurations. @@ -1757,7 +1776,7 @@ type RemoteWriteSpec struct { // It requires Prometheus >= v2.26.0 or Thanos >= v0.24.0. // // +optional - FollowRedirects *bool `json:"followRedirects,omitempty"` + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter // queueConfig allows tuning of the remote write queue parameters. // +optional @@ -1773,7 +1792,7 @@ type RemoteWriteSpec struct { // enableHTTP2 defines whether to enable HTTP2. // +optional - EnableHttp2 *bool `json:"enableHTTP2,omitempty"` + EnableHttp2 *bool `json:"enableHTTP2,omitempty"` // nolint:kubeapilinter // roundRobinDNS controls the DNS resolution behavior for remote-write connections. // When enabled: @@ -1791,7 +1810,7 @@ type RemoteWriteSpec struct { // It requires Prometheus >= v3.1.0 or Thanos >= v0.38.0. // // +optional - RoundRobinDNS *bool `json:"roundRobinDNS,omitempty"` + RoundRobinDNS *bool `json:"roundRobinDNS,omitempty"` // nolint:kubeapilinter } // +kubebuilder:validation:Enum=V1.0;V2.0 @@ -1838,7 +1857,7 @@ type QueueConfig struct { // This is an *experimental feature*, it may change in any upcoming release // in a breaking way. // +optional - RetryOnRateLimit bool `json:"retryOnRateLimit,omitempty"` + RetryOnRateLimit bool `json:"retryOnRateLimit,omitempty"` // nolint:kubeapilinter // sampleAgeLimit drops samples older than the limit. // It requires Prometheus >= v2.50.0 or Thanos >= v0.32.0. // @@ -1871,7 +1890,7 @@ type Sigv4 struct { // It requires Prometheus >= v2.54.0. // // +optional - UseFIPSSTSEndpoint *bool `json:"useFIPSSTSEndpoint,omitempty"` + UseFIPSSTSEndpoint *bool `json:"useFIPSSTSEndpoint,omitempty"` // nolint:kubeapilinter } // AzureAD defines the configuration for remote write's azuread parameters. 
@@ -1882,11 +1901,11 @@ type AzureAD struct { // +optional Cloud *string `json:"cloud,omitempty"` // managedIdentity defines the Azure User-assigned Managed identity. - // Cannot be set at the same time as `oauth` or `sdk`. + // Cannot be set at the same time as `oauth`, `sdk` or `workloadIdentity`. // +optional ManagedIdentity *ManagedIdentity `json:"managedIdentity,omitempty"` // oauth defines the oauth config that is being used to authenticate. - // Cannot be set at the same time as `managedIdentity` or `sdk`. + // Cannot be set at the same time as `managedIdentity`, `sdk` or `workloadIdentity`. // // It requires Prometheus >= v2.48.0 or Thanos >= v0.31.0. // @@ -1894,11 +1913,22 @@ type AzureAD struct { OAuth *AzureOAuth `json:"oauth,omitempty"` // sdk defines the Azure SDK config that is being used to authenticate. // See https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication - // Cannot be set at the same time as `oauth` or `managedIdentity`. + // Cannot be set at the same time as `oauth`, `managedIdentity` or `workloadIdentity`. // // It requires Prometheus >= v2.52.0 or Thanos >= v0.36.0. // +optional SDK *AzureSDK `json:"sdk,omitempty"` + // workloadIdentity defines the Azure Workload Identity authentication. + // Cannot be set at the same time as `oauth`, `managedIdentity`, or `sdk`. + // + // It requires Prometheus >= 3.7.0. Currently not supported by Thanos. + // +optional + WorkloadIdentity *AzureWorkloadIdentity `json:"workloadIdentity,omitempty"` + // scope is the custom OAuth 2.0 scope to request when acquiring tokens. + // It requires Prometheus >= 3.9.0. Currently not supported by Thanos. + // +kubebuilder:validation:Pattern=`^[\w\s:/.\\-]+$` + // +optional + Scope *string `json:"scope,omitempty"` } // AzureOAuth defines the Azure OAuth settings. 
@@ -1938,6 +1968,19 @@ type AzureSDK struct { TenantID *string `json:"tenantId,omitempty"` } +// AzureWorkloadIdentity defines the Azure Workload Identity authentication configuration. +type AzureWorkloadIdentity struct { + // clientId is the clientID of the Azure Active Directory application. + // +kubebuilder:validation:MinLength=1 + // +required + ClientID string `json:"clientId"` + + // tenantId is the tenant ID of the Azure Active Directory application. + // +kubebuilder:validation:MinLength=1 + // +required + TenantID string `json:"tenantId"` +} + // RemoteReadSpec defines the configuration for Prometheus to read back samples // from a remote endpoint. // +k8s:openapi-gen=true @@ -1958,6 +2001,7 @@ type RemoteReadSpec struct { // requiredMatchers defines an optional list of equality matchers which have to be present // in a selector to query the remote read endpoint. // +optional + //nolint:kubeapilinter RequiredMatchers map[string]string `json:"requiredMatchers,omitempty"` // remoteTimeout defines the timeout for requests to the remote read endpoint. @@ -1968,12 +2012,13 @@ type RemoteReadSpec struct { // Be aware that headers that are set by Prometheus itself can't be overwritten. // Only valid in Prometheus versions 2.26.0 and newer. // +optional + //nolint:kubeapilinter Headers map[string]string `json:"headers,omitempty"` // readRecent defines whether reads should be made for queries for time ranges that // the local storage should have complete data for. // +optional - ReadRecent bool `json:"readRecent,omitempty"` + ReadRecent bool `json:"readRecent,omitempty"` // nolint:kubeapilinter // oauth2 configuration for the URL. // @@ -2023,14 +2068,14 @@ type RemoteReadSpec struct { // It requires Prometheus >= v2.26.0. 
// // +optional - FollowRedirects *bool `json:"followRedirects,omitempty"` + FollowRedirects *bool `json:"followRedirects,omitempty"` // nolint:kubeapilinter // filterExternalLabels defines whether to use the external labels as selectors for the remote read endpoint. // // It requires Prometheus >= v2.34.0. // // +optional - FilterExternalLabels *bool `json:"filterExternalLabels,omitempty"` + FilterExternalLabels *bool `json:"filterExternalLabels,omitempty"` // nolint:kubeapilinter } // RelabelConfig allows dynamic rewriting of the label set for targets, alerts, @@ -2240,7 +2285,7 @@ type AlertmanagerEndpoints struct { // enableHttp2 defines whether to enable HTTP2. // // +optional - EnableHttp2 *bool `json:"enableHttp2,omitempty"` + EnableHttp2 *bool `json:"enableHttp2,omitempty"` // nolint:kubeapilinter // relabelings defines the relabel configuration applied to the discovered Alertmanagers. // @@ -2290,7 +2335,7 @@ type MetadataConfig struct { // send defines whether metric metadata is sent to the remote storage or not. // // +optional - Send bool `json:"send,omitempty"` + Send bool `json:"send,omitempty"` // nolint:kubeapilinter // sendInterval defines how frequently metric metadata is sent to the remote storage. // @@ -2429,7 +2474,7 @@ type ScrapeClass struct { // Only one scrape class can be set as the default. // // +optional - Default *bool `json:"default,omitempty"` + Default *bool `json:"default,omitempty"` // nolint:kubeapilinter // fallbackScrapeProtocol defines the protocol to use if a scrape returns blank, unparseable, or otherwise invalid Content-Type. // It will only apply if the scrape resource doesn't specify any FallbackScrapeProtocol @@ -2510,7 +2555,7 @@ type OTLPConfig struct { // Cannot be true when `promoteResourceAttributes` is defined. // It requires Prometheus >= v3.5.0. 
// +optional - PromoteAllResourceAttributes *bool `json:"promoteAllResourceAttributes,omitempty"` + PromoteAllResourceAttributes *bool `json:"promoteAllResourceAttributes,omitempty"` // nolint:kubeapilinter // ignoreResourceAttributes defines the list of OpenTelemetry resource attributes to ignore when `promoteAllResourceAttributes` is true. // @@ -2542,18 +2587,18 @@ type OTLPConfig struct { // // It requires Prometheus >= v3.1.0. // +optional - KeepIdentifyingResourceAttributes *bool `json:"keepIdentifyingResourceAttributes,omitempty"` + KeepIdentifyingResourceAttributes *bool `json:"keepIdentifyingResourceAttributes,omitempty"` // nolint:kubeapilinter // convertHistogramsToNHCB defines optional translation of OTLP explicit bucket histograms into native histograms with custom buckets. // It requires Prometheus >= v3.4.0. // +optional - ConvertHistogramsToNHCB *bool `json:"convertHistogramsToNHCB,omitempty"` + ConvertHistogramsToNHCB *bool `json:"convertHistogramsToNHCB,omitempty"` // nolint:kubeapilinter // promoteScopeMetadata controls whether to promote OpenTelemetry scope metadata (i.e. name, version, schema URL, and attributes) to metric labels. // As per the OpenTelemetry specification, the aforementioned scope metadata should be identifying, i.e. made into metric labels. // It requires Prometheus >= v3.6.0. // +optional - PromoteScopeMetadata *bool `json:"promoteScopeMetadata,omitempty"` + PromoteScopeMetadata *bool `json:"promoteScopeMetadata,omitempty"` // nolint:kubeapilinter } // Validate semantically validates the given OTLPConfig section. 
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go index 576f8d3892..439b57c53f 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go @@ -88,6 +88,7 @@ type RuleGroup struct { // It requires Prometheus >= 3.0.0. // The field is ignored for Thanos Ruler. // +optional + //nolint:kubeapilinter Labels map[string]string `json:"labels,omitempty"` // interval defines how often rules in the group are evaluated. // +optional @@ -141,10 +142,12 @@ type Rule struct { KeepFiringFor *NonEmptyDuration `json:"keep_firing_for,omitempty"` // labels defines labels to add or overwrite. // +optional + //nolint:kubeapilinter Labels map[string]string `json:"labels,omitempty"` // annotations defines annotations to add to each alert. // Only valid for alerting rules. // +optional + //nolint:kubeapilinter Annotations map[string]string `json:"annotations,omitempty"` } diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go index 434664c23b..a78f5a3d18 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go @@ -111,7 +111,7 @@ type ThanosRulerSpec struct { // paused defines when a ThanosRuler deployment is paused, no actions except for deletion // will be performed on the underlying objects. 
// +optional - Paused bool `json:"paused,omitempty"` + Paused bool `json:"paused,omitempty"` // nolint:kubeapilinter // replicas defines the number of thanos ruler instances to deploy. // +optional @@ -119,8 +119,14 @@ type ThanosRulerSpec struct { // nodeSelector defines which Nodes the Pods are scheduled on. // +optional + //nolint:kubeapilinter NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // schedulerName defines the scheduler to use for Pod scheduling. If not specified, the default scheduler is used. + // +optional + // +kubebuilder:validation:MinLength=1 + SchedulerName string `json:"schedulerName,omitempty"` + // resources defines the resource requirements for single Pods. // If not provided, no requests/limits will be set // +optional @@ -154,7 +160,7 @@ type ThanosRulerSpec struct { // enableServiceLinks defines whether information about services should be injected into pod's environment variables // +optional - EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"` // nolint:kubeapilinter // priorityClassName defines the priority class assigned to the Pods // +optional @@ -210,7 +216,29 @@ type ThanosRulerSpec struct { // listenLocal defines the Thanos ruler listen on loopback, so that it // does not bind against the Pod IP. // +optional - ListenLocal bool `json:"listenLocal,omitempty"` + ListenLocal bool `json:"listenLocal,omitempty"` // nolint:kubeapilinter + + // podManagementPolicy defines the policy for creating/deleting pods when + // scaling up and down. + // + // Unlike the default StatefulSet behavior, the default policy is + // `Parallel` to avoid manual intervention in case a pod gets stuck during + // a rollout. + // + // Note that updating this value implies the recreation of the StatefulSet + // which incurs a service outage. 
+ // + // +optional + PodManagementPolicy *PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + + // updateStrategy indicates the strategy that will be employed to update + // Pods in the StatefulSet when a revision is made to statefulset's Pod + // Template. + // + // The default strategy is RollingUpdate. + // + // +optional + UpdateStrategy *StatefulSetUpdateStrategy `json:"updateStrategy,omitempty"` // queryEndpoints defines the list of Thanos Query endpoints from which to query metrics. // @@ -343,22 +371,29 @@ type ThanosRulerSpec struct { // +optional Retention Duration `json:"retention,omitempty"` - // containers allows injecting additional containers or modifying operator generated - // containers. This can be used to allow adding an authentication proxy to a ThanosRuler pod or - // to change the behavior of an operator generated container. Containers described here modify - // an operator generated container if they share the same name and modifications are done via a - // strategic merge patch. The current container names are: `thanos-ruler` and `config-reloader`. - // Overriding containers is entirely outside the scope of what the maintainers will support and by doing - // so, you accept that this behaviour may break at any time without notice. + // containers allows injecting additional containers or modifying operator + // generated containers. This can be used to allow adding an authentication + // proxy to the Pods or to change the behavior of an operator generated + // container. Containers described here modify an operator generated + // container if they share the same name and modifications are done via a + // strategic merge patch. + // + // The names of containers managed by the operator are: + // * `thanos-ruler` + // * `config-reloader` + // + // Overriding containers which are managed by the operator require careful + // testing, especially when upgrading to a new version of the operator. 
+ // // +optional Containers []v1.Container `json:"containers,omitempty"` - // initContainers allows adding initContainers to the pod definition. Those can be used to e.g. - // fetch secrets for injection into the ThanosRuler configuration from external sources. Any - // errors during the execution of an initContainer will lead to a restart of the Pod. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - // Using initContainers for any use case other then secret fetching is entirely outside the scope - // of what the maintainers will support and by doing so, you accept that this behaviour may break - // at any time without notice. + + // initContainers allows injecting initContainers to the Pod definition. + // Those can be used to e.g. fetch secrets for injection into the + // configuration from external sources. Any errors during the execution of + // an initContainer will lead to a restart of the Pod. More info: + // https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // // +optional InitContainers []v1.Container `json:"initContainers,omitempty"` @@ -395,6 +430,7 @@ type ThanosRulerSpec struct { // label with the value of the pod's name. // // +optional + //nolint:kubeapilinter Labels map[string]string `json:"labels,omitempty"` // alertDropLabels defines the label names which should be dropped in Thanos Ruler @@ -416,8 +452,9 @@ type ThanosRulerSpec struct { // grpcServerTlsConfig defines the gRPC server from which Thanos Querier reads // recorded rule data. - // Note: Currently only the CAFile, CertFile, and KeyFile fields are supported. - // Maps to the '--grpc-server-tls-*' CLI args. + // + // Note: Currently only the `minVersion`, `caFile`, `certFile`, and `keyFile` fields are supported. + // // +optional GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"` @@ -523,7 +560,7 @@ type ThanosRulerSpec struct { // Starting Kubernetes 1.33, the feature is enabled by default. 
// // +optional - HostUsers *bool `json:"hostUsers,omitempty"` + HostUsers *bool `json:"hostUsers,omitempty"` // nolint:kubeapilinter } // ThanosRulerWebSpec defines the configuration of the ThanosRuler web server. @@ -541,7 +578,7 @@ type ThanosRulerStatus struct { // paused defines whether any actions on the underlying managed objects are // being performed. Only delete actions will be performed. // +optional - Paused bool `json:"paused"` + Paused bool `json:"paused"` // nolint:kubeapilinter // replicas defines the total number of non-terminated pods targeted by this ThanosRuler deployment // (their labels match the selector). // +optional @@ -571,6 +608,10 @@ func (tr *ThanosRuler) ExpectedReplicas() int { return int(*tr.Spec.Replicas) } +func (tr *ThanosRuler) GetAvailableReplicas() int { return int(tr.Status.AvailableReplicas) } +func (tr *ThanosRuler) GetUpdatedReplicas() int { return int(tr.Status.UpdatedReplicas) } +func (tr *ThanosRuler) GetConditions() []Condition { return tr.Status.Conditions } + func (tr *ThanosRuler) SetReplicas(i int) { tr.Status.Replicas = int32(i) } func (tr *ThanosRuler) SetUpdatedReplicas(i int) { tr.Status.UpdatedReplicas = int32(i) } func (tr *ThanosRuler) SetAvailableReplicas(i int) { tr.Status.AvailableReplicas = int32(i) } diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/tls_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/tls_types.go new file mode 100644 index 0000000000..51e6835279 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/tls_types.go @@ -0,0 +1,173 @@ +// Copyright 2025 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "fmt" + "reflect" + "strings" + + v1 "k8s.io/api/core/v1" +) + +// +kubebuilder:validation:Enum=TLS10;TLS11;TLS12;TLS13 +type TLSVersion string + +const ( + TLSVersion10 TLSVersion = "TLS10" + TLSVersion11 TLSVersion = "TLS11" + TLSVersion12 TLSVersion = "TLS12" + TLSVersion13 TLSVersion = "TLS13" +) + +// TLSConfig defines full TLS configuration. +type TLSConfig struct { + SafeTLSConfig `json:",inline"` + TLSFilesConfig `json:",inline"` +} + +// Validate semantically validates the given TLSConfig. +func (c *TLSConfig) Validate() error { + if c == nil { + return nil + } + + if !reflect.ValueOf(c.CA).IsZero() { + if c.CAFile != "" { + return fmt.Errorf("cannot specify both 'caFile' and 'ca'") + } + + if err := c.CA.Validate(); err != nil { + return fmt.Errorf("ca: %w", err) + } + } + + hasCert := !reflect.ValueOf(c.Cert).IsZero() + if hasCert { + if c.CertFile != "" { + return fmt.Errorf("cannot specify both 'certFile' and 'cert'") + } + + if err := c.Cert.Validate(); err != nil { + return fmt.Errorf("cert: %w", err) + } + } + + if c.KeyFile != "" && c.KeySecret != nil { + return fmt.Errorf("cannot specify both 'keyFile' and 'keySecret'") + } + + hasCert = hasCert || c.CertFile != "" + hasKey := c.KeyFile != "" || c.KeySecret != nil + + if hasCert && !hasKey { + return fmt.Errorf("cannot specify client cert without client key") + } + + if hasKey && !hasCert { + return fmt.Errorf("cannot specify client key without client cert") + } + + if c.MaxVersion != nil && c.MinVersion != nil && strings.Compare(string(*c.MaxVersion), 
string(*c.MinVersion)) == -1 { + return fmt.Errorf("'maxVersion' must greater than or equal to 'minVersion'") + } + + return nil +} + +// SafeTLSConfig defines safe TLS configurations. +// +k8s:openapi-gen=true +type SafeTLSConfig struct { + // ca defines the Certificate authority used when verifying server certificates. + // +optional + CA SecretOrConfigMap `json:"ca,omitempty"` + + // cert defines the Client certificate to present when doing client-authentication. + // +optional + Cert SecretOrConfigMap `json:"cert,omitempty"` + + // keySecret defines the Secret containing the client key file for the targets. + // +optional + KeySecret *v1.SecretKeySelector `json:"keySecret,omitempty"` + + // serverName is used to verify the hostname for the targets. + // +optional + ServerName *string `json:"serverName,omitempty"` + + // insecureSkipVerify defines how to disable target certificate validation. + // +optional + InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"` // nolint:kubeapilinter + + // minVersion defines the minimum acceptable TLS version. + // + // It requires Prometheus >= v2.35.0 or Thanos >= v0.28.0. + // +optional + MinVersion *TLSVersion `json:"minVersion,omitempty"` + + // maxVersion defines the maximum acceptable TLS version. + // + // It requires Prometheus >= v2.41.0 or Thanos >= v0.31.0. + // +optional + MaxVersion *TLSVersion `json:"maxVersion,omitempty"` +} + +// Validate semantically validates the given SafeTLSConfig. 
+func (c *SafeTLSConfig) Validate() error { + if c == nil { + return nil + } + + if c.CA != (SecretOrConfigMap{}) { + if err := c.CA.Validate(); err != nil { + return fmt.Errorf("ca %s: %w", c.CA.String(), err) + } + } + + if c.Cert != (SecretOrConfigMap{}) { + if err := c.Cert.Validate(); err != nil { + return fmt.Errorf("cert %s: %w", c.Cert.String(), err) + } + } + + if c.Cert != (SecretOrConfigMap{}) && c.KeySecret == nil { + return fmt.Errorf("client cert specified without client key") + } + + if c.KeySecret != nil && c.Cert == (SecretOrConfigMap{}) { + return fmt.Errorf("client key specified without client cert") + } + + if c.MaxVersion != nil && c.MinVersion != nil && strings.Compare(string(*c.MaxVersion), string(*c.MinVersion)) == -1 { + return fmt.Errorf("maxVersion must more than or equal to minVersion") + } + + return nil +} + +// TLSFilesConfig extends the TLS configuration with file parameters. +// +k8s:openapi-gen=true +type TLSFilesConfig struct { + // caFile defines the path to the CA cert in the Prometheus container to use for the targets. + // +optional + CAFile string `json:"caFile,omitempty"` + // certFile defines the path to the client cert file in the Prometheus container for the targets. + // +optional + CertFile string `json:"certFile,omitempty"` + // keyFile defines the path to the client key file in the Prometheus container for the targets. 
+ // +optional + KeyFile string `json:"keyFile,omitempty"` +} + +// diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go index bddbed14bb..36db7f95d3 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go @@ -34,6 +34,10 @@ const ( Version = "v1" ) +// URL represents a valid URL +// +kubebuilder:validation:Pattern:="^(http|https)://.+$" +type URL string + // ByteSize is a valid memory size type based on powers-of-2, so 1KB is 1024B. // Supported units: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB, EB, EiB Ex: `512MB`. // +kubebuilder:validation:Pattern:="(^0|([0-9]*[.])?[0-9]+((K|M|G|T|E|P)i?)?B)$" @@ -49,12 +53,6 @@ func (bs *ByteSize) IsEmpty() bool { // +kubebuilder:validation:Pattern:="^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$" type Duration string -// DurationPointer is a helper function to parse a Duration string into a *Duration. -func DurationPointer(s string) *Duration { - d := Duration(s) - return &d -} - // NonEmptyDuration is a valid time duration that can be parsed by Prometheus model.ParseDuration() function. // Compared to Duration, NonEmptyDuration enforces a minimum length of 1. // Supported units: y, w, d, h, m, s, ms @@ -109,14 +107,14 @@ type ProxyConfig struct { // // It requires Prometheus >= v2.43.0, Alertmanager >= v0.25.0 or Thanos >= v0.32.0. // +optional - ProxyFromEnvironment *bool `json:"proxyFromEnvironment,omitempty"` + ProxyFromEnvironment *bool `json:"proxyFromEnvironment,omitempty"` // nolint:kubeapilinter // proxyConnectHeader optionally specifies headers to send to // proxies during CONNECT requests. // // It requires Prometheus >= v2.43.0, Alertmanager >= v0.25.0 or Thanos >= v0.32.0. 
// +optional // +mapType:=atomic - ProxyConnectHeader map[string][]v1.SecretKeySelector `json:"proxyConnectHeader,omitempty"` + ProxyConnectHeader map[string][]v1.SecretKeySelector `json:"proxyConnectHeader,omitempty"` //nolint:kubeapilinter } // Validate semantically validates the given ProxyConfig. @@ -227,7 +225,7 @@ type ArbitraryFSAccessThroughSMsConfig struct { // Setting this to true enhances security by preventing potential credential theft attacks. // // +optional - Deny bool `json:"deny,omitempty"` + Deny bool `json:"deny,omitempty"` // nolint:kubeapilinter } // Condition represents the state of the resources associated with the @@ -330,6 +328,7 @@ type EmbeddedObjectMetadata struct { // and services. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional + //nolint:kubeapilinter Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` // annotations defines an unstructured key value map stored with a resource that may be @@ -337,6 +336,7 @@ type EmbeddedObjectMetadata struct { // queryable and should be preserved when modifying objects. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ // +optional + //nolint:kubeapilinter Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` } @@ -358,7 +358,7 @@ type WebHTTPConfig struct { // When TLSConfig is not configured, HTTP/2 will be disabled. // Whenever the value of the field changes, a rolling update will be triggered. // +optional - HTTP2 *bool `json:"http2,omitempty"` + HTTP2 *bool `json:"http2,omitempty"` // nolint:kubeapilinter // headers defines a list of headers that can be added to HTTP responses. // +optional Headers *WebHTTPHeaders `json:"headers,omitempty"` @@ -483,7 +483,7 @@ type WebTLSConfig struct { // the order of elements in cipherSuites, is used. 
// // +optional - PreferServerCipherSuites *bool `json:"preferServerCipherSuites,omitempty"` + PreferServerCipherSuites *bool `json:"preferServerCipherSuites,omitempty"` // nolint:kubeapilinter // curvePreferences defines elliptic curves that will be used in an ECDHE handshake, in preference // order. @@ -572,6 +572,7 @@ type Endpoint struct { // params define optional HTTP URL parameters. // +optional + //nolint:kubeapilinter Params map[string][]string `json:"params,omitempty"` // interval at which Prometheus scrapes the metrics from the target. @@ -588,44 +589,16 @@ type Endpoint struct { // +optional ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` - // tlsConfig defines the TLS configuration to use when scraping the target. - // - // +optional - TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` - - // bearerTokenFile defines the file to read bearer token for scraping the target. - // - // Deprecated: use `authorization` instead. - // +optional - BearerTokenFile string `json:"bearerTokenFile,omitempty"` - - // bearerTokenSecret defines a key of a Secret containing the bearer - // token for scraping targets. The secret needs to be in the same namespace - // as the ServiceMonitor object and readable by the Prometheus Operator. - // - // +optional - // - // Deprecated: use `authorization` instead. - BearerTokenSecret *v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` - - // authorization configures the Authorization header credentials to use when - // scraping the target. - // - // Cannot be set at the same time as `basicAuth`, or `oauth2`. - // - // +optional - Authorization *SafeAuthorization `json:"authorization,omitempty"` - // honorLabels defines when true the metric's labels when they collide // with the target's labels. 
// +optional - HonorLabels bool `json:"honorLabels,omitempty"` + HonorLabels bool `json:"honorLabels,omitempty"` // nolint:kubeapilinter // honorTimestamps defines whether Prometheus preserves the timestamps // when exposed by the target. // // +optional - HonorTimestamps *bool `json:"honorTimestamps,omitempty"` + HonorTimestamps *bool `json:"honorTimestamps,omitempty"` // nolint:kubeapilinter // trackTimestampsStaleness defines whether Prometheus tracks staleness of // the metrics that have an explicit timestamp present in scraped data. @@ -634,24 +607,7 @@ type Endpoint struct { // It requires Prometheus >= v2.48.0. // // +optional - TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` - - // basicAuth defines the Basic Authentication credentials to use when - // scraping the target. - // - // Cannot be set at the same time as `authorization`, or `oauth2`. - // - // +optional - BasicAuth *BasicAuth `json:"basicAuth,omitempty"` - - // oauth2 defines the OAuth2 settings to use when scraping the target. - // - // It requires Prometheus >= 2.27.0. - // - // Cannot be set at the same time as `authorization`, or `basicAuth`. - // - // +optional - OAuth2 *OAuth2 `json:"oauth2,omitempty"` + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` // nolint:kubeapilinter // metricRelabelings defines the relabeling rules to apply to the // samples before ingestion. @@ -671,20 +627,6 @@ type Endpoint struct { // +optional RelabelConfigs []RelabelConfig `json:"relabelings,omitempty"` - // +optional - ProxyConfig `json:",inline"` - - // followRedirects defines whether the scrape requests should follow HTTP - // 3xx redirects. - // - // +optional - FollowRedirects *bool `json:"followRedirects,omitempty"` - - // enableHttp2 can be used to disable HTTP2 when scraping the target. - // - // +optional - EnableHttp2 *bool `json:"enableHttp2,omitempty"` - // filterRunning when true, the pods which are not running (e.g. 
either in Failed or // Succeeded state) are dropped during the target discovery. // @@ -693,7 +635,15 @@ type Endpoint struct { // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase // // +optional - FilterRunning *bool `json:"filterRunning,omitempty"` + FilterRunning *bool `json:"filterRunning,omitempty"` // nolint:kubeapilinter + + // bearerTokenFile defines the file to read bearer token for scraping the target. + // + // Deprecated: use `authorization` instead. + // +optional + BearerTokenFile string `json:"bearerTokenFile,omitempty"` + + HTTPConfigWithProxyAndTLSFiles `json:",inline"` } type AttachMetadata struct { @@ -704,7 +654,7 @@ type AttachMetadata struct { // permissions on the `Nodes` objects. // // +optional - Node *bool `json:"node,omitempty"` + Node *bool `json:"node,omitempty"` // nolint:kubeapilinter } // OAuth2 configures OAuth2 settings. @@ -736,6 +686,7 @@ type OAuth2 struct { // URL. // // +optional + //nolint:kubeapilinter EndpointParams map[string]string `json:"endpointParams,omitempty"` // tlsConfig defines the TLS configuration to use when connecting to the OAuth2 server. @@ -828,147 +779,6 @@ func (c *SecretOrConfigMap) String() string { return "" } -// +kubebuilder:validation:Enum=TLS10;TLS11;TLS12;TLS13 -type TLSVersion string - -const ( - TLSVersion10 TLSVersion = "TLS10" - TLSVersion11 TLSVersion = "TLS11" - TLSVersion12 TLSVersion = "TLS12" - TLSVersion13 TLSVersion = "TLS13" -) - -// SafeTLSConfig specifies safe TLS configuration parameters. -// +k8s:openapi-gen=true -type SafeTLSConfig struct { - // ca defines the Certificate authority used when verifying server certificates. - // +optional - CA SecretOrConfigMap `json:"ca,omitempty"` - - // cert defines the Client certificate to present when doing client-authentication. - // +optional - Cert SecretOrConfigMap `json:"cert,omitempty"` - - // keySecret defines the Secret containing the client key file for the targets. 
- // +optional - KeySecret *v1.SecretKeySelector `json:"keySecret,omitempty"` - - // serverName is used to verify the hostname for the targets. - // +optional - ServerName *string `json:"serverName,omitempty"` - - // insecureSkipVerify defines how to disable target certificate validation. - // +optional - InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"` - - // minVersion defines the minimum acceptable TLS version. - // - // It requires Prometheus >= v2.35.0 or Thanos >= v0.28.0. - // +optional - MinVersion *TLSVersion `json:"minVersion,omitempty"` - - // maxVersion defines the maximum acceptable TLS version. - // - // It requires Prometheus >= v2.41.0 or Thanos >= v0.31.0. - // +optional - MaxVersion *TLSVersion `json:"maxVersion,omitempty"` -} - -// Validate semantically validates the given SafeTLSConfig. -func (c *SafeTLSConfig) Validate() error { - if c == nil { - return nil - } - - if c.CA != (SecretOrConfigMap{}) { - if err := c.CA.Validate(); err != nil { - return fmt.Errorf("ca %s: %w", c.CA.String(), err) - } - } - - if c.Cert != (SecretOrConfigMap{}) { - if err := c.Cert.Validate(); err != nil { - return fmt.Errorf("cert %s: %w", c.Cert.String(), err) - } - } - - if c.Cert != (SecretOrConfigMap{}) && c.KeySecret == nil { - return fmt.Errorf("client cert specified without client key") - } - - if c.KeySecret != nil && c.Cert == (SecretOrConfigMap{}) { - return fmt.Errorf("client key specified without client cert") - } - - if c.MaxVersion != nil && c.MinVersion != nil && strings.Compare(string(*c.MaxVersion), string(*c.MinVersion)) == -1 { - return fmt.Errorf("maxVersion must more than or equal to minVersion") - } - - return nil -} - -// TLSConfig extends the safe TLS configuration with file parameters. -// +k8s:openapi-gen=true -type TLSConfig struct { - // +optional - SafeTLSConfig `json:",inline"` - // caFile defines the path to the CA cert in the Prometheus container to use for the targets. 
- // +optional - CAFile string `json:"caFile,omitempty"` - // certFile defines the path to the client cert file in the Prometheus container for the targets. - // +optional - CertFile string `json:"certFile,omitempty"` - // keyFile defines the path to the client key file in the Prometheus container for the targets. - // +optional - KeyFile string `json:"keyFile,omitempty"` -} - -// Validate semantically validates the given TLSConfig. -func (c *TLSConfig) Validate() error { - if c == nil { - return nil - } - - if c.CA != (SecretOrConfigMap{}) { - if c.CAFile != "" { - return fmt.Errorf("cannot specify both caFile and ca") - } - if err := c.CA.Validate(); err != nil { - return fmt.Errorf("SecretOrConfigMap ca: %w", err) - } - } - - if c.Cert != (SecretOrConfigMap{}) { - if c.CertFile != "" { - return fmt.Errorf("cannot specify both certFile and cert") - } - if err := c.Cert.Validate(); err != nil { - return fmt.Errorf("SecretOrConfigMap cert: %w", err) - } - } - - if c.KeyFile != "" && c.KeySecret != nil { - return fmt.Errorf("cannot specify both keyFile and keySecret") - } - - hasCert := c.CertFile != "" || c.Cert != (SecretOrConfigMap{}) - hasKey := c.KeyFile != "" || c.KeySecret != nil - - if hasCert && !hasKey { - return fmt.Errorf("cannot specify client cert without client key") - } - - if hasKey && !hasCert { - return fmt.Errorf("cannot specify client key without client cert") - } - - if c.MaxVersion != nil && c.MinVersion != nil && strings.Compare(string(*c.MaxVersion), string(*c.MinVersion)) == -1 { - return fmt.Errorf("maxVersion must more than or equal to minVersion") - } - - return nil -} - // NamespaceSelector is a selector for selecting either all namespaces or a // list of namespaces. // If `any` is true, it takes precedence over `matchNames`. @@ -979,7 +789,7 @@ type NamespaceSelector struct { // any defines the boolean describing whether all namespaces are selected in contrast to a // list restricting them. 
// +optional - Any bool `json:"any,omitempty"` + Any bool `json:"any,omitempty"` // nolint:kubeapilinter // matchNames defines the list of namespace names to select from. // +optional MatchNames []string `json:"matchNames,omitempty"` @@ -1014,13 +824,19 @@ const ( // NativeHistogramConfig extends the native histogram configuration settings. // +k8s:openapi-gen=true type NativeHistogramConfig struct { + // scrapeNativeHistograms defines whether to enable scraping of native histograms. + // It requires Prometheus >= v3.8.0. + // + // +optional + ScrapeNativeHistograms *bool `json:"scrapeNativeHistograms,omitempty"` // nolint:kubeapilinter + // scrapeClassicHistograms defines whether to scrape a classic histogram that is also exposed as a native histogram. // It requires Prometheus >= v2.45.0. // // Notice: `scrapeClassicHistograms` corresponds to the `always_scrape_classic_histograms` field in the Prometheus configuration. // // +optional - ScrapeClassicHistograms *bool `json:"scrapeClassicHistograms,omitempty"` + ScrapeClassicHistograms *bool `json:"scrapeClassicHistograms,omitempty"` // nolint:kubeapilinter // nativeHistogramBucketLimit defines ff there are more than this many buckets in a native histogram, // buckets will be merged to stay within the limit. @@ -1040,7 +856,7 @@ type NativeHistogramConfig struct { // It requires Prometheus >= v3.0.0. 
// // +optional - ConvertClassicHistogramsToNHCB *bool `json:"convertClassicHistogramsToNHCB,omitempty"` + ConvertClassicHistogramsToNHCB *bool `json:"convertClassicHistogramsToNHCB,omitempty"` // nolint:kubeapilinter } // +kubebuilder:validation:Enum=RelabelConfig;RoleSelector @@ -1140,3 +956,135 @@ const ( SchemeHTTP Scheme = "HTTP" SchemeHTTPS Scheme = "HTTPS" ) + +// +kubebuilder:validation:Enum=OrderedReady;Parallel +type PodManagementPolicyType string + +const ( + // OrderedReadyPodManagement will create pods in strictly increasing order on + // scale up and strictly decreasing order on scale down, progressing only when + // the previous pod is ready or terminated. At most one pod will be changed + // at any time. + OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady" + // ParallelPodManagement will create and delete pods as soon as the stateful set + // replica count is changed, and will not wait for pods to be ready or complete + // termination. + ParallelPodManagement PodManagementPolicyType = "Parallel" +) + +// StatefulSetUpdateStrategy indicates the strategy used when updating the +// StatefulSet. It includes any additional parameters necessary to perform the +// update for the indicated strategy. +// +// +kubebuilder:validation:XValidation:rule="!(self.type != 'RollingUpdate' && has(self.rollingUpdate))",message="rollingUpdate requires type to be RollingUpdate" +type StatefulSetUpdateStrategy struct { + // type indicates the type of the StatefulSetUpdateStrategy. + // + // Default is RollingUpdate. + // + // +required + Type StatefulSetUpdateStrategyType `json:"type"` + + // rollingUpdate is used to communicate parameters when type is RollingUpdate. + // + // +optional + RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty"` +} + +// RollingUpdateStatefulSetStrategy is used to communicate parameter for the RollingUpdate strategy. 
+type RollingUpdateStatefulSetStrategy struct { + // maxUnavailable is the maximum number of pods that can be unavailable + // during the update. The value can be an absolute number (ex: 5) or a + // percentage of desired pods (ex: 10%). Absolute number is calculated from + // percentage by rounding up. This can not be 0. Defaults to 1. This field + // is alpha-level and is only honored by servers that enable the + // MaxUnavailableStatefulSet feature. The field applies to all pods in the + // range 0 to Replicas-1. That means if there is any unavailable pod in + // the range 0 to Replicas-1, it will be counted towards MaxUnavailable. + // + // +kubebuilder:validation:XIntOrString + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"varint,2,opt,name=maxUnavailable"` +} + +// StatefulSetUpdateStrategyType is a string enumeration type that enumerates +// all possible update strategies for the StatefulSet pods. +// +// +kubebuilder:validation:Enum=OnDelete;RollingUpdate +type StatefulSetUpdateStrategyType string + +const ( + // RollingUpdateStatefulSetStrategyType indicates that update will be + // applied to all Pods in the StatefulSet with respect to the StatefulSet + // ordering constraints. When a scale operation is performed with this + // strategy, new Pods will be created from the specification version indicated + // by the StatefulSet's updateRevision. + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" + + // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version + // tracking and ordered rolling restarts are disabled. Pods are recreated + // from the StatefulSetSpec when they are manually deleted. When a scale + // operation is performed with this strategy, new Pods will be created from + // the the specification version indicated by the StatefulSet's + // currentRevision. 
+	OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete"
+)
+
+type TracingConfig struct {
+	// clientType defines the client used to export the traces. Supported values are `HTTP` and `GRPC`.
+	// +kubebuilder:validation:Enum=http;grpc;HTTP;GRPC
+	// +optional
+	ClientType *string `json:"clientType,omitempty"`
+
+	// endpoint to send the traces to. Should be provided in format `<host>:<port>`.
+	// +kubebuilder:validation:MinLength:=1
+	// +required
+	Endpoint string `json:"endpoint"`
+
+	// samplingFraction defines the probability a given trace will be sampled. Must be a float from 0 through 1.
+	// +optional
+	SamplingFraction *resource.Quantity `json:"samplingFraction,omitempty"`
+
+	// insecure if disabled, the client will use a secure connection.
+	// +optional
+	Insecure *bool `json:"insecure,omitempty"` // nolint:kubeapilinter
+
+	// headers defines the key-value pairs to be used as headers associated with gRPC or HTTP requests.
+	// +optional
+	Headers map[string]string `json:"headers"`
+
+	// compression key for supported compression types. The only supported value is `Gzip`.
+	// +kubebuilder:validation:Enum=gzip;Gzip
+	// +optional
+	Compression *string `json:"compression,omitempty"`
+
+	// timeout defines the maximum time the exporter will wait for each batch export.
+	// +optional
+	Timeout *Duration `json:"timeout,omitempty"`
+
+	// tlsConfig to use when sending traces.
+	// +optional
+	TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
+}
+
+// Validate semantically validates the given TracingConfig.
+func (tc *TracingConfig) Validate() error { + if tc == nil { + return nil + } + + if err := tc.TLSConfig.Validate(); err != nil { + return err + } + + if tc.SamplingFraction != nil { + min, _ := resource.ParseQuantity("0") + max, _ := resource.ParseQuantity("1") + + if tc.SamplingFraction.Cmp(min) < 0 || tc.SamplingFraction.Cmp(max) > 0 { + return fmt.Errorf("`samplingFraction` must be between 0 and 1") + } + } + + return nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go index 03b17df8bc..fec9816d4a 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go @@ -228,9 +228,9 @@ func (in *AlertmanagerGlobalConfig) DeepCopyInto(out *AlertmanagerGlobalConfig) *out = new(GlobalSMTPConfig) (*in).DeepCopyInto(*out) } - if in.HTTPConfig != nil { - in, out := &in.HTTPConfig, &out.HTTPConfig - *out = new(HTTPConfig) + if in.HTTPConfigWithProxy != nil { + in, out := &in.HTTPConfigWithProxy, &out.HTTPConfigWithProxy + *out = new(HTTPConfigWithProxy) (*in).DeepCopyInto(*out) } if in.SlackAPIURL != nil { @@ -453,6 +453,16 @@ func (in *AlertmanagerSpec) DeepCopyInto(out *AlertmanagerSpec) { *out = new(string) **out = **in } + if in.PodManagementPolicy != nil { + in, out := &in.PodManagementPolicy, &out.PodManagementPolicy + *out = new(PodManagementPolicyType) + **out = **in + } + if in.UpdateStrategy != nil { + in, out := &in.UpdateStrategy, &out.UpdateStrategy + *out = new(StatefulSetUpdateStrategy) + (*in).DeepCopyInto(*out) + } if in.Containers != nil { in, out := &in.Containers, &out.Containers *out = make([]corev1.Container, len(*in)) @@ -694,6 +704,16 @@ func (in *AzureAD) DeepCopyInto(out *AzureAD) { *out = new(AzureSDK) 
(*in).DeepCopyInto(*out) } + if in.WorkloadIdentity != nil { + in, out := &in.WorkloadIdentity, &out.WorkloadIdentity + *out = new(AzureWorkloadIdentity) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureAD. @@ -742,6 +762,21 @@ func (in *AzureSDK) DeepCopy() *AzureSDK { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureWorkloadIdentity) DeepCopyInto(out *AzureWorkloadIdentity) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureWorkloadIdentity. +func (in *AzureWorkloadIdentity) DeepCopy() *AzureWorkloadIdentity { + if in == nil { + return nil + } + out := new(AzureWorkloadIdentity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BasicAuth) DeepCopyInto(out *BasicAuth) { *out = *in @@ -979,6 +1014,16 @@ func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) { *out = new(PodDNSConfig) (*in).DeepCopyInto(*out) } + if in.PodManagementPolicy != nil { + in, out := &in.PodManagementPolicy, &out.PodManagementPolicy + *out = new(PodManagementPolicyType) + **out = **in + } + if in.UpdateStrategy != nil { + in, out := &in.UpdateStrategy, &out.UpdateStrategy + *out = new(StatefulSetUpdateStrategy) + (*in).DeepCopyInto(*out) + } if in.EnableServiceLinks != nil { in, out := &in.EnableServiceLinks, &out.EnableServiceLinks *out = new(bool) @@ -1054,6 +1099,11 @@ func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) { *out = new(bool) **out = **in } + if in.ScrapeNativeHistograms != nil { + in, out := &in.ScrapeNativeHistograms, &out.ScrapeNativeHistograms + *out = new(bool) + **out = **in + } if in.ScrapeClassicHistograms != nil { in, out := &in.ScrapeClassicHistograms, &out.ScrapeClassicHistograms *out = new(bool) @@ -1093,7 +1143,7 @@ func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) { } if in.TracingConfig != nil { in, out := &in.TracingConfig, &out.TracingConfig - *out = new(PrometheusTracingConfig) + *out = new(TracingConfig) (*in).DeepCopyInto(*out) } if in.BodySizeLimit != nil { @@ -1366,21 +1416,6 @@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { (*out)[key] = outVal } } - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(TLSConfig) - (*in).DeepCopyInto(*out) - } - if in.BearerTokenSecret != nil { - in, out := &in.BearerTokenSecret, &out.BearerTokenSecret - *out = new(corev1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.Authorization != nil { - in, out := &in.Authorization, &out.Authorization - *out = new(SafeAuthorization) - (*in).DeepCopyInto(*out) - } if in.HonorTimestamps != nil { in, out := &in.HonorTimestamps, &out.HonorTimestamps *out = new(bool) @@ -1391,16 +1426,6 
@@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { *out = new(bool) **out = **in } - if in.BasicAuth != nil { - in, out := &in.BasicAuth, &out.BasicAuth - *out = new(BasicAuth) - (*in).DeepCopyInto(*out) - } - if in.OAuth2 != nil { - in, out := &in.OAuth2, &out.OAuth2 - *out = new(OAuth2) - (*in).DeepCopyInto(*out) - } if in.MetricRelabelConfigs != nil { in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs *out = make([]RelabelConfig, len(*in)) @@ -1415,22 +1440,12 @@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) - if in.FollowRedirects != nil { - in, out := &in.FollowRedirects, &out.FollowRedirects - *out = new(bool) - **out = **in - } - if in.EnableHttp2 != nil { - in, out := &in.EnableHttp2, &out.EnableHttp2 - *out = new(bool) - **out = **in - } if in.FilterRunning != nil { in, out := &in.FilterRunning, &out.FilterRunning *out = new(bool) **out = **in } + in.HTTPConfigWithProxyAndTLSFiles.DeepCopyInto(&out.HTTPConfigWithProxyAndTLSFiles) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. @@ -1561,6 +1576,11 @@ func (in *GlobalSMTPConfig) DeepCopyInto(out *GlobalSMTPConfig) { *out = new(SafeTLSConfig) (*in).DeepCopyInto(*out) } + if in.ForceImplicitTLS != nil { + in, out := &in.ForceImplicitTLS, &out.ForceImplicitTLS + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalSMTPConfig. @@ -1670,6 +1690,82 @@ func (in *GlobalWebexConfig) DeepCopy() *GlobalWebexConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HTTPConfig) DeepCopyInto(out *HTTPConfig) { + *out = *in + in.HTTPConfigWithoutTLS.DeepCopyInto(&out.HTTPConfigWithoutTLS) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(SafeTLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfig. +func (in *HTTPConfig) DeepCopy() *HTTPConfig { + if in == nil { + return nil + } + out := new(HTTPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfigWithProxy) DeepCopyInto(out *HTTPConfigWithProxy) { + *out = *in + in.HTTPConfig.DeepCopyInto(&out.HTTPConfig) + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigWithProxy. +func (in *HTTPConfigWithProxy) DeepCopy() *HTTPConfigWithProxy { + if in == nil { + return nil + } + out := new(HTTPConfigWithProxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfigWithProxyAndTLSFiles) DeepCopyInto(out *HTTPConfigWithProxyAndTLSFiles) { + *out = *in + in.HTTPConfigWithTLSFiles.DeepCopyInto(&out.HTTPConfigWithTLSFiles) + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigWithProxyAndTLSFiles. +func (in *HTTPConfigWithProxyAndTLSFiles) DeepCopy() *HTTPConfigWithProxyAndTLSFiles { + if in == nil { + return nil + } + out := new(HTTPConfigWithProxyAndTLSFiles) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPConfigWithTLSFiles) DeepCopyInto(out *HTTPConfigWithTLSFiles) { + *out = *in + in.HTTPConfigWithoutTLS.DeepCopyInto(&out.HTTPConfigWithoutTLS) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigWithTLSFiles. +func (in *HTTPConfigWithTLSFiles) DeepCopy() *HTTPConfigWithTLSFiles { + if in == nil { + return nil + } + out := new(HTTPConfigWithTLSFiles) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfigWithoutTLS) DeepCopyInto(out *HTTPConfigWithoutTLS) { *out = *in if in.Authorization != nil { in, out := &in.Authorization, &out.Authorization @@ -1691,12 +1787,6 @@ func (in *HTTPConfig) DeepCopyInto(out *HTTPConfig) { *out = new(corev1.SecretKeySelector) (*in).DeepCopyInto(*out) } - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(SafeTLSConfig) - (*in).DeepCopyInto(*out) - } - in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) if in.FollowRedirects != nil { in, out := &in.FollowRedirects, &out.FollowRedirects *out = new(bool) @@ -1709,12 +1799,12 @@ func (in *HTTPConfig) DeepCopyInto(out *HTTPConfig) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfig. -func (in *HTTPConfig) DeepCopy() *HTTPConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigWithoutTLS. +func (in *HTTPConfigWithoutTLS) DeepCopy() *HTTPConfigWithoutTLS { if in == nil { return nil } - out := new(HTTPConfig) + out := new(HTTPConfigWithoutTLS) in.DeepCopyInto(out) return out } @@ -1817,6 +1907,11 @@ func (in *NamespaceSelector) DeepCopy() *NamespaceSelector { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *NativeHistogramConfig) DeepCopyInto(out *NativeHistogramConfig) { *out = *in + if in.ScrapeNativeHistograms != nil { + in, out := &in.ScrapeNativeHistograms, &out.ScrapeNativeHistograms + *out = new(bool) + **out = **in + } if in.ScrapeClassicHistograms != nil { in, out := &in.ScrapeClassicHistograms, &out.ScrapeClassicHistograms *out = new(bool) @@ -2069,7 +2164,7 @@ func (in *PodMetricsEndpoint) DeepCopyInto(out *PodMetricsEndpoint) { *out = new(bool) **out = **in } - in.HTTPConfig.DeepCopyInto(&out.HTTPConfig) + in.HTTPConfigWithProxy.DeepCopyInto(&out.HTTPConfigWithProxy) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsEndpoint. @@ -2283,22 +2378,6 @@ func (in *ProbeSpec) DeepCopyInto(out *ProbeSpec) { *out = *in in.ProberSpec.DeepCopyInto(&out.ProberSpec) in.Targets.DeepCopyInto(&out.Targets) - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(SafeTLSConfig) - (*in).DeepCopyInto(*out) - } - in.BearerTokenSecret.DeepCopyInto(&out.BearerTokenSecret) - if in.BasicAuth != nil { - in, out := &in.BasicAuth, &out.BasicAuth - *out = new(BasicAuth) - (*in).DeepCopyInto(*out) - } - if in.OAuth2 != nil { - in, out := &in.OAuth2, &out.OAuth2 - *out = new(OAuth2) - (*in).DeepCopyInto(*out) - } if in.MetricRelabelConfigs != nil { in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs *out = make([]RelabelConfig, len(*in)) @@ -2364,6 +2443,7 @@ func (in *ProbeSpec) DeepCopyInto(out *ProbeSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + in.HTTPConfig.DeepCopyInto(&out.HTTPConfig) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeSpec. @@ -2709,58 +2789,6 @@ func (in *PrometheusStatus) DeepCopy() *PrometheusStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PrometheusTracingConfig) DeepCopyInto(out *PrometheusTracingConfig) { - *out = *in - if in.ClientType != nil { - in, out := &in.ClientType, &out.ClientType - *out = new(string) - **out = **in - } - if in.SamplingFraction != nil { - in, out := &in.SamplingFraction, &out.SamplingFraction - x := (*in).DeepCopy() - *out = &x - } - if in.Insecure != nil { - in, out := &in.Insecure, &out.Insecure - *out = new(bool) - **out = **in - } - if in.Headers != nil { - in, out := &in.Headers, &out.Headers - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Compression != nil { - in, out := &in.Compression, &out.Compression - *out = new(string) - **out = **in - } - if in.Timeout != nil { - in, out := &in.Timeout, &out.Timeout - *out = new(Duration) - **out = **in - } - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(TLSConfig) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusTracingConfig. -func (in *PrometheusTracingConfig) DeepCopy() *PrometheusTracingConfig { - if in == nil { - return nil - } - out := new(PrometheusTracingConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PrometheusWebSpec) DeepCopyInto(out *PrometheusWebSpec) { *out = *in @@ -3125,6 +3153,26 @@ func (in *RetainConfig) DeepCopy() *RetainConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. +func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStatefulSetStrategy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Rule) DeepCopyInto(out *Rule) { *out = *in @@ -3614,6 +3662,26 @@ func (in *Sigv4) DeepCopy() *Sigv4 { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateStatefulSetStrategy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. +func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { + if in == nil { + return nil + } + out := new(StatefulSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { *out = *in @@ -3644,6 +3712,7 @@ func (in *StorageSpec) DeepCopy() *StorageSpec { func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { *out = *in in.SafeTLSConfig.DeepCopyInto(&out.SafeTLSConfig) + out.TLSFilesConfig = in.TLSFilesConfig } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. @@ -3656,6 +3725,21 @@ func (in *TLSConfig) DeepCopy() *TLSConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSFilesConfig) DeepCopyInto(out *TLSFilesConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSFilesConfig. +func (in *TLSFilesConfig) DeepCopy() *TLSFilesConfig { + if in == nil { + return nil + } + out := new(TLSFilesConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TSDBSpec) DeepCopyInto(out *TSDBSpec) { *out = *in @@ -3823,6 +3907,16 @@ func (in *ThanosRulerSpec) DeepCopyInto(out *ThanosRulerSpec) { *out = new(string) **out = **in } + if in.PodManagementPolicy != nil { + in, out := &in.PodManagementPolicy, &out.PodManagementPolicy + *out = new(PodManagementPolicyType) + **out = **in + } + if in.UpdateStrategy != nil { + in, out := &in.UpdateStrategy, &out.UpdateStrategy + *out = new(StatefulSetUpdateStrategy) + (*in).DeepCopyInto(*out) + } if in.QueryEndpoints != nil { in, out := &in.QueryEndpoints, &out.QueryEndpoints *out = make([]string, len(*in)) @@ -4122,6 +4216,58 @@ func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TracingConfig) DeepCopyInto(out *TracingConfig) { + *out = *in + if in.ClientType != nil { + in, out := &in.ClientType, &out.ClientType + *out = new(string) + **out = **in + } + if in.SamplingFraction != nil { + in, out := &in.SamplingFraction, &out.SamplingFraction + x := (*in).DeepCopy() + *out = &x + } + if in.Insecure != nil { + in, out := &in.Insecure, &out.Insecure + *out = new(bool) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(Duration) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfig. +func (in *TracingConfig) DeepCopy() *TracingConfig { + if in == nil { + return nil + } + out := new(TracingConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WebConfigFileFields) DeepCopyInto(out *WebConfigFileFields) { *out = *in diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6320f4eb4c..0b99d832fa 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -4,13 +4,17 @@ // Package http2 implements the HTTP/2 protocol. // -// This package is low-level and intended to be used directly by very -// few people. Most users will use it indirectly through the automatic -// use by the net/http package (from Go 1.6 and later). -// For use in earlier Go versions see ConfigureServer. 
(Transport support -// requires Go 1.6 or later) +// Almost no users should need to import this package directly. +// The net/http package supports HTTP/2 natively. // -// See https://http2.github.io/ for more information on HTTP/2. +// To enable or disable HTTP/2 support in net/http clients and servers, see +// [http.Transport.Protocols] and [http.Server.Protocols]. +// +// To configure HTTP/2 parameters, see +// [http.Transport.HTTP2] and [http.Server.HTTP2]. +// +// To create HTTP/1 or HTTP/2 connections, see +// [http.Transport.NewClientConn]. package http2 // import "golang.org/x/net/http2" import ( diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 7ef807f79d..65da5175c9 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -164,6 +164,8 @@ type Server struct { // NewWriteScheduler constructs a write scheduler for a connection. // If nil, a default scheduler is chosen. + // + // Deprecated: User-provided write schedulers are deprecated. NewWriteScheduler func() WriteScheduler // CountError, if non-nil, is called on HTTP/2 server errors. 
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 8cf64b78e2..2e9c2f6a52 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -712,10 +712,6 @@ func canRetryError(err error) bool { return true } if se, ok := err.(StreamError); ok { - if se.Code == ErrCodeProtocol && se.Cause == errFromPeer { - // See golang/go#47635, golang/go#42777 - return true - } return se.Code == ErrCodeRefusedStream } return false @@ -3233,10 +3229,6 @@ func (gz *gzipReader) Close() error { return gz.body.Close() } -type errorReader struct{ err error } - -func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } - // isConnectionCloseRequest reports whether req should use its own // connection for a single request and then close the connection. func isConnectionCloseRequest(req *http.Request) bool { diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index 7de27be525..551545f313 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -8,6 +8,8 @@ import "fmt" // WriteScheduler is the interface implemented by HTTP/2 write schedulers. // Methods are never called concurrently. +// +// Deprecated: User-provided write schedulers are deprecated. type WriteScheduler interface { // OpenStream opens a new stream in the write scheduler. // It is illegal to call this with streamID=0 or with a streamID that is @@ -38,6 +40,8 @@ type WriteScheduler interface { } // OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +// +// Deprecated: User-provided write schedulers are deprecated. type OpenStreamOptions struct { // PusherID is zero if the stream was initiated by the client. Otherwise, // PusherID names the stream that pushed the newly opened stream. @@ -47,6 +51,8 @@ type OpenStreamOptions struct { } // FrameWriteRequest is a request to write a frame. 
+// +// Deprecated: User-provided write schedulers are deprecated. type FrameWriteRequest struct { // write is the interface value that does the writing, once the // WriteScheduler has selected this frame to write. The write diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go index 7803a9261b..c3d3e9bed6 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go @@ -14,6 +14,8 @@ import ( const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1 // PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +// +// Deprecated: User-provided write schedulers are deprecated. type PriorityWriteSchedulerConfig struct { // MaxClosedNodesInTree controls the maximum number of closed streams to // retain in the priority tree. Setting this to zero saves a small amount @@ -55,6 +57,9 @@ type PriorityWriteSchedulerConfig struct { // NewPriorityWriteScheduler constructs a WriteScheduler that schedules // frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. // If cfg is nil, default options are used. +// +// Deprecated: The RFC 7540 write scheduler has known bugs and performance issues, +// and RFC 7540 prioritization was deprecated in RFC 9113. func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { return newPriorityWriteSchedulerRFC7540(cfg) } diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go index f2e55e05ce..d5d4e22148 100644 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -10,6 +10,8 @@ import "math" // priorities. Control frames like SETTINGS and PING are written before DATA // frames, but if no control frames are queued and multiple streams have queued // HEADERS or DATA frames, Pop selects a ready stream arbitrarily. 
+// +// Deprecated: User-provided write schedulers are deprecated. func NewRandomWriteScheduler() WriteScheduler { return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} } diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go index 4051830982..90ca138af3 100644 --- a/vendor/golang.org/x/sync/singleflight/singleflight.go +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -22,7 +22,7 @@ var errGoexit = errors.New("runtime.Goexit was called") // A panicError is an arbitrary value recovered from a panic // with the stack trace during the execution of given function. type panicError struct { - value interface{} + value any stack []byte } @@ -40,7 +40,7 @@ func (p *panicError) Unwrap() error { return err } -func newPanicError(v interface{}) error { +func newPanicError(v any) error { stack := debug.Stack() // The first line of the stack trace is of the form "goroutine N [status]:" @@ -58,7 +58,7 @@ type call struct { // These fields are written once before the WaitGroup is done // and are only read after the WaitGroup is done. - val interface{} + val any err error // These fields are read and written with the singleflight @@ -78,7 +78,7 @@ type Group struct { // Result holds the results of Do, so they can be passed // on a channel. type Result struct { - Val interface{} + Val any Err error Shared bool } @@ -88,7 +88,7 @@ type Result struct { // time. If a duplicate comes in, the duplicate caller waits for the // original to complete and receives the same results. // The return value shared indicates whether v was given to multiple callers. 
-func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { +func (g *Group) Do(key string, fn func() (any, error)) (v any, err error, shared bool) { g.mu.Lock() if g.m == nil { g.m = make(map[string]*call) @@ -118,7 +118,7 @@ func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, e // results when they are ready. // // The returned channel will not be closed. -func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { +func (g *Group) DoChan(key string, fn func() (any, error)) <-chan Result { ch := make(chan Result, 1) g.mu.Lock() if g.m == nil { @@ -141,7 +141,7 @@ func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result } // doCall handles the single call for a key. -func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { +func (g *Group) doCall(c *call, key string, fn func() (any, error)) { normalReturn := false recovered := false diff --git a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go index 60ad425f34..239b10c4da 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go @@ -18,8 +18,11 @@ import ( // // Two Cursors compare equal if they represent the same node. // -// Call [Inspector.Root] to obtain a valid cursor for the virtual root -// node of the traversal. +// The zero value of Cursor is not valid. +// +// Call [Inspector.Root] to obtain a cursor for the virtual root node +// of the traversal. This is the sole valid cursor for which [Cursor.Node] +// returns nil. 
// // Use the following methods to navigate efficiently around the tree: // - for ancestors, use [Cursor.Parent] and [Cursor.Enclosing]; @@ -37,7 +40,7 @@ type Cursor struct { index int32 // index of push node; -1 for virtual root node } -// Root returns a cursor for the virtual root node, +// Root returns a valid cursor for the virtual root node, // whose children are the files provided to [New]. // // Its [Cursor.Node] method return nil. @@ -61,14 +64,23 @@ func (in *Inspector) At(index int32) Cursor { return Cursor{in, index} } +// Valid reports whether the cursor is valid. +// The zero value of cursor is invalid. +// Unless otherwise documented, it is not safe to call +// any other method on an invalid cursor. +func (c Cursor) Valid() bool { + return c.in != nil +} + // Inspector returns the cursor's Inspector. +// It returns nil if the Cursor is not valid. func (c Cursor) Inspector() *Inspector { return c.in } // Index returns the index of this cursor position within the package. // // Clients should not assume anything about the numeric Index value // except that it increases monotonically throughout the traversal. -// It is provided for use with [At]. +// It is provided for use with [Inspector.At]. // // Index must not be called on the Root node. func (c Cursor) Index() int32 { @@ -89,7 +101,7 @@ func (c Cursor) Node() ast.Node { // String returns information about the cursor's node, if any. func (c Cursor) String() string { - if c.in == nil { + if !c.Valid() { return "(invalid)" } if c.index < 0 { @@ -233,6 +245,18 @@ func (c Cursor) ParentEdge() (edge.Kind, int) { return unpackEdgeKindAndIndex(events[pop].parent) } +// ParentEdgeKind returns the kind component of the result of [Cursor.ParentEdge]. +func (c Cursor) ParentEdgeKind() edge.Kind { + ek, _ := c.ParentEdge() + return ek +} + +// ParentEdgeIndex returns the index component of the result of [Cursor.ParentEdge]. 
+func (c Cursor) ParentEdgeIndex() int { + _, index := c.ParentEdge() + return index +} + // ChildAt returns the cursor for the child of the // current node identified by its edge and index. // The index must be -1 if the edge.Kind is not a slice. diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index a703cdfcf9..b414d17ebd 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -87,7 +87,7 @@ type event struct { // Type can be recovered from the sole bit in typ. // [Tried this, wasn't faster. --adonovan] -// Preorder visits all the nodes of the files supplied to New in +// Preorder visits all the nodes of the files supplied to [New] in // depth-first order. It calls f(n) for each node n before it visits // n's children. // @@ -133,7 +133,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { } } -// Nodes visits the nodes of the files supplied to New in depth-first +// Nodes visits the nodes of the files supplied to [New] in depth-first // order. It calls f(n, true) for each node n before it visits n's // children. If f returns true, Nodes invokes f recursively for each // of the non-nil children of the node, followed by a call of diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go index c576dc70ac..b68c553d41 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/iter.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go @@ -12,13 +12,31 @@ import ( ) // PreorderSeq returns an iterator that visits all the -// nodes of the files supplied to New in depth-first order. +// nodes of the files supplied to [New] in depth-first order. // It visits each node n before n's children. // The complete traversal sequence is determined by ast.Inspect. 
// -// The types argument, if non-empty, enables type-based -// filtering of events: only nodes whose type matches an -// element of the types slice are included in the sequence. +// The types argument, if non-empty, enables type-based filtering: +// only nodes whose type matches an element of the types slice are +// included in the sequence. +// +// Example: +// +// for call := range in.PreorderSeq((*ast.CallExpr)(nil)) { ... } +// +// The [All] function is more convenient if there is exactly one node type: +// +// for call := range All[*ast.CallExpr](in) { ... } +// +// See also the newer and more flexible [Cursor] API, which lets you +// start the traversal at an arbitrary node, and reports each matching +// node by its Cursor, enabling easier navigation. +// The above example would be written thus: +// +// for curCall := range in.Root().Preorder((*ast.CallExpr)(nil)) { +// call := curCall.Node().(*ast.CallExpr) +// ... +// } func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { // This implementation is identical to Preorder, @@ -53,6 +71,16 @@ func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { // Example: // // for call := range All[*ast.CallExpr](in) { ... } +// +// See also the newer and more flexible [Cursor] API, which lets you +// start the traversal at an arbitrary node, and reports each matching +// node by its Cursor, enabling easier navigation. +// The above example would be written thus: +// +// for curCall := range in.Root().Preorder((*ast.CallExpr)(nil)) { +// call := curCall.Node().(*ast.CallExpr) +// ... 
+// } func All[N interface { *S ast.Node diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index ff607389da..b249a5c7ef 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -284,6 +284,8 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { } } + ld.externalDriver = external + return ld.refine(response) } @@ -692,10 +694,11 @@ type loaderPackage struct { type loader struct { pkgs map[string]*loaderPackage // keyed by Package.ID Config - sizes types.Sizes // non-nil if needed by mode - parseCache map[string]*parseValue - parseCacheMu sync.Mutex - exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + sizes types.Sizes // non-nil if needed by mode + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + externalDriver bool // true if an external GOPACKAGESDRIVER handled the request // Config.Mode contains the implied mode (see impliedLoadMode). // Implied mode contains all the fields we need the data for. @@ -1226,6 +1229,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } if lpkg.Module != nil && lpkg.Module.GoVersion != "" { tc.GoVersion = "go" + lpkg.Module.GoVersion + } else if ld.externalDriver && lpkg.goVersion != 0 { + // Module information is missing when GOPACKAGESDRIVER is used, + // so use the go version from the driver response. 
+ tc.GoVersion = fmt.Sprintf("go1.%d", lpkg.goVersion) } if (ld.Mode & typecheckCgo) != 0 { if !typesinternal.SetUsesCgo(tc) { diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 6646bf5508..56723d1f82 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -29,7 +29,6 @@ import ( "strconv" "strings" - "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typesinternal" ) @@ -281,10 +280,10 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { T := o.Type() if alias, ok := T.(*types.Alias); ok { - if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil { + if r := findTypeParam(obj, alias.TypeParams(), path, opTypeParam); r != nil { return Path(r), nil } - if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil { + if r := find(obj, alias.Rhs(), append(path, opRhs)); r != nil { return Path(r), nil } @@ -694,14 +693,11 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { case opRhs: if alias, ok := t.(*types.Alias); ok { - t = aliases.Rhs(alias) - } else if false && aliases.Enabled() { - // The Enabled check is too expensive, so for now we - // simply assume that aliases are not enabled. - // + t = alias.Rhs() + } else if false { // Now that go1.24 is assured, we should be able to - // replace this with "if true {", but it causes tests - // to fail. TODO(adonovan): investigate. + // replace this with "if true {", but it causes objectpath + // tests to fail. TODO(adonovan): investigate. 
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go index b9425f5a20..a4ae04bc71 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -9,30 +9,10 @@ import ( "go/types" ) -// Package aliases defines backward compatible shims -// for the types.Alias type representation added in 1.22. -// This defines placeholders for x/tools until 1.26. - -// NewAlias creates a new TypeName in Package pkg that +// New creates a new TypeName in Package pkg that // is an alias for the type rhs. -// -// The enabled parameter determines whether the resulting [TypeName]'s -// type is an [types.Alias]. Its value must be the result of a call to -// [Enabled], which computes the effective value of -// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled -// function is expensive and should be called once per task (e.g. -// package import), not once per call to NewAlias. -// -// Precondition: enabled || len(tparams)==0. -// If materialized aliases are disabled, there must not be any type parameters. 
-func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { - if enabled { - tname := types.NewTypeName(pos, pkg, name, nil) - SetTypeParams(types.NewAlias(tname, rhs), tparams) - return tname - } - if len(tparams) > 0 { - panic("cannot create an alias with type parameters when gotypesalias is not enabled") - } - return types.NewTypeName(pos, pkg, name, rhs) +func New(pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { + tname := types.NewTypeName(pos, pkg, name, nil) + types.NewAlias(tname, rhs).SetTypeParams(tparams) + return tname } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go deleted file mode 100644 index 7716a3331d..0000000000 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package aliases - -import ( - "go/ast" - "go/parser" - "go/token" - "go/types" -) - -// Rhs returns the type on the right-hand side of the alias declaration. -func Rhs(alias *types.Alias) types.Type { - if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { - return alias.Rhs() // go1.23+ - } - - // go1.22's Alias didn't have the Rhs method, - // so Unalias is the best we can do. - return types.Unalias(alias) -} - -// TypeParams returns the type parameter list of the alias. -func TypeParams(alias *types.Alias) *types.TypeParamList { - if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { - return alias.TypeParams() // go1.23+ - } - return nil -} - -// SetTypeParams sets the type parameters of the alias type. 
-func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) { - if alias, ok := any(alias).(interface { - SetTypeParams(tparams []*types.TypeParam) - }); ok { - alias.SetTypeParams(tparams) // go1.23+ - } else if len(tparams) > 0 { - panic("cannot set type parameters of an Alias type in go1.22") - } -} - -// TypeArgs returns the type arguments used to instantiate the Alias type. -func TypeArgs(alias *types.Alias) *types.TypeList { - if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { - return alias.TypeArgs() // go1.23+ - } - return nil // empty (go1.22) -} - -// Origin returns the generic Alias type of which alias is an instance. -// If alias is not an instance of a generic alias, Origin returns alias. -func Origin(alias *types.Alias) *types.Alias { - if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok { - return alias.Origin() // go1.23+ - } - return alias // not an instance of a generic alias (go1.22) -} - -// Enabled reports whether [NewAlias] should create [types.Alias] types. -// -// This function is expensive! Call it sparingly. -func Enabled() bool { - // The only reliable way to compute the answer is to invoke go/types. - // We don't parse the GODEBUG environment variable, because - // (a) it's tricky to do so in a manner that is consistent - // with the godebug package; in particular, a simple - // substring check is not good enough. The value is a - // rightmost-wins list of options. But more importantly: - // (b) it is impossible to detect changes to the effective - // setting caused by os.Setenv("GODEBUG"), as happens in - // many tests. Therefore any attempt to cache the result - // is just incorrect. 
- fset := token.NewFileSet() - f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution) - pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) - _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) - return enabled -} diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go index ade5d1e799..42c218818a 100644 --- a/vendor/golang.org/x/tools/internal/event/core/event.go +++ b/vendor/golang.org/x/tools/internal/event/core/event.go @@ -7,6 +7,7 @@ package core import ( "fmt" + "iter" "time" "golang.org/x/tools/internal/event/label" @@ -34,10 +35,8 @@ func (ev Event) Format(f fmt.State, r rune) { if !ev.at.IsZero() { fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 ")) } - for index := 0; ev.Valid(index); index++ { - if l := ev.Label(index); l.Valid() { - fmt.Fprintf(f, "\n\t%v", l) - } + for l := range ev.Labels() { + fmt.Fprintf(f, "\n\t%v", l) } } @@ -52,6 +51,22 @@ func (ev Event) Label(index int) label.Label { return ev.dynamic[index-len(ev.static)] } +// Labels returns an iterator over the event's valid labels. +func (ev Event) Labels() iter.Seq[label.Label] { + return func(yield func(label.Label) bool) { + for _, l := range ev.static { + if l.Valid() && !yield(l) { + return + } + } + for _, l := range ev.dynamic { + if l.Valid() && !yield(l) { + return + } + } + } +} + func (ev Event) Find(key label.Key) label.Label { for _, l := range ev.static { if l.Key() == key { diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go index 4cfa51b612..ac78bc3b9c 100644 --- a/vendor/golang.org/x/tools/internal/event/keys/keys.go +++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -6,14 +6,13 @@ package keys import ( "fmt" - "io" "math" "strconv" "golang.org/x/tools/internal/event/label" ) -// Value represents a key for untyped values. 
+// Value is a [label.Key] for untyped values. type Value struct { name string description string @@ -27,11 +26,11 @@ func New(name, description string) *Value { func (k *Value) Name() string { return k.name } func (k *Value) Description() string { return k.description } -func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { - fmt.Fprint(w, k.From(l)) +func (k *Value) Append(buf []byte, l label.Label) []byte { + return fmt.Append(buf, k.From(l)) } -// Get can be used to get a label for the key from a label.Map. +// Get returns the label for the key of a label.Map. func (k *Value) Get(lm label.Map) any { if t := lm.Find(k); t.Valid() { return k.From(t) @@ -39,7 +38,7 @@ func (k *Value) Get(lm label.Map) any { return nil } -// From can be used to get a value from a Label. +// From returns the value of a Label. func (k *Value) From(t label.Label) any { return t.UnpackValue() } // Of creates a new Label with this key and the supplied value. @@ -54,7 +53,7 @@ type Tag struct { description string } -// NewTag creates a new Key for tagging labels. +// NewTag creates a new [label.Key] for tagging labels. func NewTag(name, description string) *Tag { return &Tag{name: name, description: description} } @@ -62,18 +61,18 @@ func NewTag(name, description string) *Tag { func (k *Tag) Name() string { return k.name } func (k *Tag) Description() string { return k.description } -func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {} +func (k *Tag) Append(buf []byte, l label.Label) []byte { return buf } // New creates a new Label with this key. func (k *Tag) New() label.Label { return label.OfValue(k, nil) } -// Int represents a key +// Int is a [label.Key] for signed integers. type Int struct { name string description string } -// NewInt creates a new Key for int values. +// NewInt returns a new [label.Key] for int64 values. 
func NewInt(name, description string) *Int { return &Int{name: name, description: description} } @@ -81,381 +80,92 @@ func NewInt(name, description string) *Int { func (k *Int) Name() string { return k.name } func (k *Int) Description() string { return k.description } -func (k *Int) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +func (k *Int) Append(buf []byte, l label.Label) []byte { + return strconv.AppendInt(buf, k.From(l), 10) } // Of creates a new Label with this key and the supplied value. -func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) } +func (k *Int) Of(v int) label.Label { return k.Of64(int64(v)) } -// Get can be used to get a label for the key from a label.Map. -func (k *Int) Get(lm label.Map) int { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *Int) From(t label.Label) int { return int(t.Unpack64()) } - -// Int8 represents a key -type Int8 struct { - name string - description string -} - -// NewInt8 creates a new Key for int8 values. -func NewInt8(name, description string) *Int8 { - return &Int8{name: name, description: description} -} - -func (k *Int8) Name() string { return k.name } -func (k *Int8) Description() string { return k.description } - -func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *Int8) Get(lm label.Map) int8 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. 
-func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) } - -// Int16 represents a key -type Int16 struct { - name string - description string -} - -// NewInt16 creates a new Key for int16 values. -func NewInt16(name, description string) *Int16 { - return &Int16{name: name, description: description} -} - -func (k *Int16) Name() string { return k.name } -func (k *Int16) Description() string { return k.description } - -func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *Int16) Get(lm label.Map) int16 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) } - -// Int32 represents a key -type Int32 struct { - name string - description string -} +// Of64 creates a new Label with this key and the supplied value. +func (k *Int) Of64(v int64) label.Label { return label.Of64(k, uint64(v)) } -// NewInt32 creates a new Key for int32 values. -func NewInt32(name, description string) *Int32 { - return &Int32{name: name, description: description} -} - -func (k *Int32) Name() string { return k.name } -func (k *Int32) Description() string { return k.description } - -func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. 
-func (k *Int32) Get(lm label.Map) int32 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) } - -// Int64 represents a key -type Int64 struct { - name string - description string -} - -// NewInt64 creates a new Key for int64 values. -func NewInt64(name, description string) *Int64 { - return &Int64{name: name, description: description} -} - -func (k *Int64) Name() string { return k.name } -func (k *Int64) Description() string { return k.description } - -func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendInt(buf, k.From(l), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *Int64) Get(lm label.Map) int64 { +// Get returns the label for the key of a label.Map. +func (k *Int) Get(lm label.Map) int64 { if t := lm.Find(k); t.Valid() { return k.From(t) } return 0 } -// From can be used to get a value from a Label. -func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) } +// From returns the value of a Label. +func (k *Int) From(t label.Label) int64 { return int64(t.Unpack64()) } -// UInt represents a key -type UInt struct { +// Uint is a [label.Key] for unsigned integers. +type Uint struct { name string description string } -// NewUInt creates a new Key for uint values. -func NewUInt(name, description string) *UInt { - return &UInt{name: name, description: description} +// NewUint creates a new [label.Key] for unsigned values. 
+func NewUint(name, description string) *Uint { + return &Uint{name: name, description: description} } -func (k *UInt) Name() string { return k.name } -func (k *UInt) Description() string { return k.description } +func (k *Uint) Name() string { return k.name } +func (k *Uint) Description() string { return k.description } -func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +func (k *Uint) Append(buf []byte, l label.Label) []byte { + return strconv.AppendUint(buf, k.From(l), 10) } // Of creates a new Label with this key and the supplied value. -func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) } +func (k *Uint) Of(v uint64) label.Label { return label.Of64(k, v) } -// Get can be used to get a label for the key from a label.Map. -func (k *UInt) Get(lm label.Map) uint { +// Get returns the label for the key of a label.Map. +func (k *Uint) Get(lm label.Map) uint64 { if t := lm.Find(k); t.Valid() { return k.From(t) } return 0 } -// From can be used to get a value from a Label. -func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) } +// From returns the value of a Label. +func (k *Uint) From(t label.Label) uint64 { return t.Unpack64() } -// UInt8 represents a key -type UInt8 struct { +// Float is a label.Key for floating-point values. +type Float struct { name string description string } -// NewUInt8 creates a new Key for uint8 values. -func NewUInt8(name, description string) *UInt8 { - return &UInt8{name: name, description: description} +// NewFloat creates a new [label.Key] for floating-point values. 
+func NewFloat(name, description string) *Float { + return &Float{name: name, description: description} } -func (k *UInt8) Name() string { return k.name } -func (k *UInt8) Description() string { return k.description } +func (k *Float) Name() string { return k.name } +func (k *Float) Description() string { return k.description } -func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +func (k *Float) Append(buf []byte, l label.Label) []byte { + return strconv.AppendFloat(buf, k.From(l), 'E', -1, 64) } // Of creates a new Label with this key and the supplied value. -func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *UInt8) Get(lm label.Map) uint8 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) } - -// UInt16 represents a key -type UInt16 struct { - name string - description string -} - -// NewUInt16 creates a new Key for uint16 values. -func NewUInt16(name, description string) *UInt16 { - return &UInt16{name: name, description: description} -} - -func (k *UInt16) Name() string { return k.name } -func (k *UInt16) Description() string { return k.description } - -func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *UInt16) Get(lm label.Map) uint16 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. 
-func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) } - -// UInt32 represents a key -type UInt32 struct { - name string - description string -} - -// NewUInt32 creates a new Key for uint32 values. -func NewUInt32(name, description string) *UInt32 { - return &UInt32{name: name, description: description} -} - -func (k *UInt32) Name() string { return k.name } -func (k *UInt32) Description() string { return k.description } - -func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *UInt32) Get(lm label.Map) uint32 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) } - -// UInt64 represents a key -type UInt64 struct { - name string - description string -} - -// NewUInt64 creates a new Key for uint64 values. -func NewUInt64(name, description string) *UInt64 { - return &UInt64{name: name, description: description} -} - -func (k *UInt64) Name() string { return k.name } -func (k *UInt64) Description() string { return k.description } - -func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendUint(buf, k.From(l), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) } - -// Get can be used to get a label for the key from a label.Map. -func (k *UInt64) Get(lm label.Map) uint64 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. 
-func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() } - -// Float32 represents a key -type Float32 struct { - name string - description string -} - -// NewFloat32 creates a new Key for float32 values. -func NewFloat32(name, description string) *Float32 { - return &Float32{name: name, description: description} -} - -func (k *Float32) Name() string { return k.name } -func (k *Float32) Description() string { return k.description } - -func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Float32) Of(v float32) label.Label { - return label.Of64(k, uint64(math.Float32bits(v))) -} - -// Get can be used to get a label for the key from a label.Map. -func (k *Float32) Get(lm label.Map) float32 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *Float32) From(t label.Label) float32 { - return math.Float32frombits(uint32(t.Unpack64())) -} - -// Float64 represents a key -type Float64 struct { - name string - description string -} - -// NewFloat64 creates a new Key for int64 values. -func NewFloat64(name, description string) *Float64 { - return &Float64{name: name, description: description} -} - -func (k *Float64) Name() string { return k.name } -func (k *Float64) Description() string { return k.description } - -func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Float64) Of(v float64) label.Label { +func (k *Float) Of(v float64) label.Label { return label.Of64(k, math.Float64bits(v)) } -// Get can be used to get a label for the key from a label.Map. -func (k *Float64) Get(lm label.Map) float64 { +// Get returns the label for the key of a label.Map. 
+func (k *Float) Get(lm label.Map) float64 { if t := lm.Find(k); t.Valid() { return k.From(t) } return 0 } -// From can be used to get a value from a Label. -func (k *Float64) From(t label.Label) float64 { +// From returns the value of a Label. +func (k *Float) From(t label.Label) float64 { return math.Float64frombits(t.Unpack64()) } @@ -473,14 +183,14 @@ func NewString(name, description string) *String { func (k *String) Name() string { return k.name } func (k *String) Description() string { return k.description } -func (k *String) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendQuote(buf, k.From(l))) +func (k *String) Append(buf []byte, l label.Label) []byte { + return strconv.AppendQuote(buf, k.From(l)) } // Of creates a new Label with this key and the supplied value. func (k *String) Of(v string) label.Label { return label.OfString(k, v) } -// Get can be used to get a label for the key from a label.Map. +// Get returns the label for the key of a label.Map. func (k *String) Get(lm label.Map) string { if t := lm.Find(k); t.Valid() { return k.From(t) @@ -488,53 +198,16 @@ func (k *String) Get(lm label.Map) string { return "" } -// From can be used to get a value from a Label. +// From returns the value of a Label. func (k *String) From(t label.Label) string { return t.UnpackString() } -// Boolean represents a key -type Boolean struct { - name string - description string -} - -// NewBoolean creates a new Key for bool values. -func NewBoolean(name, description string) *Boolean { - return &Boolean{name: name, description: description} -} - -func (k *Boolean) Name() string { return k.name } -func (k *Boolean) Description() string { return k.description } - -func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendBool(buf, k.From(l))) -} - -// Of creates a new Label with this key and the supplied value. 
-func (k *Boolean) Of(v bool) label.Label { - if v { - return label.Of64(k, 1) - } - return label.Of64(k, 0) -} - -// Get can be used to get a label for the key from a label.Map. -func (k *Boolean) Get(lm label.Map) bool { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return false -} - -// From can be used to get a value from a Label. -func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 } - // Error represents a key type Error struct { name string description string } -// NewError creates a new Key for int64 values. +// NewError returns a new [label.Key] for error values. func NewError(name, description string) *Error { return &Error{name: name, description: description} } @@ -542,14 +215,14 @@ func NewError(name, description string) *Error { func (k *Error) Name() string { return k.name } func (k *Error) Description() string { return k.description } -func (k *Error) Format(w io.Writer, buf []byte, l label.Label) { - io.WriteString(w, k.From(l).Error()) +func (k *Error) Append(buf []byte, l label.Label) []byte { + return append(buf, k.From(l).Error()...) } -// Of creates a new Label with this key and the supplied value. +// Of returns a new Label with this key and the supplied value. func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) } -// Get can be used to get a label for the key from a label.Map. +// Get returns the label for the key of a label.Map. func (k *Error) Get(lm label.Map) error { if t := lm.Find(k); t.Valid() { return k.From(t) @@ -557,7 +230,7 @@ func (k *Error) Get(lm label.Map) error { return nil } -// From can be used to get a value from a Label. +// From returns the value of a Label. 
func (k *Error) From(t label.Label) error { err, _ := t.UnpackValue().(error) return err diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go index c37584af94..e84226f879 100644 --- a/vendor/golang.org/x/tools/internal/event/label/label.go +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -19,12 +19,8 @@ type Key interface { Name() string // Description returns a string that can be used to describe the value. Description() string - - // Format is used in formatting to append the value of the label to the - // supplied buffer. - // The formatter may use the supplied buf as a scratch area to avoid - // allocations. - Format(w io.Writer, buf []byte, l Label) + // Append appends the formatted value of the label to the supplied buffer. + Append(buf []byte, l Label) []byte } // Label holds a key and value pair. @@ -131,8 +127,7 @@ func (t Label) Format(f fmt.State, r rune) { } io.WriteString(f, t.Key().Name()) io.WriteString(f, "=") - var buf [128]byte - t.Key().Format(f, buf[:0], t) + f.Write(t.Key().Append(nil, t)) // ignore error } func (l *list) Valid(index int) bool { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 2bef2b058b..4c9450f4ee 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -242,7 +242,6 @@ import ( "strings" "golang.org/x/tools/go/types/objectpath" - "golang.org/x/tools/internal/aliases" ) // IExportShallow encodes "shallow" export data for the specified package. @@ -767,11 +766,11 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled + alias, materialized := t.(*types.Alias) // perhaps false for certain built-ins? 
var tparams *types.TypeParamList if materialized { - tparams = aliases.TypeParams(alias) + tparams = alias.TypeParams() } if tparams.Len() == 0 { w.tag(aliasTag) @@ -785,7 +784,7 @@ func (p *iexporter) doDecl(obj types.Object) { if materialized { // Preserve materialized aliases, // even of non-exported types. - t = aliases.Rhs(alias) + t = alias.Rhs() } w.typ(t, obj.Pkg()) break @@ -1011,11 +1010,11 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { } switch t := t.(type) { case *types.Alias: - if targs := aliases.TypeArgs(t); targs.Len() > 0 { + if targs := t.TypeArgs(); targs.Len() > 0 { w.startType(instanceType) w.pos(t.Obj().Pos()) w.typeList(targs, pkg) - w.typ(aliases.Origin(t), pkg) + w.typ(t.Origin(), pkg) return } w.startType(aliasType) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 4d6d50094a..1ee4e93549 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -210,7 +210,6 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte p := iimporter{ version: int(version), ipath: path, - aliases: aliases.Enabled(), shallow: shallow, reportf: reportf, @@ -370,7 +369,6 @@ type iimporter struct { version int ipath string - aliases bool shallow bool reportf ReportFunc // if non-nil, used to report bugs @@ -576,7 +574,7 @@ func (r *importReader) obj(pkg *types.Package, name string) { tparams = r.tparamList() } typ := r.typ() - obj := aliases.NewAlias(r.p.aliases, pos, pkg, name, typ, tparams) + obj := aliases.New(pos, pkg, name, typ, tparams) markBlack(obj) // workaround for golang/go#69912 r.declare(obj) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 37b4a39e9e..2e0d80585f 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ 
b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -26,7 +26,6 @@ type pkgReader struct { ctxt *types.Context imports map[string]*types.Package // previously imported packages, indexed by path - aliases bool // create types.Alias nodes // lazily initialized arrays corresponding to the unified IR // PosBase, Pkg, and Type sections, respectively. @@ -98,7 +97,6 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st ctxt: ctxt, imports: imports, - aliases: aliases.Enabled(), posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), @@ -539,7 +537,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { tparams = r.typeParamNames() } typ := r.typ() - declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams)) + declare(aliases.New(pos, objPkg, objName, typ, tparams)) case pkgbits.ObjConst: pos := r.pos() diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go index f41431c949..dacfc1dfff 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/deps.go +++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -307,7 +307,7 @@ var deps = [...]pkginfo{ {"net/textproto", "\x02\x01q\x03\x83\x01\f\n-\x01\x02\x15"}, {"net/url", "t\x03Fc\v\x10\x02\x01\x17"}, {"os", "t+\x01\x19\x03\x10\x14\x01\x03\x01\x05\x10\x018\b\x05\x01\x01\r\x06"}, - {"os/exec", "\x03\ngI'\x01\x15\x01+\x06\a\n\x01\x04\r"}, + {"os/exec", "\x03\ngI'\x01\x15\x01+\x06\a\n\x01\x03\x01\r"}, {"os/exec/internal/fdtest", "\xc2\x02"}, {"os/signal", "\r\x99\x02\x15\x05\x02"}, {"os/user", "\x02\x01q\x03\x83\x01,\r\n\x01\x02"}, diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go index 709d2fc144..4c391876e4 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/free.go +++ b/vendor/golang.org/x/tools/internal/typeparams/free.go @@ -6,8 +6,6 
@@ package typeparams import ( "go/types" - - "golang.org/x/tools/internal/aliases" ) // Free is a memoization of the set of free type parameters within a @@ -38,7 +36,7 @@ func (w *Free) Has(typ types.Type) (res bool) { break case *types.Alias: - if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() { + if t.TypeParams().Len() > t.TypeArgs().Len() { return true // This is an uninstantiated Alias. } // The expansion of an alias can have free type parameters, diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 51001666ef..7112318fc2 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -25,7 +25,6 @@ import ( "reflect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/aliases" ) func SetUsesCgo(conf *types.Config) bool { @@ -142,7 +141,7 @@ var ( func Origin(t NamedOrAlias) NamedOrAlias { switch t := t.(type) { case *types.Alias: - return aliases.Origin(t) + return t.Origin() case *types.Named: return t.Origin() } diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index d45cbe1720..a680beb405 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -48,8 +48,6 @@ How to use klog - For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) - See our documentation on [pkg.go.dev/k8s.io](https://pkg.go.dev/k8s.io/klog). -**NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater. - ### Coexisting with klog/v2 See [this example](examples/coexist_klog_v1_and_v2/) to see how to coexist with both klog/v1 and klog/v2. 
diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index d1a4751c94..73f91ea500 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -20,7 +20,9 @@ import ( "bytes" "encoding/json" "fmt" + "slices" "strconv" + "strings" "github.com/go-logr/logr" ) @@ -51,139 +53,157 @@ func WithValues(oldKV, newKV []interface{}) []interface{} { return kv } -// MergeKVs deduplicates elements provided in two key/value slices. -// -// Keys in each slice are expected to be unique, so duplicates can only occur -// when the first and second slice contain the same key. When that happens, the -// key/value pair from the second slice is used. The first slice must be well-formed -// (= even key/value pairs). The second one may have a missing value, in which -// case the special "missing value" is added to the result. -func MergeKVs(first, second []interface{}) []interface{} { - maxLength := len(first) + (len(second)+1)/2*2 - if maxLength == 0 { - // Nothing to do at all. - return nil - } - - if len(first) == 0 && len(second)%2 == 0 { - // Nothing to be overridden, second slice is well-formed - // and can be used directly. - return second - } - - // Determine which keys are in the second slice so that we can skip - // them when iterating over the first one. The code intentionally - // favors performance over completeness: we assume that keys are string - // constants and thus compare equal when the string values are equal. A - // string constant being overridden by, for example, a fmt.Stringer is - // not handled. - overrides := map[interface{}]bool{} - for i := 0; i < len(second); i += 2 { - overrides[second[i]] = true - } - merged := make([]interface{}, 0, maxLength) - for i := 0; i+1 < len(first); i += 2 { - key := first[i] - if overrides[key] { - continue - } - merged = append(merged, key, first[i+1]) - } - merged = append(merged, second...) 
- if len(merged)%2 != 0 { - merged = append(merged, missingValue) - } - return merged -} - type Formatter struct { AnyToStringHook AnyToStringFunc } type AnyToStringFunc func(v interface{}) string -// MergeKVsInto is a variant of MergeKVs which directly formats the key/value -// pairs into a buffer. -func (f Formatter) MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { - if len(first) == 0 && len(second) == 0 { - // Nothing to do at all. - return - } +const missingValue = "(MISSING)" - if len(first) == 0 && len(second)%2 == 0 { - // Nothing to be overridden, second slice is well-formed - // and can be used directly. - for i := 0; i < len(second); i += 2 { - f.KVFormat(b, second[i], second[i+1]) - } - return - } +func FormatKVs(b *bytes.Buffer, kvs ...[]interface{}) { + Formatter{}.FormatKVs(b, kvs...) +} - // Determine which keys are in the second slice so that we can skip - // them when iterating over the first one. The code intentionally - // favors performance over completeness: we assume that keys are string - // constants and thus compare equal when the string values are equal. A - // string constant being overridden by, for example, a fmt.Stringer is - // not handled. - overrides := map[interface{}]bool{} - for i := 0; i < len(second); i += 2 { - overrides[second[i]] = true - } - for i := 0; i < len(first); i += 2 { - key := first[i] - if overrides[key] { - continue +// FormatKVs formats all key/value pairs such that the output contains no +// duplicates ("last one wins"). +func (f Formatter) FormatKVs(b *bytes.Buffer, kvs ...[]interface{}) { + // De-duplication is done by optimistically formatting all key value + // pairs and then cutting out the output of those key/value pairs which + // got overwritten later. + // + // In the common case of no duplicates, the only overhead is tracking + // previous keys. 
This uses a slice with a simple linear search because + // the number of entries is typically so low that allocating a map or + // keeping a sorted slice with binary search aren't justified. + // + // Using a fixed size here makes the Go compiler use the stack as + // initial backing store for the slice, which is crucial for + // performance. + existing := make([]obsoleteKV, 0, 32) + obsolete := make([]interval, 0, 32) // Sorted by start index. + for _, keysAndValues := range kvs { + for i := 0; i < len(keysAndValues); i += 2 { + var v interface{} + k := keysAndValues[i] + if i+1 < len(keysAndValues) { + v = keysAndValues[i+1] + } else { + v = missingValue + } + var e obsoleteKV + e.start = b.Len() + e.key = f.KVFormat(b, k, v) + e.end = b.Len() + i := findObsoleteEntry(existing, e.key) + if i >= 0 { + data := b.Bytes() + if bytes.Compare(data[existing[i].start:existing[i].end], data[e.start:e.end]) == 0 { + // The new entry gets obsoleted because it's identical. + // This has the advantage that key/value pairs from + // a WithValues call always come first, even if the same + // pair gets added again later. This makes different log + // entries more consistent. + // + // The new entry has a higher start index and thus can be appended. + obsolete = append(obsolete, e.interval) + } else { + // The old entry gets obsoleted because it's value is different. + // + // Sort order is not guaranteed, we have to insert at the right place. + index, _ := slices.BinarySearchFunc(obsolete, existing[i].interval, func(a, b interval) int { return a.start - b.start }) + obsolete = slices.Insert(obsolete, index, existing[i].interval) + existing[i].interval = e.interval + } + } else { + // Instead of appending at the end and doing a + // linear search in findEntry, we could keep + // the slice sorted by key and do a binary search. 
+ // + // Above: + // i, ok := slices.BinarySearchFunc(existing, e, func(a, b entry) int { return strings.Compare(a.key, b.key) }) + // Here: + // existing = slices.Insert(existing, i, e) + // + // But that adds a dependency on the slices package + // and made performance slightly worse, presumably + // because the cost of shifting entries around + // did not pay of with faster lookups. + existing = append(existing, e) + } } - f.KVFormat(b, key, first[i+1]) } - // Round down. - l := len(second) - l = l / 2 * 2 - for i := 1; i < l; i += 2 { - f.KVFormat(b, second[i-1], second[i]) - } - if len(second)%2 == 1 { - f.KVFormat(b, second[len(second)-1], missingValue) - } -} -func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { - Formatter{}.MergeAndFormatKVs(b, first, second) -} + // If we need to remove some obsolete key/value pairs then move the memory. + if len(obsolete) > 0 { + // Potentially the next remaining output (might itself be obsolete). + from := obsolete[0].end + // Next obsolete entry. + nextObsolete := 1 + // This is the source buffer, before truncation. + all := b.Bytes() + b.Truncate(obsolete[0].start) -const missingValue = "(MISSING)" + for nextObsolete < len(obsolete) { + if from == obsolete[nextObsolete].start { + // Skip also the next obsolete key/value. + from = obsolete[nextObsolete].end + nextObsolete++ + continue + } -// KVListFormat serializes all key/value pairs into the provided buffer. -// A space gets inserted before the first pair and between each pair. -func (f Formatter) KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { - for i := 0; i < len(keysAndValues); i += 2 { - var v interface{} - k := keysAndValues[i] - if i+1 < len(keysAndValues) { - v = keysAndValues[i+1] - } else { - v = missingValue + // Preserve some output. Write uses copy, which + // explicitly allows source and destination to overlap. + // That could happen here. 
+ valid := all[from:obsolete[nextObsolete].start] + b.Write(valid) + from = obsolete[nextObsolete].end + nextObsolete++ } - f.KVFormat(b, k, v) + // Copy end of buffer. + valid := all[from:] + b.Write(valid) } } -func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { - Formatter{}.KVListFormat(b, keysAndValues...) +type obsoleteKV struct { + key string + interval +} + +// interval includes the start and excludes the end. +type interval struct { + start int + end int } -func KVFormat(b *bytes.Buffer, k, v interface{}) { - Formatter{}.KVFormat(b, k, v) +func findObsoleteEntry(entries []obsoleteKV, key string) int { + for i, entry := range entries { + if entry.key == key { + return i + } + } + return -1 } // formatAny is the fallback formatter for a value. It supports a hook (for // example, for YAML encoding) and itself uses JSON encoding. func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) { - b.WriteRune('=') if f.AnyToStringHook != nil { - b.WriteString(f.AnyToStringHook(v)) + str := f.AnyToStringHook(v) + if strings.Contains(str, "\n") { + // If it's multi-line, then pass it through writeStringValue to get start/end delimiters, + // which separates it better from any following key/value pair. + writeStringValue(b, str) + return + } + // Otherwise put it directly after the separator, on the same lime, + // The assumption is that the hook returns something where start/end are obvious. + b.WriteRune('=') + b.WriteString(str) return } + b.WriteRune('=') formatAsJSON(b, v) } diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go index d9c7d15467..b8c7e443d0 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go @@ -28,7 +28,7 @@ import ( // KVFormat serializes one key/value pair into the provided buffer. // A space gets inserted before the pair. 
-func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) string { // This is the version without slog support. Must be kept in sync with // the version in keyvalues_slog.go. @@ -37,13 +37,15 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments // for the sake of performance. Keys with spaces, // special characters, etc. will break parsing. + var key string if sK, ok := k.(string); ok { // Avoid one allocation when the key is a string, which // normally it should be. - b.WriteString(sK) + key = sK } else { - b.WriteString(fmt.Sprintf("%s", k)) + key = fmt.Sprintf("%s", k) } + b.WriteString(key) // The type checks are sorted so that more frequently used ones // come first because that is then faster in the common @@ -94,4 +96,6 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { default: f.formatAny(b, v) } + + return key } diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go index 89acf97723..8e00843645 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go @@ -29,8 +29,8 @@ import ( ) // KVFormat serializes one key/value pair into the provided buffer. -// A space gets inserted before the pair. -func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { +// A space gets inserted before the pair. It returns the key. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) string { // This is the version without slog support. Must be kept in sync with // the version in keyvalues_slog.go. 
@@ -39,13 +39,15 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments // for the sake of performance. Keys with spaces, // special characters, etc. will break parsing. + var key string if sK, ok := k.(string); ok { // Avoid one allocation when the key is a string, which // normally it should be. - b.WriteString(sK) + key = sK } else { - b.WriteString(fmt.Sprintf("%s", k)) + key = fmt.Sprintf("%s", k) } + b.WriteString(key) // The type checks are sorted so that more frequently used ones // come first because that is then faster in the common @@ -112,6 +114,8 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { default: f.formatAny(b, v) } + + return key } // generateJSON has the same preference for plain strings as KVFormat. diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 47ec9466a6..319ffbe248 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -58,15 +58,30 @@ // // -logtostderr=true // Logs are written to standard error instead of to files. -// This shortcuts most of the usual output routing: -// -alsologtostderr, -stderrthreshold and -log_dir have no -// effect and output redirection at runtime with SetOutput is -// ignored. +// By default, all logs are written regardless of severity +// (legacy behavior). To filter logs by severity when +// -logtostderr=true, set -legacy_stderr_threshold_behavior=false +// and use -stderrthreshold. +// With -legacy_stderr_threshold_behavior=true, +// -stderrthreshold has no effect. +// +// The following flags always have no effect: +// -alsologtostderr, -alsologtostderrthreshold, and -log_dir. +// Output redirection at runtime with SetOutput is also ignored. // -alsologtostderr=false // Logs are written to standard error as well as to files. 
+// -alsologtostderrthreshold=INFO +// Log events at or above this severity are logged to standard +// error when -alsologtostderr=true (no effect when -logtostderr=true). +// Default is INFO to maintain backward compatibility. // -stderrthreshold=ERROR // Log events at or above this severity are logged to standard -// error as well as to files. +// error as well as to files. When -logtostderr=true, this flag +// has no effect unless -legacy_stderr_threshold_behavior=false. +// -legacy_stderr_threshold_behavior=true +// If true, -stderrthreshold is ignored when -logtostderr=true +// (legacy behavior). If false, -stderrthreshold is honored even +// when -logtostderr=true, allowing severity-based filtering. // -log_dir="" // Log files will be written to this directory instead of the // default temporary directory. @@ -156,7 +171,7 @@ func (s *severityValue) Set(value string) error { } threshold = severity.Severity(v) } - logging.stderrThreshold.set(threshold) + s.set(threshold) return nil } @@ -409,6 +424,15 @@ var commandLine flag.FlagSet // init sets up the defaults and creates command line flags. func init() { + // Initialize severity thresholds + logging.stderrThreshold = severityValue{ + Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. + } + logging.alsologtostderrthreshold = severityValue{ + Severity: severity.InfoLog, // Default alsologtostderrthreshold is INFO (to maintain backward compatibility). 
+ } + logging.setVState(0, nil, false) + commandLine.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory (no effect when -logtostderr=true)") commandLine.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file (no effect when -logtostderr=true)") commandLine.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800, @@ -416,16 +440,14 @@ func init() { "If the value is 0, the maximum file size is unlimited.") commandLine.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files") commandLine.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files (no effect when -logtostderr=true)") - logging.setVState(0, nil, false) + commandLine.BoolVar(&logging.legacyStderrThresholdBehavior, "legacy_stderr_threshold_behavior", true, "If true, stderrthreshold is ignored when logtostderr=true (legacy behavior). If false, stderrthreshold is honored even when logtostderr=true") commandLine.Var(&logging.verbosity, "v", "number for the log level verbosity") commandLine.BoolVar(&logging.addDirHeader, "add_dir_header", false, "If true, adds the file directory to the header of the log messages") commandLine.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages") commandLine.BoolVar(&logging.oneOutput, "one_output", false, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)") commandLine.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when opening log files (no effect when -logtostderr=true)") - logging.stderrThreshold = severityValue{ - Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. 
- } - commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true)") + commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true unless -legacy_stderr_threshold_behavior=false)") + commandLine.Var(&logging.alsologtostderrthreshold, "alsologtostderrthreshold", "logs at or above this threshold go to stderr when -alsologtostderr=true (no effect when -logtostderr=true)") commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") @@ -470,11 +492,13 @@ type settings struct { // Boolean flags. Not handled atomically because the flag.Value interface // does not let us avoid the =true, and that shorthand is necessary for // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + legacyStderrThresholdBehavior bool // The -legacy_stderr_threshold_behavior flag. // Level flag. Handled atomically. - stderrThreshold severityValue // The -stderrthreshold flag. + stderrThreshold severityValue // The -stderrthreshold flag. + alsologtostderrthreshold severityValue // The -alsologtostderrthreshold flag. // Access to all of the following fields must be protected via a mutex. @@ -809,16 +833,21 @@ func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg str // printS is called from infoS and errorS if logger is not specified. 
// set log severity by s func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { - // Only create a new buffer if we don't have one cached. - b := buffer.GetBuffer() // The message is always quoted, even if it contains line breaks. // If developers want multi-line output, they should use a small, fixed // message and put the multi-line output into a value. - b.WriteString(strconv.Quote(msg)) + qMsg := make([]byte, 0, 1024) + qMsg = strconv.AppendQuote(qMsg, msg) + + // Only create a new buffer if we don't have one cached. + b := buffer.GetBuffer() + b.Write(qMsg) + + var errKV []interface{} if err != nil { - serialize.KVListFormat(&b.Buffer, "err", err) + errKV = []interface{}{"err", err} } - serialize.KVListFormat(&b.Buffer, keysAndValues...) + serialize.FormatKVs(&b.Buffer, errKV, keysAndValues) l.printDepth(s, nil, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. buffer.PutBuffer(b) @@ -885,9 +914,25 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu } } } else if l.toStderr { - os.Stderr.Write(data) + // When logging to stderr only, check if we should filter by severity. + // This is controlled by the legacy_stderr_threshold_behavior flag. 
+ if l.legacyStderrThresholdBehavior { + // Legacy behavior: always write to stderr, ignore stderrthreshold + os.Stderr.Write(data) + } else { + // New behavior: honor stderrthreshold even when logtostderr=true + if s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + } } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + // Write to stderr if any of these conditions are met: + // - alsoToStderr is set (legacy behavior) + // - alsologtostderr is set and severity meets alsologtostderrthreshold + // - alsologtostderr is not set and severity meets stderrThreshold + if alsoToStderr || + (l.alsoToStderr && s >= l.alsologtostderrthreshold.get()) || + (!l.alsoToStderr && s >= l.stderrThreshold.get()) { os.Stderr.Write(data) } diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go index efec96fd45..6204c7bb43 100644 --- a/vendor/k8s.io/klog/v2/klogr.go +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -53,7 +53,7 @@ func (l *klogger) Init(info logr.RuntimeInfo) { } func (l *klogger) Info(level int, msg string, kvList ...interface{}) { - merged := serialize.MergeKVs(l.values, kvList) + merged := serialize.WithValues(l.values, kvList) // Skip this function. VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) } @@ -63,7 +63,7 @@ func (l *klogger) Enabled(level int) bool { } func (l *klogger) Error(err error, msg string, kvList ...interface{}) { - merged := serialize.MergeKVs(l.values, kvList) + merged := serialize.WithValues(l.values, kvList) ErrorSDepth(l.callDepth+1, err, msg, merged...) } diff --git a/vendor/k8s.io/klog/v2/klogr_slog.go b/vendor/k8s.io/klog/v2/klogr_slog.go index c77d7baafa..901e28dd39 100644 --- a/vendor/k8s.io/klog/v2/klogr_slog.go +++ b/vendor/k8s.io/klog/v2/klogr_slog.go @@ -63,12 +63,17 @@ func slogOutput(file string, line int, now time.Time, err error, s severity.Seve } // See printS. 
+ qMsg := make([]byte, 0, 1024) + qMsg = strconv.AppendQuote(qMsg, msg) + b := buffer.GetBuffer() - b.WriteString(strconv.Quote(msg)) + b.Write(qMsg) + + var errKV []interface{} if err != nil { - serialize.KVListFormat(&b.Buffer, "err", err) + errKV = []interface{}{"err", err} } - serialize.KVListFormat(&b.Buffer, kvList...) + serialize.FormatKVs(&b.Buffer, errKV, kvList) // See print + header. buf := logging.formatHeader(s, file, line, now) diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go index 775b3b0c36..29cec61930 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go @@ -16,10 +16,6 @@ package spec import ( "encoding/json" - "net/http" - "os" - "path/filepath" - "github.com/go-openapi/jsonreference" "k8s.io/kube-openapi/pkg/internal" @@ -56,52 +52,6 @@ func (r *Ref) RemoteURI() string { return u.String() } -// IsValidURI returns true when the url the ref points to can be found -func (r *Ref) IsValidURI(basepaths ...string) bool { - if r.String() == "" { - return true - } - - v := r.RemoteURI() - if v == "" { - return true - } - - if r.HasFullURL { - rr, err := http.Get(v) - if err != nil { - return false - } - - return rr.StatusCode/100 == 2 - } - - if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { - return false - } - - // check for local file - pth := v - if r.HasURLPathOnly { - base := "." 
- if len(basepaths) > 0 { - base = filepath.Dir(filepath.Join(basepaths...)) - } - p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) - if e != nil { - return false - } - pth = p - } - - fi, err := os.Stat(filepath.ToSlash(pth)) - if err != nil { - return false - } - - return !fi.IsDir() -} - // Inherits creates a new reference from a parent and a child // If the child cannot inherit from the parent, an error is returned func (r *Ref) Inherits(child Ref) (*Ref, error) { diff --git a/vendor/k8s.io/utils/buffer/ring_fixed.go b/vendor/k8s.io/utils/buffer/ring_fixed.go new file mode 100644 index 0000000000..a104e12a38 --- /dev/null +++ b/vendor/k8s.io/utils/buffer/ring_fixed.go @@ -0,0 +1,120 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package buffer + +import ( + "errors" + "io" +) + +// Compile-time check that *TypedRingFixed[byte] implements io.Writer. +var _ io.Writer = (*TypedRingFixed[byte])(nil) + +// ErrInvalidSize indicates size must be > 0 +var ErrInvalidSize = errors.New("size must be positive") + +// TypedRingFixed is a fixed-size circular buffer for elements of type T. +// Writes overwrite older data, keeping only the last N elements. +// Not thread safe. +type TypedRingFixed[T any] struct { + data []T + size int + writeCursor int + written int64 +} + +// NewTypedRingFixed creates a circular buffer with the given capacity (must be > 0). 
+func NewTypedRingFixed[T any](size int) (*TypedRingFixed[T], error) { + if size <= 0 { + return nil, ErrInvalidSize + } + return &TypedRingFixed[T]{ + data: make([]T, size), + size: size, + }, nil +} + +// Write writes p to the buffer, overwriting old data if needed. +func (r *TypedRingFixed[T]) Write(p []T) (int, error) { + originalLen := len(p) + r.written += int64(originalLen) + + // If the input is larger than our buffer, only keep the last 'size' elements + if originalLen > r.size { + p = p[originalLen-r.size:] + } + + // Copy data, handling wrap-around + n := len(p) + remain := r.size - r.writeCursor + if n <= remain { + copy(r.data[r.writeCursor:], p) + } else { + copy(r.data[r.writeCursor:], p[:remain]) + copy(r.data, p[remain:]) + } + + r.writeCursor = (r.writeCursor + n) % r.size + return originalLen, nil +} + +// Slice returns buffer contents in write order. Don't modify the returned slice. +func (r *TypedRingFixed[T]) Slice() []T { + if r.written == 0 { + return nil + } + + // Buffer hasn't wrapped yet + if r.written < int64(r.size) { + return r.data[:r.writeCursor] + } + + // Buffer has wrapped - need to return data in correct order + // Data from writeCursor to end is oldest, data from 0 to writeCursor is newest + if r.writeCursor == 0 { + return r.data + } + + out := make([]T, r.size) + copy(out, r.data[r.writeCursor:]) + copy(out[r.size-r.writeCursor:], r.data[:r.writeCursor]) + return out +} + +// Size returns the buffer capacity. +func (r *TypedRingFixed[T]) Size() int { + return r.size +} + +// Len returns how many elements are currently in the buffer. +func (r *TypedRingFixed[T]) Len() int { + if r.written < int64(r.size) { + return int(r.written) + } + return r.size +} + +// TotalWritten returns total elements ever written (including overwritten ones). +func (r *TypedRingFixed[T]) TotalWritten() int64 { + return r.written +} + +// Reset clears the buffer. 
+func (r *TypedRingFixed[T]) Reset() { + r.writeCursor = 0 + r.written = 0 +} diff --git a/vendor/k8s.io/utils/strings/slices/slices.go b/vendor/k8s.io/utils/strings/slices/slices.go index 8e21838f24..35657a7fcd 100644 --- a/vendor/k8s.io/utils/strings/slices/slices.go +++ b/vendor/k8s.io/utils/strings/slices/slices.go @@ -20,27 +20,23 @@ limitations under the License. // replace "stringslices" if the "slices" package becomes standard. package slices +import goslices "slices" + // Equal reports whether two slices are equal: the same length and all // elements equal. If the lengths are different, Equal returns false. // Otherwise, the elements are compared in index order, and the // comparison stops at the first unequal pair. -func Equal(s1, s2 []string) bool { - if len(s1) != len(s2) { - return false - } - for i, n := range s1 { - if n != s2[i] { - return false - } - } - return true -} +// +// Deprecated: use stdlib slices.Equal instead. +var Equal = goslices.Equal[[]string] // Filter appends to d each element e of s for which keep(e) returns true. // It returns the modified d. d may be s[:0], in which case the kept // elements will be stored in the same slice. // if the slices overlap in some other way, the results are unspecified. // To create a new slice with the filtered results, pass nil for d. +// +// Deprecated: use stdlib slices.DeleteFunc instead. func Filter(d, s []string, keep func(string) bool) []string { for _, n := range s { if keep(n) { @@ -51,32 +47,17 @@ func Filter(d, s []string, keep func(string) bool) []string { } // Contains reports whether v is present in s. -func Contains(s []string, v string) bool { - return Index(s, v) >= 0 -} +// +// Deprecated: use stdlib slices.Contains instead. +var Contains = goslices.Contains[[]string] // Index returns the index of the first occurrence of v in s, or -1 if // not present. 
-func Index(s []string, v string) int { - // "Contains" may be replaced with "Index(s, v) >= 0": - // https://github.com/golang/go/issues/45955#issuecomment-873377947 - for i, n := range s { - if n == v { - return i - } - } - return -1 -} - -// Functions below are not in https://github.com/golang/go/issues/45955 +// +// Deprecated: use stdlib slices.Index instead. +var Index = goslices.Index[[]string] // Clone returns a new clone of s. -func Clone(s []string) []string { - // https://github.com/go101/go101/wiki/There-is-not-a-perfect-way-to-clone-slices-in-Go - if s == nil { - return nil - } - c := make([]string, len(s)) - copy(c, s) - return c -} +// +// Deprecated: use stdlib slices.Clone instead. +var Clone = goslices.Clone[[]string] diff --git a/vendor/modules.txt b/vendor/modules.txt index 9074d8450a..af078d1035 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -224,8 +224,8 @@ github.com/openshift/api/security/v1/zz_generated.crd-manifests # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.87.0 -## explicit; go 1.24.0 +# github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.90.1 +## explicit; go 1.25.0 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1 # github.com/prometheus/client_golang v1.23.2 @@ -357,7 +357,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# go.yaml.in/yaml/v2 v2.4.3 +# go.yaml.in/yaml/v2 v2.4.4 ## explicit; go 1.15 go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 @@ -366,10 +366,10 @@ go.yaml.in/yaml/v3 # golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b ## explicit; go 1.23.0 golang.org/x/exp/slices -# golang.org/x/mod v0.32.0 +# golang.org/x/mod v0.33.0 ## explicit; go 1.24.0 
golang.org/x/mod/semver -# golang.org/x/net v0.51.0 +# golang.org/x/net v0.52.0 ## explicit; go 1.25.0 golang.org/x/net/html golang.org/x/net/html/atom @@ -389,8 +389,8 @@ golang.org/x/net/websocket golang.org/x/oauth2 golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/internal -# golang.org/x/sync v0.19.0 -## explicit; go 1.24.0 +# golang.org/x/sync v0.20.0 +## explicit; go 1.25.0 golang.org/x/sync/errgroup golang.org/x/sync/singleflight # golang.org/x/sys v0.42.0 @@ -399,11 +399,11 @@ golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.40.0 -## explicit; go 1.24.0 +# golang.org/x/term v0.41.0 +## explicit; go 1.25.0 golang.org/x/term -# golang.org/x/text v0.34.0 -## explicit; go 1.24.0 +# golang.org/x/text v0.35.0 +## explicit; go 1.25.0 golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex @@ -435,7 +435,7 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.12.0 ## explicit; go 1.23.0 golang.org/x/time/rate -# golang.org/x/tools v0.41.0 +# golang.org/x/tools v0.42.0 ## explicit; go 1.24.0 golang.org/x/tools/cover golang.org/x/tools/go/ast/edge @@ -1137,8 +1137,8 @@ k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version k8s.io/component-base/zpages/features -# k8s.io/klog/v2 v2.130.1 -## explicit; go 1.18 +# k8s.io/klog/v2 v2.140.0 +## explicit; go 1.21 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer k8s.io/klog/v2/internal/clock @@ -1150,7 +1150,7 @@ k8s.io/klog/v2/internal/sloghandler ## explicit; go 1.25.0 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 -# k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 +# k8s.io/kube-openapi v0.0.0-20260317180543-43fb72c5454a ## explicit; go 1.23.0 k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common @@ -1166,8 +1166,8 @@ k8s.io/kube-openapi/pkg/validation/errors 
k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 -## explicit; go 1.18 +# k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 +## explicit; go 1.23 k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/internal/third_party/forked/golang/golang-lru @@ -1248,7 +1248,7 @@ sigs.k8s.io/json/internal/golang/encoding/json ## explicit; go 1.18 sigs.k8s.io/randfill sigs.k8s.io/randfill/bytesource -# sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 +# sigs.k8s.io/structured-merge-diff/v6 v6.3.2 ## explicit; go 1.23 sigs.k8s.io/structured-merge-diff/v6/fieldpath sigs.k8s.io/structured-merge-diff/v6/merge