From cff1a658b414943ad29c4e7aecfb83b3ba9d6214 Mon Sep 17 00:00:00 2001 From: Johannes Rauh Date: Thu, 23 Nov 2023 10:16:54 +0100 Subject: [PATCH 1/8] Feature for syncing pod and container metrics to database --- cmd/icinga-kubernetes/main.go | 44 +++++- pkg/schema/metric.go | 29 ++++ pkg/sync/metrics.go | 255 ++++++++++++++++++++++++++++++++++ pkg/sync/sync.go | 15 +- schema/mysql/schema.sql | 40 ++++++ 5 files changed, 375 insertions(+), 8 deletions(-) create mode 100644 pkg/schema/metric.go create mode 100644 pkg/sync/metrics.go diff --git a/cmd/icinga-kubernetes/main.go b/cmd/icinga-kubernetes/main.go index 830ff08e..a6bfde93 100644 --- a/cmd/icinga-kubernetes/main.go +++ b/cmd/icinga-kubernetes/main.go @@ -16,6 +16,7 @@ import ( kinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" kclientcmd "k8s.io/client-go/tools/clientcmd" + metricsv "k8s.io/metrics/pkg/client/clientset/versioned" ) func main() { @@ -30,6 +31,11 @@ func main() { logging.Fatal(errors.Wrap(err, "can't create Kubernetes client")) } + mk, err := metricsv.NewForConfig(kconfig) + if err != nil { + logging.Fatal(errors.Wrap(err, "can't create Kubernetes metrics client")) + } + flags, err := config.ParseFlags[internal.Flags]() if err != nil { logging.Fatal(errors.Wrap(err, "can't parse flags")) @@ -71,11 +77,8 @@ func main() { g, ctx := errgroup.WithContext(ctx) - forwardUpsertPodsChannel := make(chan database.Entity) - defer close(forwardUpsertPodsChannel) - - forwardDeletePodsChannel := make(chan any) - defer close(forwardDeletePodsChannel) + // node forward channels + forwardDeleteNodesToMetricChannel := make(chan contracts.KDelete) g.Go(func() error { return sync.NewSync( @@ -83,7 +86,10 @@ func main() { schema.NewNode, informers.Core().V1().Nodes().Informer(), logs.GetChildLogger("Nodes"), - ).Run(ctx) + ).Run( + ctx, + sync.WithForwardDeleteToMetric(forwardDeleteNodesToMetricChannel), + ) }) g.Go(func() error { @@ -95,9 +101,12 @@ func main() { ).Run(ctx) }) + // pod forward channels forwardUpsertPodsToLogChannel := make(chan contracts.KUpsert) forwardDeletePodsToLogChannel := make(chan contracts.KDelete) + forwardDeletePodsToMetricChannel := make(chan contracts.KDelete) + g.Go(func() error { defer close(forwardUpsertPodsToLogChannel) @@ -115,6 +124,7 @@ func main() { ) }) + // sync logs logSync := sync.NewLogSync(k, db, logs.GetChildLogger("ContainerLogs")) g.Go(func() error { @@ -125,6 +135,28 @@ func main() { return logSync.Run(ctx) }) + // sync pod and container metrics + metricsSync := sync.NewMetricSync(mk, db, logs.GetChildLogger("Metrics")) + + g.Go(func() error { + return metricsSync.Run(ctx) + }) + + g.Go(func() error { + return metricsSync.Clean(ctx, forwardDeletePodsToMetricChannel) + }) + + // sync node metrics + nodeMetricSync := sync.NewNodeMetricSync(mk, db, logs.GetChildLogger("NodeMetrics")) + + g.Go(func() error { + return nodeMetricSync.Run(ctx) + }) + + g.Go(func() error { + return nodeMetricSync.Clean(ctx, forwardDeleteNodesToMetricChannel) + }) + if err := g.Wait(); err != nil { logging.Fatal(errors.Wrap(err, "can't sync")) } diff --git a/pkg/schema/metric.go b/pkg/schema/metric.go new file mode 100644 index 00000000..9940aec0 --- /dev/null +++ b/pkg/schema/metric.go @@ -0,0 +1,29 @@ +package schema + +type PodMetric struct { + kmetaWithoutNamespace + ReferenceId []byte + Timestamp int64 + Cpu int64 + Memory int64 + Storage int64 +} + +type ContainerMetric struct { + kmetaWithoutNamespace + ContainerReferenceId []byte + PodReferenceId []byte + Timestamp int64 + Cpu int64 + 
Memory int64 + Storage int64 +} + +type NodeMetric struct { + kmetaWithoutNamespace + NodeId []byte + Timestamp int64 + Cpu int64 + Memory int64 + Storage int64 +} diff --git a/pkg/sync/metrics.go b/pkg/sync/metrics.go new file mode 100644 index 00000000..a95c1551 --- /dev/null +++ b/pkg/sync/metrics.go @@ -0,0 +1,255 @@ +package sync + +import ( + "context" + "crypto/sha1" + "fmt" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-kubernetes/pkg/contracts" + "github.com/icinga/icinga-kubernetes/pkg/schema" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "time" + + //kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metricsv "k8s.io/metrics/pkg/client/clientset/versioned" +) + +type MetricSync struct { + metricsClientset *metricsv.Clientset + db *database.DB + logger *logging.Logger +} + +func NewMetricSync(metricsClientset *metricsv.Clientset, db *database.DB, logger *logging.Logger) *MetricSync { + return &MetricSync{ + metricsClientset: metricsClientset, + db: db, + logger: logger, + } +} + +func (ms *MetricSync) Run(ctx context.Context) error { + + ms.logger.Info("Starting sync") + + g, ctx := errgroup.WithContext(ctx) + + upsertPodMetrics := make(chan database.Entity) + upsertContainerMetrics := make(chan database.Entity) + + g.Go(func() error { + defer close(upsertPodMetrics) + defer close(upsertContainerMetrics) + + for { + metrics, err := ms.metricsClientset.MetricsV1beta1().PodMetricses(kmetav1.NamespaceAll).List(ctx, kmetav1.ListOptions{}) + if err != nil { + return errors.Wrap(err, "error getting metrics from api") + } + + for _, pod := range metrics.Items { + + podId := sha1.Sum([]byte(pod.Namespace + "/" + pod.Name)) + + newPodMetric := &schema.PodMetric{ + ReferenceId: podId[:], + Timestamp: pod.Timestamp.UnixMilli(), + } + + for _, container := range pod.Containers { + + containerId := sha1.Sum([]byte(pod.Namespace + "/" + pod.Name + "/" + container.Name)) + + newContainerMetric := &schema.ContainerMetric{ + ContainerReferenceId: containerId[:], + PodReferenceId: podId[:], + Timestamp: pod.Timestamp.UnixMilli(), + Cpu: container.Usage.Cpu().MilliValue(), + Memory: container.Usage.Memory().Value(), + Storage: container.Usage.Storage().Value(), + } + + upsertContainerMetrics <- newContainerMetric + + newPodMetric.Cpu += container.Usage.Cpu().MilliValue() + newPodMetric.Memory += container.Usage.Memory().Value() + newPodMetric.Storage += container.Usage.Storage().Value() + } + + upsertPodMetrics <- newPodMetric + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(time.Second * 5): + //case <-time.After(time.Minute): + } + } + }) + + g.Go(func() error { + return ms.db.UpsertStreamedWithStatement(ctx, upsertPodMetrics, ms.podMetricUpsertStmt(), 5) + }) + + g.Go(func() error { + return ms.db.UpsertStreamedWithStatement(ctx, upsertContainerMetrics, ms.containerMetricUpsertStmt(), 6) + }) + + return g.Wait() +} + +func (ms *MetricSync) Clean(ctx context.Context, deleteChannel <-chan contracts.KDelete) error { + + g, ctx := errgroup.WithContext(ctx) + + deletesPod := make(chan any) + deletesContainer := make(chan any) + + g.Go(func() error { + defer close(deletesPod) + defer close(deletesContainer) + + for { + select { + case kdelete, more := <-deleteChannel: + if !more { + return nil + } + + deletesPod <- kdelete.ID() + deletesContainer <- kdelete.ID() + + case <-ctx.Done(): + return ctx.Err() + } + } + }) + + g.Go(func() error 
{ + return ms.db.DeleteStreamedByField(ctx, &schema.PodMetric{}, "reference_id", deletesPod) + }) + + g.Go(func() error { + return ms.db.DeleteStreamedByField(ctx, &schema.ContainerMetric{}, "pod_reference_id", deletesContainer) + }) + + return g.Wait() +} + +func (ms *MetricSync) podMetricUpsertStmt() string { + return fmt.Sprintf( + "INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s", + "pod_metric", + "reference_id, timestamp, cpu, memory, storage", + ":reference_id, :timestamp, :cpu, :memory, :storage", + "timestamp=VALUES(timestamp), cpu=VALUES(cpu), memory=VALUES(memory), storage=VALUES(storage)", + ) +} + +func (ms *MetricSync) containerMetricUpsertStmt() string { + return fmt.Sprintf( + "INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s", + "container_metric", + "container_reference_id, pod_reference_id, timestamp, cpu, memory, storage", + ":container_reference_id, :pod_reference_id, :timestamp, :cpu, :memory, :storage", + "timestamp=VALUES(timestamp), cpu=VALUES(cpu), memory=VALUES(memory), storage=VALUES(storage)", + ) +} + +type NodeMetricSync struct { + metricsClientset *metricsv.Clientset + db *database.DB + logger *logging.Logger +} + +func NewNodeMetricSync(metricClientset *metricsv.Clientset, db *database.DB, logger *logging.Logger) *NodeMetricSync { + return &NodeMetricSync{ + metricsClientset: metricClientset, + db: db, + logger: logger, + } +} + +func (nms *NodeMetricSync) Run(ctx context.Context) error { + + g, ctx := errgroup.WithContext(ctx) + + upsertNodeMetrics := make(chan database.Entity) + + g.Go(func() error { + + defer close(upsertNodeMetrics) + + for { + metrics, err := nms.metricsClientset.MetricsV1beta1().NodeMetricses().List(ctx, kmetav1.ListOptions{}) + if err != nil { + return errors.Wrap(err, "error getting node metrics from api") + } + + for _, node := range metrics.Items { + nodeId := sha1.Sum([]byte(node.Name)) + + newNodeMetric := &schema.NodeMetric{ + NodeId: nodeId[:], + Timestamp: node.Timestamp.UnixMilli(), + Cpu: node.Usage.Cpu().MilliValue(), + Memory: node.Usage.Memory().Value(), + Storage: node.Usage.Storage().Value(), + } + + upsertNodeMetrics <- newNodeMetric + } + } + }) + + g.Go(func() error { + return nms.db.UpsertStreamedWithStatement(ctx, upsertNodeMetrics, nms.nodeMetricUpsertStmt(), 5) + }) + + return g.Wait() +} + +func (nms *NodeMetricSync) Clean(ctx context.Context, deleteChannel <-chan contracts.KDelete) error { + + g, ctx := errgroup.WithContext(ctx) + + deletes := make(chan any) + + g.Go(func() error { + defer close(deletes) + + for { + select { + case kdelete, more := <-deleteChannel: + if !more { + return nil + } + + deletes <- kdelete.ID() + + case <-ctx.Done(): + return ctx.Err() + } + } + }) + + g.Go(func() error { + return nms.db.DeleteStreamedByField(ctx, &schema.NodeMetric{}, "node_id", deletes) + }) + + return g.Wait() +} + +func (nms *NodeMetricSync) nodeMetricUpsertStmt() string { + return fmt.Sprintf( + "INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s", + "node_metric", + "node_id, timestamp, cpu, memory, storage", + ":node_id, :timestamp, :cpu, :memory, :storage", + "timestamp=VALUES(timestamp), cpu=VALUES(cpu), memory=VALUES(memory), storage=VALUES(storage)", + ) +} diff --git a/pkg/sync/sync.go b/pkg/sync/sync.go index 5c587010..24ee56ec 100644 --- a/pkg/sync/sync.go +++ b/pkg/sync/sync.go @@ -53,11 +53,18 @@ func WithForwardDeleteToLog(channel chan<- contracts.KDelete) SyncOption { } } +func WithForwardDeleteToMetric(channel chan<- contracts.KDelete) SyncOption { + return 
func(options *SyncOptions) { + options.forwardDeleteToMetricChannel = channel + } +} + type SyncOption func(options *SyncOptions) type SyncOptions struct { - forwardUpsertToLogChannel chan<- contracts.KUpsert - forwardDeleteToLogChannel chan<- contracts.KDelete + forwardUpsertToLogChannel chan<- contracts.KUpsert + forwardDeleteToLogChannel chan<- contracts.KDelete + forwardDeleteToMetricChannel chan<- contracts.KDelete } func NewOptionStorage(execOptions ...SyncOption) *SyncOptions { @@ -186,6 +193,10 @@ func (s *sync) Run(ctx context.Context, execOptions ...SyncOption) error { multiplexDelete.AddChannel(syncOptions.forwardDeleteToLogChannel) } + if syncOptions.forwardDeleteToMetricChannel != nil { + multiplexDelete.AddChannel(syncOptions.forwardDeleteToMetricChannel) + } + // run delete channel spreader g.Go(func() error { return multiplexDelete.Run(ctx) diff --git a/schema/mysql/schema.sql b/schema/mysql/schema.sql index 93aebbb1..78802d45 100644 --- a/schema/mysql/schema.sql +++ b/schema/mysql/schema.sql @@ -49,3 +49,43 @@ CREATE TABLE log ) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4 COLLATE = utf8mb4_bin; + + +CREATE TABLE pod_metric +( + reference_id BINARY(20) NOT NULL, + timestamp BIGINT NOT NULL, + cpu BIGINT NOT NULL, + memory BIGINT NOT NULL, + storage BIGINT NOT NULL, + PRIMARY KEY (reference_id) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 + COLLATE = utf8mb4_bin; + + +CREATE TABLE container_metric +( + container_reference_id BINARY(20) NOT NULL, + pod_reference_id BINARY(20) NOT NULL, + timestamp BIGINT NOT NULL, + cpu BIGINT NOT NULL, + memory BIGINT NOT NULL, + storage BIGINT NOT NULL, + PRIMARY KEY (container_reference_id) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 + COLLATE = utf8mb4_bin; + + +CREATE TABLE node_metric +( + node_id BINARY(20) NOT NULL, + timestamp BIGINT NOT NULL, + cpu BIGINT NOT NULL, + memory BIGINT NOT NULL, + storage BIGINT NOT NULL, + PRIMARY KEY (node_id) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 + COLLATE = utf8mb4_bin; From 591a806a11808ba0802a145a560f161afb5ffed4 Mon Sep 17 00:00:00 2001 From: Johannes Rauh Date: Mon, 27 Nov 2023 10:39:11 +0100 Subject: [PATCH 2/8] Comment metrics --- pkg/sync/metrics.go | 89 ++++++++++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 38 deletions(-) diff --git a/pkg/sync/metrics.go b/pkg/sync/metrics.go index a95c1551..cb8a09c2 100644 --- a/pkg/sync/metrics.go +++ b/pkg/sync/metrics.go @@ -17,12 +17,14 @@ import ( metricsv "k8s.io/metrics/pkg/client/clientset/versioned" ) +// MetricSync syncs container and pod metrics to the database type MetricSync struct { metricsClientset *metricsv.Clientset db *database.DB logger *logging.Logger } +// NewMetricSync creates new MetricSync initialized with metricsClientset, database and logger func NewMetricSync(metricsClientset *metricsv.Clientset, db *database.DB, logger *logging.Logger) *MetricSync { return &MetricSync{ metricsClientset: metricsClientset, @@ -31,6 +33,31 @@ func NewMetricSync(metricsClientset *metricsv.Clientset, db *database.DB, logger } } +// podMetricUpsertStmt returns database upsert statement to upsert pod metrics +func (ms *MetricSync) podMetricUpsertStmt() string { + return fmt.Sprintf( + "INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s", + "pod_metric", + "reference_id, timestamp, cpu, memory, storage", + ":reference_id, :timestamp, :cpu, :memory, :storage", + "timestamp=VALUES(timestamp), cpu=VALUES(cpu), memory=VALUES(memory), storage=VALUES(storage)", + ) +} + +// containerMetricUpsertStmt returns database 
upsert statement to upsert container metrics +func (ms *MetricSync) containerMetricUpsertStmt() string { + return fmt.Sprintf( + "INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s", + "container_metric", + "container_reference_id, pod_reference_id, timestamp, cpu, memory, storage", + ":container_reference_id, :pod_reference_id, :timestamp, :cpu, :memory, :storage", + "timestamp=VALUES(timestamp), cpu=VALUES(cpu), memory=VALUES(memory), storage=VALUES(storage)", + ) +} + +// Run starts syncing the metrics to the database. Therefore, it gets a list of all pods +// and the belonging containers together with their metrics from the API every minute. +// The pod metrics are the container metrics summed up by pod. func (ms *MetricSync) Run(ctx context.Context) error { ms.logger.Info("Starting sync") @@ -85,23 +112,23 @@ func (ms *MetricSync) Run(ctx context.Context) error { select { case <-ctx.Done(): return ctx.Err() - case <-time.After(time.Second * 5): - //case <-time.After(time.Minute): + case <-time.After(time.Minute): } } }) g.Go(func() error { - return ms.db.UpsertStreamedWithStatement(ctx, upsertPodMetrics, ms.podMetricUpsertStmt(), 5) + return ms.db.UpsertStreamed(ctx, upsertPodMetrics, database.WithStatement(ms.podMetricUpsertStmt(), 5)) }) g.Go(func() error { - return ms.db.UpsertStreamedWithStatement(ctx, upsertContainerMetrics, ms.containerMetricUpsertStmt(), 6) + return ms.db.UpsertStreamed(ctx, upsertContainerMetrics, database.WithStatement(ms.containerMetricUpsertStmt(), 6)) }) return g.Wait() } +// Clean deletes metrics from the database if the belonging pod is deleted func (ms *MetricSync) Clean(ctx context.Context, deleteChannel <-chan contracts.KDelete) error { g, ctx := errgroup.WithContext(ctx) @@ -130,42 +157,24 @@ func (ms *MetricSync) Clean(ctx context.Context, deleteChannel <-chan contracts. 
}) g.Go(func() error { - return ms.db.DeleteStreamedByField(ctx, &schema.PodMetric{}, "reference_id", deletesPod) + return ms.db.DeleteStreamed(ctx, &schema.PodMetric{}, deletesPod, database.ByColumn("reference_id")) }) g.Go(func() error { - return ms.db.DeleteStreamedByField(ctx, &schema.ContainerMetric{}, "pod_reference_id", deletesContainer) + return ms.db.DeleteStreamed(ctx, &schema.ContainerMetric{}, deletesContainer, database.ByColumn("pod_reference_id")) }) return g.Wait() } -func (ms *MetricSync) podMetricUpsertStmt() string { - return fmt.Sprintf( - "INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s", - "pod_metric", - "reference_id, timestamp, cpu, memory, storage", - ":reference_id, :timestamp, :cpu, :memory, :storage", - "timestamp=VALUES(timestamp), cpu=VALUES(cpu), memory=VALUES(memory), storage=VALUES(storage)", - ) -} - -func (ms *MetricSync) containerMetricUpsertStmt() string { - return fmt.Sprintf( - "INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s", - "container_metric", - "container_reference_id, pod_reference_id, timestamp, cpu, memory, storage", - ":container_reference_id, :pod_reference_id, :timestamp, :cpu, :memory, :storage", - "timestamp=VALUES(timestamp), cpu=VALUES(cpu), memory=VALUES(memory), storage=VALUES(storage)", - ) -} - +// NodeMetricSync syncs node metrics to the database type NodeMetricSync struct { metricsClientset *metricsv.Clientset db *database.DB logger *logging.Logger } +// NewNodeMetricSync creates new NodeMetricSync initialized with metricsClientset, database and logger func NewNodeMetricSync(metricClientset *metricsv.Clientset, db *database.DB, logger *logging.Logger) *NodeMetricSync { return &NodeMetricSync{ metricsClientset: metricClientset, @@ -174,6 +183,19 @@ func NewNodeMetricSync(metricClientset *metricsv.Clientset, db *database.DB, log } } +// nodeMetricUpsertStmt returns database upsert statement to upsert node metrics +func (nms *NodeMetricSync) nodeMetricUpsertStmt() string { + return fmt.Sprintf( + "INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s", + "node_metric", + "node_id, timestamp, cpu, memory, storage", + ":node_id, :timestamp, :cpu, :memory, :storage", + "timestamp=VALUES(timestamp), cpu=VALUES(cpu), memory=VALUES(memory), storage=VALUES(storage)", + ) +} + +// Run starts syncing the metrics to the database. 
Therefore, it gets a list of all nodes +// and the belonging metrics func (nms *NodeMetricSync) Run(ctx context.Context) error { g, ctx := errgroup.WithContext(ctx) @@ -207,12 +229,13 @@ func (nms *NodeMetricSync) Run(ctx context.Context) error { }) g.Go(func() error { - return nms.db.UpsertStreamedWithStatement(ctx, upsertNodeMetrics, nms.nodeMetricUpsertStmt(), 5) + return nms.db.UpsertStreamed(ctx, upsertNodeMetrics, database.WithStatement(nms.nodeMetricUpsertStmt(), 5)) }) return g.Wait() } +// Clean deletes metrics from the database if the belonging node is deleted func (nms *NodeMetricSync) Clean(ctx context.Context, deleteChannel <-chan contracts.KDelete) error { g, ctx := errgroup.WithContext(ctx) @@ -238,18 +261,8 @@ func (nms *NodeMetricSync) Clean(ctx context.Context, deleteChannel <-chan contr }) g.Go(func() error { - return nms.db.DeleteStreamedByField(ctx, &schema.NodeMetric{}, "node_id", deletes) + return nms.db.DeleteStreamed(ctx, &schema.NodeMetric{}, deletes, database.ByColumn("node_id")) }) return g.Wait() } - -func (nms *NodeMetricSync) nodeMetricUpsertStmt() string { - return fmt.Sprintf( - "INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s", - "node_metric", - "node_id, timestamp, cpu, memory, storage", - ":node_id, :timestamp, :cpu, :memory, :storage", - "timestamp=VALUES(timestamp), cpu=VALUES(cpu), memory=VALUES(memory), storage=VALUES(storage)", - ) -} From 0b811100f214ed5a69b9b50030c8598341469e02 Mon Sep 17 00:00:00 2001 From: Johannes Rauh Date: Tue, 28 Nov 2023 09:04:25 +0100 Subject: [PATCH 3/8] Use new Upsert and Delete structs --- pkg/sync/metrics.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/sync/metrics.go b/pkg/sync/metrics.go index cb8a09c2..c0bd7115 100644 --- a/pkg/sync/metrics.go +++ b/pkg/sync/metrics.go @@ -118,11 +118,11 @@ func (ms *MetricSync) Run(ctx context.Context) error { }) g.Go(func() error { - return ms.db.UpsertStreamed(ctx, upsertPodMetrics, database.WithStatement(ms.podMetricUpsertStmt(), 5)) + return database.NewUpsert(ms.db).WithStatement(ms.podMetricUpsertStmt(), 5).Stream(ctx, upsertPodMetrics) }) g.Go(func() error { - return ms.db.UpsertStreamed(ctx, upsertContainerMetrics, database.WithStatement(ms.containerMetricUpsertStmt(), 6)) + return database.NewUpsert(ms.db).WithStatement(ms.containerMetricUpsertStmt(), 6).Stream(ctx, upsertContainerMetrics) }) return g.Wait() @@ -157,11 +157,11 @@ func (ms *MetricSync) Clean(ctx context.Context, deleteChannel <-chan contracts. 
}) g.Go(func() error { - return ms.db.DeleteStreamed(ctx, &schema.PodMetric{}, deletesPod, database.ByColumn("reference_id")) + return database.NewDelete(ms.db).ByColumn("reference_id").Stream(ctx, &schema.PodMetric{}, deletesPod) }) g.Go(func() error { - return ms.db.DeleteStreamed(ctx, &schema.ContainerMetric{}, deletesContainer, database.ByColumn("pod_reference_id")) + return database.NewDelete(ms.db).ByColumn("pod_reference_id").Stream(ctx, &schema.ContainerMetric{}, deletesContainer) }) return g.Wait() @@ -229,7 +229,7 @@ func (nms *NodeMetricSync) Run(ctx context.Context) error { }) g.Go(func() error { - return nms.db.UpsertStreamed(ctx, upsertNodeMetrics, database.WithStatement(nms.nodeMetricUpsertStmt(), 5)) + return database.NewUpsert(nms.db).WithStatement(nms.nodeMetricUpsertStmt(), 5).Stream(ctx, upsertNodeMetrics) }) return g.Wait() @@ -261,7 +261,7 @@ func (nms *NodeMetricSync) Clean(ctx context.Context, deleteChannel <-chan contr }) g.Go(func() error { - return nms.db.DeleteStreamed(ctx, &schema.NodeMetric{}, deletes, database.ByColumn("node_id")) + return database.NewDelete(nms.db).ByColumn("node_id").Stream(ctx, &schema.NodeMetric{}, deletes) }) return g.Wait() From d5417bc60ab5d6b4c1bd52f71d14dd81925ef827 Mon Sep 17 00:00:00 2001 From: Johannes Rauh Date: Fri, 17 May 2024 15:41:30 +0200 Subject: [PATCH 4/8] Sync Prometheus metrics to the database --- cmd/icinga-kubernetes/main.go | 46 +- pkg/schema/metric.go | 43 +- pkg/sync/logs.go | 4 +- pkg/sync/metrics.go | 737 +++++++++++++++++++++++-------- schema/mysql/schema.sql | 55 ++- schema/mysql/start-mysql-kube.sh | 3 + 6 files changed, 636 insertions(+), 252 deletions(-) create mode 100755 schema/mysql/start-mysql-kube.sh diff --git a/cmd/icinga-kubernetes/main.go b/cmd/icinga-kubernetes/main.go index a6bfde93..c7f38c1d 100644 --- a/cmd/icinga-kubernetes/main.go +++ b/cmd/icinga-kubernetes/main.go @@ -12,28 +12,24 @@ import ( "github.com/icinga/icinga-kubernetes/pkg/sync" "github.com/okzk/sdnotify" "github.com/pkg/errors" + promapi "github.com/prometheus/client_golang/api" + promv1 "github.com/prometheus/client_golang/api/prometheus/v1" "golang.org/x/sync/errgroup" kinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" kclientcmd "k8s.io/client-go/tools/clientcmd" - metricsv "k8s.io/metrics/pkg/client/clientset/versioned" ) func main() { kconfig, err := kclientcmd.NewNonInteractiveDeferredLoadingClientConfig( kclientcmd.NewDefaultClientConfigLoadingRules(), &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { - logging.Fatal(errors.Wrap(err, "can't configure Kubernetes client")) + logging.Fatal(errors.Wrap(err, "can't configure Kubernetes promClient")) } - + k, err := kubernetes.NewForConfig(kconfig) if err != nil { - logging.Fatal(errors.Wrap(err, "can't create Kubernetes client")) - } - - mk, err := metricsv.NewForConfig(kconfig) - if err != nil { - logging.Fatal(errors.Wrap(err, "can't create Kubernetes metrics client")) + logging.Fatal(errors.Wrap(err, "can't create Kubernetes promClient")) } flags, err := config.ParseFlags[internal.Flags]() @@ -46,6 +42,13 @@ func main() { logging.Fatal(errors.Wrap(err, "can't create configuration")) } + promClient, err := promapi.NewClient(promapi.Config{Address: "http://localhost:9090"}) + if err != nil { + logging.Fatal(errors.Wrap(err, "error creating promClient")) + } + + promApiClient := promv1.NewAPI(promClient) + logs, err := logging.NewLoggingFromConfig("Icinga Kubernetes", &cfg.Logging) if err != nil { logging.Fatal(errors.Wrap(err, "can't configure 
logging")) @@ -105,7 +108,7 @@ func main() { forwardUpsertPodsToLogChannel := make(chan contracts.KUpsert) forwardDeletePodsToLogChannel := make(chan contracts.KDelete) - forwardDeletePodsToMetricChannel := make(chan contracts.KDelete) + //forwardDeletePodsToMetricChannel := make(chan contracts.KDelete) g.Go(func() error { @@ -135,27 +138,12 @@ func main() { return logSync.Run(ctx) }) - // sync pod and container metrics - metricsSync := sync.NewMetricSync(mk, db, logs.GetChildLogger("Metrics")) + // sync prometheus metrics + promMetricSync := sync.NewPromMetricSync(promApiClient, db, logs.GetChildLogger("PromMetrics")) - g.Go(func() error { - return metricsSync.Run(ctx) - }) + g.Go(func() error { return promMetricSync.Run(ctx) }) - g.Go(func() error { - return metricsSync.Clean(ctx, forwardDeletePodsToMetricChannel) - }) - - // sync node metrics - nodeMetricSync := sync.NewNodeMetricSync(mk, db, logs.GetChildLogger("NodeMetrics")) - - g.Go(func() error { - return nodeMetricSync.Run(ctx) - }) - - g.Go(func() error { - return nodeMetricSync.Clean(ctx, forwardDeleteNodesToMetricChannel) - }) + //g.Go(func() error {return promMetricSync.Clean(ctx, forwardDeletePodsToMetricChannel)}) if err := g.Wait(); err != nil { logging.Fatal(errors.Wrap(err, "can't sync")) diff --git a/pkg/schema/metric.go b/pkg/schema/metric.go index 9940aec0..273f5459 100644 --- a/pkg/schema/metric.go +++ b/pkg/schema/metric.go @@ -1,29 +1,36 @@ package schema -type PodMetric struct { +type PrometheusClusterMetric struct { kmetaWithoutNamespace - ReferenceId []byte - Timestamp int64 - Cpu int64 - Memory int64 - Storage int64 + Timestamp int64 + Group string + Name string + Value float64 } -type ContainerMetric struct { +type PrometheusNodeMetric struct { kmetaWithoutNamespace - ContainerReferenceId []byte - PodReferenceId []byte - Timestamp int64 - Cpu int64 - Memory int64 - Storage int64 + NodeId []byte + Timestamp int64 + Group string + Name string + Value float64 } -type NodeMetric struct { +type PrometheusPodMetric struct { kmetaWithoutNamespace - NodeId []byte + PodId []byte Timestamp int64 - Cpu int64 - Memory int64 - Storage int64 + Group string + Name string + Value float64 +} + +type PrometheusContainerMetric struct { + kmetaWithoutNamespace + ContainerId []byte + Timestamp int64 + Group string + Name string + Value float64 } diff --git a/pkg/sync/logs.go b/pkg/sync/logs.go index f1715efd..d4c9c75b 100644 --- a/pkg/sync/logs.go +++ b/pkg/sync/logs.go @@ -161,7 +161,7 @@ func (ls *LogSync) MaintainList(ctx context.Context, addChannel <-chan contracts }) g.Go(func() error { - return database.NewDelete(ls.db).ByColumn("reference_id").Stream(ctx, &schema.Log{}, deletes) + return database.NewDelete(ls.db, database.ByColumn("reference_id")).Stream(ctx, &schema.Log{}, deletes) }) return g.Wait() @@ -244,7 +244,7 @@ func (ls *LogSync) Run(ctx context.Context) error { }) g.Go(func() error { - return database.NewUpsert(ls.db).WithStatement(upsertStmt, 5).Stream(ctx, upserts) + return database.NewUpsert(ls.db, database.WithStatement(upsertStmt, 5)).Stream(ctx, upserts) }) return g.Wait() diff --git a/pkg/sync/metrics.go b/pkg/sync/metrics.go index c0bd7115..41c3dacb 100644 --- a/pkg/sync/metrics.go +++ b/pkg/sync/metrics.go @@ -6,263 +6,642 @@ import ( "fmt" "github.com/icinga/icinga-go-library/database" "github.com/icinga/icinga-go-library/logging" - "github.com/icinga/icinga-kubernetes/pkg/contracts" "github.com/icinga/icinga-kubernetes/pkg/schema" "github.com/pkg/errors" + v1 
"github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/common/model" "golang.org/x/sync/errgroup" - kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "time" - - //kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - metricsv "k8s.io/metrics/pkg/client/clientset/versioned" ) -// MetricSync syncs container and pod metrics to the database -type MetricSync struct { - metricsClientset *metricsv.Clientset - db *database.DB - logger *logging.Logger +type PromQuery struct { + metricGroup string + query string + nameLabel model.LabelName +} + +type PromMetricSync struct { + promApiClient v1.API + db *database.DB + logger *logging.Logger } -// NewMetricSync creates new MetricSync initialized with metricsClientset, database and logger -func NewMetricSync(metricsClientset *metricsv.Clientset, db *database.DB, logger *logging.Logger) *MetricSync { - return &MetricSync{ - metricsClientset: metricsClientset, - db: db, - logger: logger, +func NewPromMetricSync(promApiClient v1.API, db *database.DB, logger *logging.Logger) *PromMetricSync { + return &PromMetricSync{ + promApiClient: promApiClient, + db: db, + logger: logger, } } -// podMetricUpsertStmt returns database upsert statement to upsert pod metrics -func (ms *MetricSync) podMetricUpsertStmt() string { +// promMetricClusterUpsertStmt returns database upsert statement to upsert cluster metrics +func (pms *PromMetricSync) promMetricClusterUpsertStmt() string { return fmt.Sprintf( - "INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s", - "pod_metric", - "reference_id, timestamp, cpu, memory, storage", - ":reference_id, :timestamp, :cpu, :memory, :storage", - "timestamp=VALUES(timestamp), cpu=VALUES(cpu), memory=VALUES(memory), storage=VALUES(storage)", + `INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s`, + `prometheus_cluster_metric`, + "timestamp, `group`, name, value", + `:timestamp, :group, :name, :value`, + `value=VALUES(value)`, ) } -// containerMetricUpsertStmt returns database upsert statement to upsert container metrics -func (ms *MetricSync) containerMetricUpsertStmt() string { +// promMetricNodeUpsertStmt returns database upsert statement to upsert node metrics +func (pms *PromMetricSync) promMetricNodeUpsertStmt() string { return fmt.Sprintf( - "INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s", - "container_metric", - "container_reference_id, pod_reference_id, timestamp, cpu, memory, storage", - ":container_reference_id, :pod_reference_id, :timestamp, :cpu, :memory, :storage", - "timestamp=VALUES(timestamp), cpu=VALUES(cpu), memory=VALUES(memory), storage=VALUES(storage)", + `INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s`, + `prometheus_node_metric`, + "node_id, timestamp, `group`, name, value", + `:node_id, :timestamp, :group, :name, :value`, + `value=VALUES(value)`, ) } -// Run starts syncing the metrics to the database. Therefore, it gets a list of all pods -// and the belonging containers together with their metrics from the API every minute. -// The pod metrics are the container metrics summed up by pod. 
-func (ms *MetricSync) Run(ctx context.Context) error { +// promMetricPodUpsertStmt returns database upsert statement to upsert pod metrics +func (pms *PromMetricSync) promMetricPodUpsertStmt() string { + return fmt.Sprintf( + `INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s`, + `prometheus_pod_metric`, + "pod_id, timestamp, `group`, name, value", + `:pod_id, :timestamp, :group, :name, :value`, + `value=VALUES(value)`, + ) +} - ms.logger.Info("Starting sync") +// promMetricContainerUpsertStmt returns database upsert statement to upsert container metrics +func (pms *PromMetricSync) promMetricContainerUpsertStmt() string { + return fmt.Sprintf( + `INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s`, + `prometheus_container_metric`, + "container_id, timestamp, `group`, name, value", + `:container_id, :timestamp, :group, :name, :value`, + `value=VALUES(value)`, + ) +} +// Run starts syncing the prometheus metrics to the database. +// Therefore, it gets a list of the metric queries. +func (pms *PromMetricSync) Run(ctx context.Context) error { g, ctx := errgroup.WithContext(ctx) + upsertClusterMetrics := make(chan database.Entity) + upsertNodeMetrics := make(chan database.Entity) upsertPodMetrics := make(chan database.Entity) upsertContainerMetrics := make(chan database.Entity) - g.Go(func() error { - defer close(upsertPodMetrics) - defer close(upsertContainerMetrics) + promQueriesCluster := []PromQuery{ + { + "node.count", + `count(group by (node) (kube_node_info))`, + "", + }, + { + "namespace.count", + `count(kube_namespace_created)`, + "", + }, + { + "pod.running", + `sum(kube_pod_status_phase{phase="Running"})`, + "", + }, + { + "pod.pending", + `sum(kube_pod_status_phase{phase="Pending"})`, + "", + }, + { + "pod.failed", + `sum(kube_pod_status_phase{phase="Failed"})`, + "", + }, + { + "pod.succeeded", + `sum(kube_pod_status_phase{phase="Succeeded"})`, + "", + }, + { + "cpu.usage", + `avg(sum by (instance, cpu) (rate(node_cpu_seconds_total{mode!~"idle|iowait|steal"}[1m])))`, + "", + }, + { + "memory.usage", + `sum(node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / sum(node_memory_MemTotal_bytes)`, + "", + }, + { + "qos_by_class", + `sum by (qos_class) (kube_pod_status_qos_class)`, + "", + }, + { + "network.received.bytes", + `sum by (device) (rate(node_network_receive_bytes_total{device!~"(veth|azv|lxc).*"}[2m]))`, + "", + }, + { + "network.transmitted.bytes", + `- sum by (device) (rate(node_network_transmit_bytes_total{device!~"(veth|azv|lxc).*"}[2m]))`, + "", + }, + { + "network.received.bytes.bydevice", + `sum by (device) (rate(node_network_receive_bytes_total{device!~"(veth|azv|lxc).*"}[2m]))`, + "device", + }, + } - for { - metrics, err := ms.metricsClientset.MetricsV1beta1().PodMetricses(kmetav1.NamespaceAll).List(ctx, kmetav1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "error getting metrics from api") - } + promQueriesNode := []PromQuery{ + { + "cpu.usage", + `avg by (instance) (sum by (instance, cpu) (rate(node_cpu_seconds_total{mode!~"idle|iowait|steal"}[1m])))`, + "", + }, + { + "cpu.request", + `sum by (node) (kube_pod_container_resource_requests{resource="cpu"})`, + "", + }, + { + "cpu.request.percentage", + `sum by (node) (kube_pod_container_resource_requests{resource="cpu"}) / on(node) group_left() (sum by (node) (machine_cpu_cores))`, + "", + }, + { + "cpu.limit", + `sum by (node) (kube_pod_container_resource_limits{resource="cpu"})`, + "", + }, + { + "cpu.limit.percentage", + `sum by (node) 
(kube_pod_container_resource_limits{resource="cpu"}) / on(node) group_left() (sum by (node) (machine_cpu_cores))`, + "", + }, + { + "memory.usage", + `sum by (instance) (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / sum by (instance) (node_memory_MemTotal_bytes)`, + "", + }, + { + "memory.request", + `sum by (node) (kube_pod_container_resource_requests{resource="memory"})`, + "", + }, + { + "memory.request.percentage", + `sum by (node) (kube_pod_container_resource_requests{resource="memory"}) / on(node) group_left() (sum by (node) (machine_memory_bytes))`, + "", + }, + { + "memory.limit", + `sum by (node) (kube_pod_container_resource_limits{resource="memory"})`, + "", + }, + { + "memory.limit.percentage", + `sum by (node) (kube_pod_container_resource_limits{resource="memory"}) / on(node) group_left() (sum by (node) (machine_memory_bytes))`, + "", + }, + { + "network.received.bytes", + `sum by (instance) (rate(node_network_receive_bytes_total[2m]))`, + "", + }, + { + "network.transmitted.bytes", + `- sum by (instance) (rate(node_network_transmit_bytes_total[2m]))`, + "", + }, + { + "filesystem.usage", + `sum by (instance, mountpoint) (1 - (node_filesystem_avail_bytes / node_filesystem_size_bytes))`, + "mountpoint", + }, + } - for _, pod := range metrics.Items { + promQueriesPod := []PromQuery{ + { + "cpu.usage", + `avg by (namespace, pod) (sum by (namespace, pod, cpu) (rate(node_cpu_seconds_total{mode!~"idle|iowait|steal"}[1m])))`, + "", + }, + { + "memory.usage", + `sum by (namespace, pod) ((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes))`, + "", + }, + { + "cpu.usage.cores", + `sum by (namespace, pod) (rate(container_cpu_usage_seconds_total[1m]))`, + "", + }, + { + "memory.usage.bytes", + `sum by (namespace, pod) (container_memory_usage_bytes)`, + "", + }, + { + "cpu.request", + `sum by (node, namespace, pod) (kube_pod_container_resource_requests{resource="cpu"})`, + "", + }, + { + "cpu.request.percentage", + `sum by (node, namespace, pod) (kube_pod_container_resource_requests{resource="cpu"}) / on(node) group_left() (sum by (node) (machine_cpu_cores))`, + "", + }, + { + "cpu.limit", + `sum by (node, namespace, pod) (kube_pod_container_resource_limits{resource="cpu"})`, + "", + }, + { + "cpu.limit.percentage", + `sum by (node, namespace, pod) (kube_pod_container_resource_limits{resource="cpu"}) / on(node) group_left() (sum by (node) (machine_cpu_cores))`, + "", + }, + { + "memory.request", + `sum by (node, namespace, pod) (kube_pod_container_resource_requests{resource="memory"})`, + "", + }, + { + "memory.request.percentage", + `sum by (node, namespace, pod) (kube_pod_container_resource_requests{resource="memory"}) / on(node) group_left() (sum by (node) (machine_memory_bytes))`, + "", + }, + { + "memory.limit", + `sum by (node, namespace, pod) (kube_pod_container_resource_limits{resource="memory"})`, + "", + }, + { + "memory.limit.percentage", + `sum by (node, namespace, pod) (kube_pod_container_resource_limits{resource="memory"}) / on(node) group_left() (sum by (node) (machine_memory_bytes))`, + "", + }, + } - podId := sha1.Sum([]byte(pod.Namespace + "/" + pod.Name)) + promQueriesContainer := []PromQuery{ + { + "cpu.request", + `sum by (node, namespace, pod, container) (kube_pod_container_resource_requests{resource="cpu"})`, + "", + }, + { + "cpu.request.percentage", + `sum by (node, namespace, pod, container) (kube_pod_container_resource_requests{resource="cpu"}) / on(node) group_left() (sum by (node) (machine_cpu_cores))`, + "", + }, + { + "cpu.limit", + 
`sum by (node, namespace, pod, container) (kube_pod_container_resource_limits{resource="cpu"})`, + "", + }, + { + "cpu.limit.percentage", + `sum by (node, namespace, pod, container) (kube_pod_container_resource_limits{resource="cpu"}) / on(node) group_left() (sum by (node) (machine_cpu_cores))`, + "", + }, + { + "memory.request", + `sum by (node, namespace, pod, container) (kube_pod_container_resource_requests{resource="memory"})`, + "", + }, + { + "memory.request.percentage", + `sum by (node, namespace, pod, container) (kube_pod_container_resource_requests{resource="memory"}) / on(node) group_left() (sum by (node) (machine_memory_bytes))`, + "", + }, + { + "memory.limit", + `sum by (node, namespace, pod, container) (kube_pod_container_resource_limits{resource="memory"})`, + "", + }, + { + "memory.limit.percentage", + `sum by (node, namespace, pod, container) (kube_pod_container_resource_limits{resource="memory"}) / on(node) group_left() (sum by (node) (machine_memory_bytes))`, + "", + }, + } - newPodMetric := &schema.PodMetric{ - ReferenceId: podId[:], - Timestamp: pod.Timestamp.UnixMilli(), + //promv1.Range{ + // Start: time.Now().Add(time.Duration(-2) * time.Hour), + // End: time.Now(), + // Step: time.Second * 10, + //}, + + for _, promQuery := range promQueriesCluster { + promQuery := promQuery + + g.Go(func() error { + for { + result, warnings, err := pms.promApiClient.Query( + ctx, + promQuery.query, + time.Time{}, + //promQuery.queryRange, + ) + if err != nil { + return errors.Wrap(err, "error querying Prometheus") + } + if len(warnings) > 0 { + fmt.Printf("Warnings: %v\n", warnings) + } + if result == nil { + fmt.Println("No results found") + continue } - for _, container := range pod.Containers { + for _, res := range result.(model.Vector) { - containerId := sha1.Sum([]byte(pod.Namespace + "/" + pod.Name + "/" + container.Name)) + name := "" - newContainerMetric := &schema.ContainerMetric{ - ContainerReferenceId: containerId[:], - PodReferenceId: podId[:], - Timestamp: pod.Timestamp.UnixMilli(), - Cpu: container.Usage.Cpu().MilliValue(), - Memory: container.Usage.Memory().Value(), - Storage: container.Usage.Storage().Value(), + if promQuery.nameLabel != "" { + name = string(res.Metric[promQuery.nameLabel]) } - upsertContainerMetrics <- newContainerMetric + newClusterMetric := &schema.PrometheusClusterMetric{ + Timestamp: (res.Timestamp.UnixNano() - res.Timestamp.UnixNano()%(60*1000000000)) / 1000000, + Group: promQuery.metricGroup, + Name: name, + Value: float64(res.Value), + } - newPodMetric.Cpu += container.Usage.Cpu().MilliValue() - newPodMetric.Memory += container.Usage.Memory().Value() - newPodMetric.Storage += container.Usage.Storage().Value() + select { + case upsertClusterMetrics <- newClusterMetric: + case <-ctx.Done(): + return ctx.Err() + } } - upsertPodMetrics <- newPodMetric + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(time.Second * 55): + } } + }) + } - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(time.Minute): - } - } - }) + for _, promQuery := range promQueriesNode { + promQuery := promQuery + + g.Go(func() error { + for { + result, warnings, err := pms.promApiClient.Query( + ctx, + promQuery.query, + time.Time{}, + //promQuery.queryRange, + ) + if err != nil { + return errors.Wrap(err, "error querying Prometheus") + } + if len(warnings) > 0 { + fmt.Printf("Warnings: %v\n", warnings) + } + if result == nil { + fmt.Println("No results found") + continue + } - g.Go(func() error { - return 
database.NewUpsert(ms.db).WithStatement(ms.podMetricUpsertStmt(), 5).Stream(ctx, upsertPodMetrics) - }) + for _, res := range result.(model.Vector) { + nodeName := res.Metric["node"] - g.Go(func() error { - return database.NewUpsert(ms.db).WithStatement(ms.containerMetricUpsertStmt(), 6).Stream(ctx, upsertContainerMetrics) - }) + if nodeName == "" { + nodeName = res.Metric["instance"] + } - return g.Wait() -} + nodeId := sha1.Sum([]byte(nodeName)) -// Clean deletes metrics from the database if the belonging pod is deleted -func (ms *MetricSync) Clean(ctx context.Context, deleteChannel <-chan contracts.KDelete) error { + name := "" - g, ctx := errgroup.WithContext(ctx) + if promQuery.nameLabel != "" { + name = string(res.Metric[promQuery.nameLabel]) + } - deletesPod := make(chan any) - deletesContainer := make(chan any) + newNodeMetric := &schema.PrometheusNodeMetric{ + NodeId: nodeId[:], + Timestamp: (res.Timestamp.UnixNano() - res.Timestamp.UnixNano()%(60*1000000000)) / 1000000, + Group: promQuery.metricGroup, + Name: name, + Value: float64(res.Value), + } - g.Go(func() error { - defer close(deletesPod) - defer close(deletesContainer) - - for { - select { - case kdelete, more := <-deleteChannel: - if !more { - return nil + select { + case upsertNodeMetrics <- newNodeMetric: + case <-ctx.Done(): + return ctx.Err() + } } - deletesPod <- kdelete.ID() - deletesContainer <- kdelete.ID() - - case <-ctx.Done(): - return ctx.Err() + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(time.Second * 55): + } } - } - }) + }) + } - g.Go(func() error { - return database.NewDelete(ms.db).ByColumn("reference_id").Stream(ctx, &schema.PodMetric{}, deletesPod) - }) + for _, promQuery := range promQueriesPod { + promQuery := promQuery + + g.Go(func() error { + for { + result, warnings, err := pms.promApiClient.Query( + ctx, + promQuery.query, + time.Time{}, + //promQuery.queryRange, + ) + if err != nil { + return errors.Wrap(err, "error querying Prometheus") + } + if len(warnings) > 0 { + fmt.Printf("Warnings: %v\n", warnings) + } + if result == nil { + fmt.Println("No results found") + continue + } - g.Go(func() error { - return database.NewDelete(ms.db).ByColumn("pod_reference_id").Stream(ctx, &schema.ContainerMetric{}, deletesContainer) - }) + for _, res := range result.(model.Vector) { - return g.Wait() -} + podId := sha1.Sum([]byte(res.Metric["namespace"] + "/" + res.Metric["pod"])) -// NodeMetricSync syncs node metrics to the database -type NodeMetricSync struct { - metricsClientset *metricsv.Clientset - db *database.DB - logger *logging.Logger -} + name := "" -// NewNodeMetricSync creates new NodeMetricSync initialized with metricsClientset, database and logger -func NewNodeMetricSync(metricClientset *metricsv.Clientset, db *database.DB, logger *logging.Logger) *NodeMetricSync { - return &NodeMetricSync{ - metricsClientset: metricClientset, - db: db, - logger: logger, - } -} + if promQuery.nameLabel != "" { + name = string(res.Metric[promQuery.nameLabel]) + } -// nodeMetricUpsertStmt returns database upsert statement to upsert node metrics -func (nms *NodeMetricSync) nodeMetricUpsertStmt() string { - return fmt.Sprintf( - "INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s", - "node_metric", - "node_id, timestamp, cpu, memory, storage", - ":node_id, :timestamp, :cpu, :memory, :storage", - "timestamp=VALUES(timestamp), cpu=VALUES(cpu), memory=VALUES(memory), storage=VALUES(storage)", - ) -} + newPodMetric := &schema.PrometheusPodMetric{ + PodId: podId[:], + Timestamp: 
(res.Timestamp.UnixNano() - res.Timestamp.UnixNano()%(60*1000000000)) / 1000000, + Group: promQuery.metricGroup, + Name: name, + Value: float64(res.Value), + } -// Run starts syncing the metrics to the database. Therefore, it gets a list of all nodes -// and the belonging metrics -func (nms *NodeMetricSync) Run(ctx context.Context) error { + select { + case upsertPodMetrics <- newPodMetric: + case <-ctx.Done(): + return ctx.Err() + } + } - g, ctx := errgroup.WithContext(ctx) + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(time.Second * 55): + } + } + }) + } - upsertNodeMetrics := make(chan database.Entity) + for _, promQuery := range promQueriesContainer { + promQuery := promQuery + + g.Go(func() error { + for { + result, warnings, err := pms.promApiClient.Query( + ctx, + promQuery.query, + time.Time{}, + //promQuery.queryRange, + ) + if err != nil { + return errors.Wrap(err, "error querying Prometheus") + } + if len(warnings) > 0 { + fmt.Printf("Warnings: %v\n", warnings) + } + if result == nil { + fmt.Println("No results found") + continue + } - g.Go(func() error { + for _, res := range result.(model.Vector) { + containerId := sha1.Sum([]byte(res.Metric["namespace"] + "/" + res.Metric["pod"] + "/" + res.Metric["container"])) - defer close(upsertNodeMetrics) + name := "" - for { - metrics, err := nms.metricsClientset.MetricsV1beta1().NodeMetricses().List(ctx, kmetav1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "error getting node metrics from api") - } + if promQuery.nameLabel != "" { + name = string(res.Metric[promQuery.nameLabel]) + } - for _, node := range metrics.Items { - nodeId := sha1.Sum([]byte(node.Name)) + newContainerMetric := &schema.PrometheusContainerMetric{ + ContainerId: containerId[:], + Timestamp: (res.Timestamp.UnixNano() - res.Timestamp.UnixNano()%(60*1000000000)) / 1000000, + Group: promQuery.metricGroup, + Name: name, + Value: float64(res.Value), + } - newNodeMetric := &schema.NodeMetric{ - NodeId: nodeId[:], - Timestamp: node.Timestamp.UnixMilli(), - Cpu: node.Usage.Cpu().MilliValue(), - Memory: node.Usage.Memory().Value(), - Storage: node.Usage.Storage().Value(), + select { + case upsertContainerMetrics <- newContainerMetric: + case <-ctx.Done(): + return ctx.Err() + } } - upsertNodeMetrics <- newNodeMetric + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(time.Second * 55): + } } - } - }) + }) + } g.Go(func() error { - return database.NewUpsert(nms.db).WithStatement(nms.nodeMetricUpsertStmt(), 5).Stream(ctx, upsertNodeMetrics) + return database.NewUpsert(pms.db, database.WithStatement(pms.promMetricClusterUpsertStmt(), 3)).Stream(ctx, upsertClusterMetrics) }) - return g.Wait() -} - -// Clean deletes metrics from the database if the belonging node is deleted -func (nms *NodeMetricSync) Clean(ctx context.Context, deleteChannel <-chan contracts.KDelete) error { - - g, ctx := errgroup.WithContext(ctx) - - deletes := make(chan any) - g.Go(func() error { - defer close(deletes) - - for { - select { - case kdelete, more := <-deleteChannel: - if !more { - return nil - } - - deletes <- kdelete.ID() + return database.NewUpsert(pms.db, database.WithStatement(pms.promMetricNodeUpsertStmt(), 4)).Stream(ctx, upsertNodeMetrics) + }) - case <-ctx.Done(): - return ctx.Err() - } - } + g.Go(func() error { + return database.NewUpsert(pms.db, database.WithStatement(pms.promMetricPodUpsertStmt(), 4)).Stream(ctx, upsertPodMetrics) }) g.Go(func() error { - return database.NewDelete(nms.db).ByColumn("node_id").Stream(ctx, 
&schema.NodeMetric{}, deletes) + return database.NewUpsert(pms.db, database.WithStatement(pms.promMetricContainerUpsertStmt(), 4)).Stream(ctx, upsertContainerMetrics) }) return g.Wait() } + +// Clean deletes metrics from the database if the belonging pod is deleted +//func (ms *MetricSync) Clean(ctx context.Context, deleteChannel <-chan contracts.KDelete) error { +// +// g, ctx := errgroup.WithContext(ctx) +// +// deletesPod := make(chan any) +// deletesContainer := make(chan any) +// +// g.Go(func() error { +// defer close(deletesPod) +// defer close(deletesContainer) +// +// for { +// select { +// case kdelete, more := <-deleteChannel: +// if !more { +// return nil +// } +// +// deletesPod <- kdelete.ID() +// deletesContainer <- kdelete.ID() +// +// case <-ctx.Done(): +// return ctx.Err() +// } +// } +// }) +// +// g.Go(func() error { +// return database.NewDelete(ms.db, database.ByColumn("reference_id")).Stream(ctx, &schema.PodMetric{}, deletesPod) +// }) +// +// g.Go(func() error { +// return database.NewDelete(ms.db, database.ByColumn("pod_reference_id")).Stream(ctx, &schema.ContainerMetric{}, deletesContainer) +// }) +// +// return g.Wait() +//} +// +// Clean deletes metrics from the database if the belonging node is deleted +//func (nms *NodeMetricSync) Clean(ctx context.Context, deleteChannel <-chan contracts.KDelete) error { +// +// g, ctx := errgroup.WithContext(ctx) +// +// deletes := make(chan any) +// +// g.Go(func() error { +// defer close(deletes) +// +// for { +// select { +// case kdelete, more := <-deleteChannel: +// if !more { +// return nil +// } +// +// deletes <- kdelete.ID() +// +// case <-ctx.Done(): +// return ctx.Err() +// } +// } +// }) +// +// g.Go(func() error { +// return database.NewDelete(nms.db, database.ByColumn("node_id")).Stream(ctx, &schema.NodeMetric{}, deletes) +// }) +// +// return g.Wait() +//} diff --git a/schema/mysql/schema.sql b/schema/mysql/schema.sql index 78802d45..cce10796 100644 --- a/schema/mysql/schema.sql +++ b/schema/mysql/schema.sql @@ -50,42 +50,49 @@ CREATE TABLE log DEFAULT CHARSET = utf8mb4 COLLATE = utf8mb4_bin; - -CREATE TABLE pod_metric +CREATE TABLE prometheus_cluster_metric ( - reference_id BINARY(20) NOT NULL, - timestamp BIGINT NOT NULL, - cpu BIGINT NOT NULL, - memory BIGINT NOT NULL, - storage BIGINT NOT NULL, - PRIMARY KEY (reference_id) + timestamp BIGINT NOT NULL, + `group` VARCHAR(255) NOT NULL, + name VARCHAR(255) NOT NULL, + value DOUBLE NOT NULL, + PRIMARY KEY (timestamp, `group`, name) ) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4 COLLATE = utf8mb4_bin; - -CREATE TABLE container_metric +CREATE TABLE prometheus_node_metric ( - container_reference_id BINARY(20) NOT NULL, - pod_reference_id BINARY(20) NOT NULL, - timestamp BIGINT NOT NULL, - cpu BIGINT NOT NULL, - memory BIGINT NOT NULL, - storage BIGINT NOT NULL, - PRIMARY KEY (container_reference_id) + node_id BINARY(20) NOT NULL, + timestamp BIGINT NOT NULL, + `group` VARCHAR(255) NOT NULL, + name VARCHAR(255) NOT NULL, + value DOUBLE NOT NULL, + PRIMARY KEY (node_id, timestamp, `group`, name) ) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4 COLLATE = utf8mb4_bin; +CREATE TABLE prometheus_pod_metric +( + pod_id BINARY(20) NOT NULL, + timestamp BIGINT NOT NULL, + `group` VARCHAR(255) NOT NULL, + name VARCHAR(255) NOT NULL, + value DOUBLE NOT NULL, + PRIMARY KEY (pod_id, timestamp, `group`, name) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 + COLLATE = utf8mb4_bin; -CREATE TABLE node_metric +CREATE TABLE prometheus_container_metric ( - node_id BINARY(20) NOT NULL, - 
timestamp BIGINT NOT NULL, - cpu BIGINT NOT NULL, - memory BIGINT NOT NULL, - storage BIGINT NOT NULL, - PRIMARY KEY (node_id) + container_id BINARY(20) NOT NULL, + timestamp BIGINT NOT NULL, + `group` VARCHAR(255) NOT NULL, + name VARCHAR(255) NOT NULL, + value DOUBLE NOT NULL, + PRIMARY KEY (container_id, timestamp, `group`, name) ) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4 COLLATE = utf8mb4_bin; diff --git a/schema/mysql/start-mysql-kube.sh b/schema/mysql/start-mysql-kube.sh new file mode 100755 index 00000000..0329fde0 --- /dev/null +++ b/schema/mysql/start-mysql-kube.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +docker run -p 3306:3306 -v /var/icinga-kubernetes/persistent-database:/var/lib/mysql --name kubedb -itd mysql_pod From e59b263fe5624cf0a9ef513b33f1852d4b0058f8 Mon Sep 17 00:00:00 2001 From: Johannes Rauh Date: Tue, 21 May 2024 10:48:37 +0200 Subject: [PATCH 5/8] Remove logger from PromMetricSync --- cmd/icinga-kubernetes/main.go | 4 ++-- pkg/sync/metrics.go | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/icinga-kubernetes/main.go b/cmd/icinga-kubernetes/main.go index c7f38c1d..2756f338 100644 --- a/cmd/icinga-kubernetes/main.go +++ b/cmd/icinga-kubernetes/main.go @@ -26,7 +26,7 @@ func main() { if err != nil { logging.Fatal(errors.Wrap(err, "can't configure Kubernetes promClient")) } - + k, err := kubernetes.NewForConfig(kconfig) if err != nil { logging.Fatal(errors.Wrap(err, "can't create Kubernetes promClient")) @@ -139,7 +139,7 @@ func main() { }) // sync prometheus metrics - promMetricSync := sync.NewPromMetricSync(promApiClient, db, logs.GetChildLogger("PromMetrics")) + promMetricSync := sync.NewPromMetricSync(promApiClient, db) g.Go(func() error { return promMetricSync.Run(ctx) }) diff --git a/pkg/sync/metrics.go b/pkg/sync/metrics.go index 41c3dacb..90a4cb11 100644 --- a/pkg/sync/metrics.go +++ b/pkg/sync/metrics.go @@ -5,7 +5,6 @@ import ( "crypto/sha1" "fmt" "github.com/icinga/icinga-go-library/database" - "github.com/icinga/icinga-go-library/logging" "github.com/icinga/icinga-kubernetes/pkg/schema" "github.com/pkg/errors" v1 "github.com/prometheus/client_golang/api/prometheus/v1" @@ -14,23 +13,24 @@ import ( "time" ) +// PromQuery defines a prometheus query with the metric group, the query and the name label type PromQuery struct { metricGroup string query string nameLabel model.LabelName } +// PromMetricSync synchronizes prometheus metrics from the prometheus API to the database type PromMetricSync struct { promApiClient v1.API db *database.DB - logger *logging.Logger } -func NewPromMetricSync(promApiClient v1.API, db *database.DB, logger *logging.Logger) *PromMetricSync { +// NewPromMetricSync creates a new PromMetricSync +func NewPromMetricSync(promApiClient v1.API, db *database.DB) *PromMetricSync { return &PromMetricSync{ promApiClient: promApiClient, db: db, - logger: logger, } } @@ -222,12 +222,12 @@ func (pms *PromMetricSync) Run(ctx context.Context) error { promQueriesPod := []PromQuery{ { "cpu.usage", - `avg by (namespace, pod) (sum by (namespace, pod, cpu) (rate(node_cpu_seconds_total{mode!~"idle|iowait|steal"}[1m])))`, + `sum by (node, namespace, pod) (rate(container_cpu_usage_seconds_total[1m]))`, "", }, { "memory.usage", - `sum by (namespace, pod) ((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes))`, + `sum by (node, namespace, pod) (container_memory_usage_bytes) / on (node) group_left(instance) label_replace(node_memory_MemTotal_bytes, "node", "$1", "instance", "(.*)")`, "", }, { From 
fc522e1ae4142d1d2b7968442b7ba24dec13c970 Mon Sep 17 00:00:00 2001 From: Johannes Rauh Date: Tue, 21 May 2024 10:49:01 +0200 Subject: [PATCH 6/8] Skip pod if no podname is set --- pkg/sync/metrics.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/sync/metrics.go b/pkg/sync/metrics.go index 90a4cb11..a117ba40 100644 --- a/pkg/sync/metrics.go +++ b/pkg/sync/metrics.go @@ -469,6 +469,10 @@ func (pms *PromMetricSync) Run(ctx context.Context) error { for _, res := range result.(model.Vector) { + if res.Metric["pod"] == "" { + continue + } + podId := sha1.Sum([]byte(res.Metric["namespace"] + "/" + res.Metric["pod"])) name := "" From 3e1b82e4e08c5da53515ececb27a74afb2cf60fa Mon Sep 17 00:00:00 2001 From: Johannes Rauh Date: Tue, 21 May 2024 11:20:59 +0200 Subject: [PATCH 7/8] Add Prometheus configs --- cmd/icinga-kubernetes/main.go | 8 +-- config.example.yml | 9 ++++ internal/config.go | 10 +++- pkg/metrics/config.go | 18 +++++++ pkg/{sync => metrics}/metrics.go | 87 ++------------------------------ 5 files changed, 44 insertions(+), 88 deletions(-) create mode 100644 pkg/metrics/config.go rename pkg/{sync => metrics}/metrics.go (87%) diff --git a/cmd/icinga-kubernetes/main.go b/cmd/icinga-kubernetes/main.go index 2756f338..a73ccc02 100644 --- a/cmd/icinga-kubernetes/main.go +++ b/cmd/icinga-kubernetes/main.go @@ -8,6 +8,7 @@ import ( "github.com/icinga/icinga-go-library/logging" "github.com/icinga/icinga-kubernetes/internal" "github.com/icinga/icinga-kubernetes/pkg/contracts" + "github.com/icinga/icinga-kubernetes/pkg/metrics" "github.com/icinga/icinga-kubernetes/pkg/schema" "github.com/icinga/icinga-kubernetes/pkg/sync" "github.com/okzk/sdnotify" @@ -18,6 +19,7 @@ import ( kinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" kclientcmd "k8s.io/client-go/tools/clientcmd" + "strconv" ) func main() { @@ -42,7 +44,7 @@ func main() { logging.Fatal(errors.Wrap(err, "can't create configuration")) } - promClient, err := promapi.NewClient(promapi.Config{Address: "http://localhost:9090"}) + promClient, err := promapi.NewClient(promapi.Config{Address: cfg.Prometheus.Host + ":" + strconv.Itoa(cfg.Prometheus.Port)}) if err != nil { logging.Fatal(errors.Wrap(err, "error creating promClient")) } @@ -139,12 +141,10 @@ func main() { }) // sync prometheus metrics - promMetricSync := sync.NewPromMetricSync(promApiClient, db) + promMetricSync := metrics.NewPromMetricSync(promApiClient, db) g.Go(func() error { return promMetricSync.Run(ctx) }) - //g.Go(func() error {return promMetricSync.Clean(ctx, forwardDeletePodsToMetricChannel)}) - if err := g.Wait(); err != nil { logging.Fatal(errors.Wrap(err, "can't sync")) } diff --git a/config.example.yml b/config.example.yml index bb43e014..b1cca1a3 100644 --- a/config.example.yml +++ b/config.example.yml @@ -39,3 +39,12 @@ logging: # Valid units are "ms", "s", "m", "h". # Defaults to "20s". # interval: 20s + +# Configuration for Prometheus metrics API. +prometheus: + + # Prometheus host +# host: http://localhost + + # Prometheus port +# port: 9090 diff --git a/internal/config.go b/internal/config.go index 60f6fb51..11e85985 100644 --- a/internal/config.go +++ b/internal/config.go @@ -3,12 +3,14 @@ package internal import ( "github.com/icinga/icinga-go-library/database" "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-kubernetes/pkg/metrics" ) // Config defines Icinga Kubernetes config. 
diff --git a/internal/config.go b/internal/config.go
index 60f6fb51..11e85985 100644
--- a/internal/config.go
+++ b/internal/config.go
@@ -3,12 +3,14 @@ package internal
 import (
 	"github.com/icinga/icinga-go-library/database"
 	"github.com/icinga/icinga-go-library/logging"
+	"github.com/icinga/icinga-kubernetes/pkg/metrics"
 )
 
 // Config defines Icinga Kubernetes config.
 type Config struct {
-	Database database.Config `yaml:"database"`
-	Logging  logging.Config  `yaml:"logging"`
+	Database   database.Config          `yaml:"database"`
+	Logging    logging.Config           `yaml:"logging"`
+	Prometheus metrics.PrometheusConfig `yaml:"prometheus"`
 }
 
 // Validate checks constraints in the supplied configuration and returns an error if they are violated.
@@ -21,5 +23,9 @@ func (c *Config) Validate() error {
 		return err
 	}
 
+	if err := c.Prometheus.Validate(); err != nil {
+		return err
+	}
+
 	return nil
 }
diff --git a/pkg/metrics/config.go b/pkg/metrics/config.go
new file mode 100644
index 00000000..cb664493
--- /dev/null
+++ b/pkg/metrics/config.go
@@ -0,0 +1,18 @@
+package metrics
+
+import "github.com/pkg/errors"
+
+// PrometheusConfig defines Prometheus configuration.
+type PrometheusConfig struct {
+	Host string `yaml:"host"`
+	Port int    `yaml:"port"`
+}
+
+// Validate checks constraints in the supplied Prometheus configuration and returns an error if they are violated.
+func (c *PrometheusConfig) Validate() error {
+	if c.Host == "" {
+		return errors.New("Prometheus host missing")
+	}
+
+	return nil
+}
diff --git a/pkg/sync/metrics.go b/pkg/metrics/metrics.go
similarity index 87%
rename from pkg/sync/metrics.go
rename to pkg/metrics/metrics.go
index a117ba40..f914f2d3 100644
--- a/pkg/sync/metrics.go
+++ b/pkg/metrics/metrics.go
@@ -1,4 +1,4 @@
-package sync
+package metrics
 
 import (
 	"context"
@@ -325,12 +325,6 @@ func (pms *PromMetricSync) Run(ctx context.Context) error {
 		},
 	}
 
-	//promv1.Range{
-	//	Start: time.Now().Add(time.Duration(-2) * time.Hour),
-	//	End:   time.Now(),
-	//	Step:  time.Second * 10,
-	//},
-
 	for _, promQuery := range promQueriesCluster {
 		promQuery := promQuery
 
@@ -561,91 +555,20 @@ func (pms *PromMetricSync) Run(ctx context.Context) error {
 	}
 
 	g.Go(func() error {
-		return database.NewUpsert(pms.db, database.WithStatement(pms.promMetricClusterUpsertStmt(), 3)).Stream(ctx, upsertClusterMetrics)
+		return database.NewUpsert(pms.db, database.WithStatement(pms.promMetricClusterUpsertStmt(), 4)).Stream(ctx, upsertClusterMetrics)
 	})
 
 	g.Go(func() error {
-		return database.NewUpsert(pms.db, database.WithStatement(pms.promMetricNodeUpsertStmt(), 4)).Stream(ctx, upsertNodeMetrics)
+		return database.NewUpsert(pms.db, database.WithStatement(pms.promMetricNodeUpsertStmt(), 5)).Stream(ctx, upsertNodeMetrics)
 	})
 
 	g.Go(func() error {
-		return database.NewUpsert(pms.db, database.WithStatement(pms.promMetricPodUpsertStmt(), 4)).Stream(ctx, upsertPodMetrics)
+		return database.NewUpsert(pms.db, database.WithStatement(pms.promMetricPodUpsertStmt(), 5)).Stream(ctx, upsertPodMetrics)
 	})
 
 	g.Go(func() error {
-		return database.NewUpsert(pms.db, database.WithStatement(pms.promMetricContainerUpsertStmt(), 4)).Stream(ctx, upsertContainerMetrics)
+		return database.NewUpsert(pms.db, database.WithStatement(pms.promMetricContainerUpsertStmt(), 5)).Stream(ctx, upsertContainerMetrics)
 	})
 
 	return g.Wait()
 }
-
-// Clean deletes metrics from the database if the belonging pod is deleted
-//func (ms *MetricSync) Clean(ctx context.Context, deleteChannel <-chan contracts.KDelete) error {
-//
-//	g, ctx := errgroup.WithContext(ctx)
-//
-//	deletesPod := make(chan any)
-//	deletesContainer := make(chan any)
-//
-//	g.Go(func() error {
-//		defer close(deletesPod)
-//		defer close(deletesContainer)
-//
-//		for {
-//			select {
-//			case kdelete, more := <-deleteChannel:
-//				if !more {
-//					return nil
-//				}
-//
-//				deletesPod <- kdelete.ID()
-//				deletesContainer <- kdelete.ID()
-//
-//			case <-ctx.Done():
-//				return ctx.Err()
-//			}
-//		}
-//	})
-//
-//	g.Go(func() error {
-//		return database.NewDelete(ms.db, database.ByColumn("reference_id")).Stream(ctx, &schema.PodMetric{}, deletesPod)
-//	})
-//
-//	g.Go(func() error {
-//		return database.NewDelete(ms.db, database.ByColumn("pod_reference_id")).Stream(ctx, &schema.ContainerMetric{}, deletesContainer)
-//	})
-//
-//	return g.Wait()
-//}
-//
-// Clean deletes metrics from the database if the belonging node is deleted
-//func (nms *NodeMetricSync) Clean(ctx context.Context, deleteChannel <-chan contracts.KDelete) error {
-//
-//	g, ctx := errgroup.WithContext(ctx)
-//
-//	deletes := make(chan any)
-//
-//	g.Go(func() error {
-//		defer close(deletes)
-//
-//		for {
-//			select {
-//			case kdelete, more := <-deleteChannel:
-//				if !more {
-//					return nil
-//				}
-//
-//				deletes <- kdelete.ID()
-//
-//			case <-ctx.Done():
-//				return ctx.Err()
-//			}
-//		}
-//	})
-//
-//	g.Go(func() error {
-//		return database.NewDelete(nms.db, database.ByColumn("node_id")).Stream(ctx, &schema.NodeMetric{}, deletes)
-//	})
-//
-//	return g.Wait()
-//}

From 2ff577ed4a3c7c9c18e2150cb0f5534da4e5eef8 Mon Sep 17 00:00:00 2001
From: Johannes Rauh
Date: Tue, 21 May 2024 11:29:26 +0200
Subject: [PATCH 8/8] Update go.mod and go.sum

---
 go.mod |  43 ++++++++++++---------
 go.sum | 115 ++++++++++++++++++++++++++++++++++++---------------------
 2 files changed, 97 insertions(+), 61 deletions(-)

diff --git a/go.mod b/go.mod
index 4fba6598..66a9dfea 100644
--- a/go.mod
+++ b/go.mod
@@ -1,32 +1,39 @@
 module github.com/icinga/icinga-kubernetes
 
-go 1.18
+go 1.22.0
+
+toolchain go1.22.2
 
 require (
+	github.com/dustin/go-humanize v1.0.1
 	github.com/icinga/icinga-go-library v0.0.0-20231121080432-c03a40718ed9
 	github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
 	github.com/pkg/errors v0.9.1
+	github.com/prometheus/client_golang v1.19.1
+	github.com/prometheus/common v0.48.0
 	go.uber.org/zap v1.26.0
 	golang.org/x/sync v0.5.0
-	k8s.io/apimachinery v0.28.2
-	k8s.io/client-go v0.28.2
+	k8s.io/api v0.30.0
+	k8s.io/apimachinery v0.30.0
+	k8s.io/client-go v0.30.0
+	k8s.io/metrics v0.30.0
 )
 
 require (
 	github.com/creasty/defaults v1.7.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/fatih/color v1.16.0 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.22.3 // indirect
 	github.com/go-sql-driver/mysql v1.7.1 // indirect
 	github.com/goccy/go-yaml v1.11.2 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
-	github.com/google/go-cmp v0.5.9 // indirect
+	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.4.0 // indirect
 	github.com/imdario/mergo v0.3.6 // indirect
@@ -41,27 +48,27 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/prometheus/client_model v0.5.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/ssgreg/journald v1.0.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/exp 
v0.0.0-20231110203233-9a3e6036ecaa // indirect - golang.org/x/net v0.16.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sys v0.14.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.28.2 // indirect - k8s.io/klog/v2 v2.100.1 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index a789518d..2d0ddae3 100644 --- a/go.sum +++ b/go.sum @@ -1,16 +1,21 @@ +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creasty/defaults v1.7.0 h1:eNdqZvc5B509z18lD8yc212CAqJNvfT1Jq6L8WowdBA= github.com/creasty/defaults v1.7.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.19.6 
h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= @@ -18,32 +23,35 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/goccy/go-yaml v1.11.2 h1:joq77SxuyIs9zzxEjgyLBugMQ9NEgTWxXfz2wVqwAaQ= github.com/goccy/go-yaml v1.11.2/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/icinga/icinga-go-library v0.0.0-20231121080432-c03a40718ed9 h1:C+BYgMhhitPxEt9pQ/O/ssQ90mIrm7+YG4KDe9UFFBA= github.com/icinga/icinga-go-library v0.0.0-20231121080432-c03a40718ed9/go.mod h1:Apo85zqPgovShDWxx/TlUN/bfl+RaPviTafT666iJyw= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -53,17 +61,21 @@ github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -83,15 +95,28 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs= github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd/go.mod h1:4soZNh0zW0LtYGdQ416i0jO0EIqMGcbtaspRS4BDvRQ= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/ginkgo/v2 
v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= +github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/ssgreg/journald v1.0.0 h1:0YmTDPJXxcWDPba12qNMdO6TxvfkFSYpFIJ31CwmLcU= @@ -104,9 +129,11 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= @@ -114,7 +141,8 @@ go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp 
v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -124,10 +152,10 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= -golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -139,22 +167,23 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -163,10 +192,8 @@ golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSm golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -178,21 +205,23 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= -k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= -k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= -k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= -k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= -k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA= +k8s.io/api 
v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE= +k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA= +k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ= +k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/metrics v0.30.0 h1:tqB+T0GJY288KahaO3Eb41HaDVeLR18gBmyPo0R417s= +k8s.io/metrics v0.30.0/go.mod h1:nSDA8V19WHhCTBhRYuyzJT9yPJBxSpqbyrGCCQ4jPj4= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=