
chore: replace go kit log in prometheus package (#6807)

Jonathan authored on 2024-08-07 11:32:21 -03:00; committed by GitHub
parent 8f97358a75
commit e84c86213c
3 changed files with 89 additions and 82 deletions
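For orientation: the rewrite this commit applies is mechanical. go-kit's level.X(logger).Log("msg", m, kv...) becomes slog's logger.X(m, kv...), and log.With(logger, kv...) becomes logger.With(kv...). A minimal, self-contained sketch of the pattern (illustrative only; the messages are borrowed from the diff, nothing here is part of the commit):

package main

import (
	"log/slog"
	"os"
)

func main() {
	// Enable debug records so the Debug call below actually prints.
	opts := &slog.HandlerOptions{Level: slog.LevelDebug}
	logger := slog.New(slog.NewTextHandler(os.Stderr, opts))

	// go-kit: logger = log.With(logger, "component", controllerName)
	logger = logger.With("component", "prometheus")

	// go-kit: level.Debug(logger).Log("msg", "creating namespace informer", "privileged", privileged)
	logger.Debug("creating namespace informer", "privileged", true)

	// go-kit: level.Info(logger).Log("msg", "successfully synced all caches")
	logger.Info("successfully synced all caches")
}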


@@ -382,7 +382,7 @@ func run(fs *flag.FlagSet) int {
var po *prometheuscontroller.Operator
if prometheusSupported {
-po, err = prometheuscontroller.New(ctx, restConfig, cfg, goKitLogger, r, promControllerOptions...)
+po, err = prometheuscontroller.New(ctx, restConfig, cfg, logger, goKitLogger, r, promControllerOptions...)
if err != nil {
logger.Error("instantiating prometheus controller failed", "err", err)
cancel()


@@ -17,13 +17,13 @@ package prometheus
import (
"context"
"fmt"
"log/slog"
"reflect"
"regexp"
"strings"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/mitchellh/hashstructure"
"github.com/prometheus/client_golang/prometheus"
appsv1 "k8s.io/api/apps/v1"
@@ -66,9 +66,13 @@ type Operator struct {
mdClient metadata.Interface
mclient monitoringclient.Interface
-logger log.Logger
-accessor *operator.Accessor
-config prompkg.Config
+// We're currently migrating our logging library from go-kit to slog.
+// The go-kit logger is being removed in small PRs. For now, we are creating 2 loggers to avoid breaking changes and
+// to have a smooth transition.
+goKitLogger log.Logger
+logger *slog.Logger
+accessor *operator.Accessor
+config prompkg.Config
controllerID string
@@ -123,8 +127,9 @@ func WithStorageClassValidation() ControllerOption {
}
// New creates a new controller.
-func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger log.Logger, r prometheus.Registerer, opts ...ControllerOption) (*Operator, error) {
-logger = log.With(logger, "component", controllerName)
+func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger *slog.Logger, goKitLogger log.Logger, r prometheus.Registerer, opts ...ControllerOption) (*Operator, error) {
+goKitLogger = log.With(goKitLogger, "component", controllerName)
+logger = logger.With("component", controllerName)
client, err := kubernetes.NewForConfig(restConfig)
if err != nil {
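The transitional dual-logger setup described by the struct comment above could be wired at the call site roughly as follows; a runnable sketch under assumptions (the handler, writer, and "prometheus" component value are illustrative, not taken from this commit):

package main

import (
	"log/slog"
	"os"

	"github.com/go-kit/log"
)

func main() {
	// Point both loggers at the same writer so output stays uniform while
	// call sites migrate from go-kit to slog one PR at a time.
	goKitLogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
	slogLogger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	goKitLogger = log.With(goKitLogger, "component", "prometheus")
	slogLogger = slogLogger.With("component", "prometheus")

	goKitLogger.Log("msg", "still behind go-kit call sites") // error return intentionally ignored
	slogLogger.Info("already migrated to slog")
}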
@@ -145,11 +150,12 @@ func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger
r = prometheus.WrapRegistererWith(prometheus.Labels{"controller": "prometheus"}, r)
o := &Operator{
-kclient: client,
-mdClient: mdClient,
-mclient: mclient,
-logger: logger,
-accessor: operator.NewAccessor(logger),
+kclient: client,
+mdClient: mdClient,
+mclient: mclient,
+goKitLogger: goKitLogger,
+logger: logger,
+accessor: operator.NewAccessor(goKitLogger),
config: prompkg.Config{
LocalHost: c.LocalHost,
@@ -172,7 +178,7 @@ func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger
o.metrics.MustRegister(o.reconciliations)
o.rr = operator.NewResourceReconciler(
-o.logger,
+o.goKitLogger,
o,
o.metrics,
monitoringv1.PrometheusesKind,
@@ -325,7 +331,7 @@ func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger
newNamespaceInformer := func(o *Operator, allowList map[string]struct{}) (cache.SharedIndexInformer, error) {
lw, privileged, err := listwatch.NewNamespaceListWatchFromClient(
ctx,
-o.logger,
+o.goKitLogger,
c.KubernetesVersion,
o.kclient.CoreV1(),
o.kclient.AuthorizationV1().SelfSubjectAccessReviews(),
@@ -336,7 +342,7 @@ func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger
return nil, err
}
-level.Debug(o.logger).Log("msg", "creating namespace informer", "privileged", privileged)
+o.logger.Debug("creating namespace informer", "privileged", privileged)
return cache.NewSharedIndexInformer(
o.metrics.NewInstrumentedListerWatcher(lw),
&v1.Namespace{}, resyncPeriod, cache.Indexers{},
@@ -390,7 +396,7 @@ func (c *Operator) waitForCacheSync(ctx context.Context) error {
}
for _, inf := range infs.informersForResource.GetInformers() {
-if !operator.WaitForNamedCacheSync(ctx, "prometheus", log.With(c.logger, "informer", infs.name), inf.Informer()) {
+if !operator.WaitForNamedCacheSync(ctx, "prometheus", log.With(c.goKitLogger, "informer", infs.name), inf.Informer()) {
return fmt.Errorf("failed to sync cache for %s informer", infs.name)
}
}
@@ -403,12 +409,12 @@ func (c *Operator) waitForCacheSync(ctx context.Context) error {
{"PromNamespace", c.nsPromInf},
{"MonNamespace", c.nsMonInf},
} {
-if !operator.WaitForNamedCacheSync(ctx, "prometheus", log.With(c.logger, "informer", inf.name), inf.informer) {
+if !operator.WaitForNamedCacheSync(ctx, "prometheus", log.With(c.goKitLogger, "informer", inf.name), inf.informer) {
return fmt.Errorf("failed to sync cache for %s informer", inf.name)
}
}
-level.Info(c.logger).Log("msg", "successfully synced all caches")
+c.logger.Info("successfully synced all caches")
return nil
}
@@ -419,7 +425,7 @@ func (c *Operator) addHandlers() {
c.ssetInfs.AddEventHandler(c.rr)
c.smonInfs.AddEventHandler(operator.NewEventHandler(
-c.logger,
+c.goKitLogger,
c.accessor,
c.metrics,
monitoringv1.ServiceMonitorsKind,
@@ -427,7 +433,7 @@ func (c *Operator) addHandlers() {
))
c.pmonInfs.AddEventHandler(operator.NewEventHandler(
-c.logger,
+c.goKitLogger,
c.accessor,
c.metrics,
monitoringv1.PodMonitorsKind,
@@ -435,7 +441,7 @@ func (c *Operator) addHandlers() {
))
c.probeInfs.AddEventHandler(operator.NewEventHandler(
-c.logger,
+c.goKitLogger,
c.accessor,
c.metrics,
monitoringv1.ProbesKind,
@@ -444,7 +450,7 @@ func (c *Operator) addHandlers() {
if c.sconInfs != nil {
c.sconInfs.AddEventHandler(operator.NewEventHandler(
-c.logger,
+c.goKitLogger,
c.accessor,
c.metrics,
monitoringv1alpha1.ScrapeConfigsKind,
@@ -453,7 +459,7 @@ func (c *Operator) addHandlers() {
}
c.ruleInfs.AddEventHandler(operator.NewEventHandler(
-c.logger,
+c.goKitLogger,
c.accessor,
c.metrics,
monitoringv1.PrometheusRuleKind,
@@ -461,7 +467,7 @@ func (c *Operator) addHandlers() {
))
c.cmapInfs.AddEventHandler(operator.NewEventHandler(
-c.logger,
+c.goKitLogger,
c.accessor,
c.metrics,
"ConfigMap",
@@ -469,7 +475,7 @@ func (c *Operator) addHandlers() {
))
c.secrInfs.AddEventHandler(operator.NewEventHandler(
-c.logger,
+c.goKitLogger,
c.accessor,
c.metrics,
"Secret",
@@ -531,7 +537,7 @@ func (c *Operator) Iterate(processFn func(metav1.Object, []monitoringv1.Conditio
p := o.(*monitoringv1.Prometheus)
processFn(p, p.Status.Conditions)
}); err != nil {
-level.Error(c.logger).Log("msg", "failed to list Prometheus objects", "err", err)
+c.logger.Error("failed to list Prometheus objects", "err", err)
}
}
@@ -553,15 +559,15 @@ func (c *Operator) enqueueForMonitorNamespace(nsName string) {
func (c *Operator) enqueueForNamespace(store cache.Store, nsName string) {
nsObject, exists, err := store.GetByKey(nsName)
if err != nil {
-level.Error(c.logger).Log(
-"msg", "get namespace to enqueue Prometheus instances failed",
+c.logger.Error(
+"get namespace to enqueue Prometheus instances failed",
"err", err,
)
return
}
if !exists {
-level.Error(c.logger).Log(
-"msg", fmt.Sprintf("get namespace to enqueue Prometheus instances failed: namespace %q does not exist", nsName),
+c.logger.Error(
+fmt.Sprintf("get namespace to enqueue Prometheus instances failed: namespace %q does not exist", nsName),
)
return
}
@@ -579,8 +585,8 @@ func (c *Operator) enqueueForNamespace(store cache.Store, nsName string) {
// the namespace.
smNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ServiceMonitorNamespaceSelector)
if err != nil {
-level.Error(c.logger).Log(
-"msg", fmt.Sprintf("failed to convert ServiceMonitorNamespaceSelector of %q to selector", p.Name),
+c.logger.Error(
+fmt.Sprintf("failed to convert ServiceMonitorNamespaceSelector of %q to selector", p.Name),
"err", err,
)
return
@@ -594,8 +600,8 @@ func (c *Operator) enqueueForNamespace(store cache.Store, nsName string) {
// Check for Prometheus instances selecting PodMonitors in the NS.
pmNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.PodMonitorNamespaceSelector)
if err != nil {
-level.Error(c.logger).Log(
-"msg", fmt.Sprintf("failed to convert PodMonitorNamespaceSelector of %q to selector", p.Name),
+c.logger.Error(
+fmt.Sprintf("failed to convert PodMonitorNamespaceSelector of %q to selector", p.Name),
"err", err,
)
return
@@ -609,8 +615,8 @@ func (c *Operator) enqueueForNamespace(store cache.Store, nsName string) {
// Check for Prometheus instances selecting Probes in the NS.
bmNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ProbeNamespaceSelector)
if err != nil {
-level.Error(c.logger).Log(
-"msg", fmt.Sprintf("failed to convert ProbeNamespaceSelector of %q to selector", p.Name),
+c.logger.Error(
+fmt.Sprintf("failed to convert ProbeNamespaceSelector of %q to selector", p.Name),
"err", err,
)
return
@@ -625,8 +631,8 @@ func (c *Operator) enqueueForNamespace(store cache.Store, nsName string) {
// the NS.
ruleNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.RuleNamespaceSelector)
if err != nil {
-level.Error(c.logger).Log(
-"msg", fmt.Sprintf("failed to convert RuleNamespaceSelector of %q to selector", p.Name),
+c.logger.Error(
+fmt.Sprintf("failed to convert RuleNamespaceSelector of %q to selector", p.Name),
"err", err,
)
return
@@ -640,8 +646,8 @@ func (c *Operator) enqueueForNamespace(store cache.Store, nsName string) {
// the NS.
scrapeConfigNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ScrapeConfigNamespaceSelector)
if err != nil {
-level.Error(c.logger).Log(
-"msg", fmt.Sprintf("failed to convert ScrapeConfigNamespaceSelector of %q to selector", p.Name),
+c.logger.Error(
+fmt.Sprintf("failed to convert ScrapeConfigNamespaceSelector of %q to selector", p.Name),
"err", err,
)
return
@@ -653,8 +659,8 @@ func (c *Operator) enqueueForNamespace(store cache.Store, nsName string) {
}
})
if err != nil {
-level.Error(c.logger).Log(
-"msg", "listing all Prometheus instances from cache failed",
+c.logger.Error(
+"listing all Prometheus instances from cache failed",
"err", err,
)
}
@@ -670,7 +676,7 @@ func (c *Operator) Resolve(ss *appsv1.StatefulSet) metav1.Object {
match, promKey := statefulSetKeyToPrometheusKey(key)
if !match {
-level.Debug(c.logger).Log("msg", "StatefulSet key did not match a Prometheus key format", "key", key)
+c.logger.Debug("StatefulSet key did not match a Prometheus key format", "key", key)
return nil
}
@@ -680,7 +686,7 @@ func (c *Operator) Resolve(ss *appsv1.StatefulSet) metav1.Object {
}
if err != nil {
-level.Error(c.logger).Log("msg", "Prometheus lookup failed", "err", err)
+c.logger.Error("Prometheus lookup failed", "err", err)
return nil
}
@@ -707,7 +713,7 @@ func (c *Operator) handleMonitorNamespaceUpdate(oldo, curo interface{}) {
old := oldo.(*v1.Namespace)
cur := curo.(*v1.Namespace)
-level.Debug(c.logger).Log("msg", "update handler", "namespace", cur.GetName(), "old", old.ResourceVersion, "cur", cur.ResourceVersion)
+c.logger.Debug("update handler", "namespace", cur.GetName(), "old", old.ResourceVersion, "cur", cur.ResourceVersion)
// Periodic resync may resend the Namespace without changes
// in-between.
@@ -715,7 +721,7 @@ func (c *Operator) handleMonitorNamespaceUpdate(oldo, curo interface{}) {
return
}
-level.Debug(c.logger).Log("msg", "Monitor namespace updated", "namespace", cur.GetName())
+c.logger.Debug("Monitor namespace updated", "namespace", cur.GetName())
c.metrics.TriggerByCounter("Namespace", operator.UpdateEvent).Inc()
// Check for Prometheus instances selecting ServiceMonitors, PodMonitors,
@@ -732,7 +738,8 @@ func (c *Operator) handleMonitorNamespaceUpdate(oldo, curo interface{}) {
sync, err := k8sutil.LabelSelectionHasChanged(old.Labels, cur.Labels, selector)
if err != nil {
-level.Error(c.logger).Log(
+c.logger.Error(
+"",
"err", err,
"name", p.Name,
"namespace", p.Namespace,
@@ -748,8 +755,8 @@ func (c *Operator) handleMonitorNamespaceUpdate(oldo, curo interface{}) {
}
})
if err != nil {
-level.Error(c.logger).Log(
-"msg", "listing all Prometheus instances from cache failed",
+c.logger.Error(
+"listing all Prometheus instances from cache failed",
"err", err,
)
}
@@ -781,7 +788,7 @@ func (c *Operator) sync(ctx context.Context, key string) error {
return fmt.Errorf("failed to set Prometheus type information: %w", err)
}
-logger := log.With(c.logger, "key", key)
+logger := c.logger.With("key", key)
logDeprecatedFields(logger, p)
// Check if the Prometheus instance is marked for deletion.
@@ -794,11 +801,11 @@ func (c *Operator) sync(ctx context.Context, key string) error {
}
if p.Spec.Paused {
-level.Info(logger).Log("msg", "the resource is paused, not reconciling")
+logger.Info("the resource is paused, not reconciling")
return nil
}
-level.Info(logger).Log("msg", "sync prometheus")
+logger.Info("sync prometheus")
ruleConfigMapNames, err := c.createOrUpdateRuleConfigMaps(ctx, p)
if err != nil {
return err
@@ -806,7 +813,7 @@ func (c *Operator) sync(ctx context.Context, key string) error {
assetStore := assets.NewStoreBuilder(c.kclient.CoreV1(), c.kclient.CoreV1())
-cg, err := prompkg.NewConfigGenerator(c.logger, p, c.endpointSliceSupported)
+cg, err := prompkg.NewConfigGenerator(c.goKitLogger, p, c.endpointSliceSupported)
if err != nil {
return err
}
@@ -835,8 +842,8 @@ func (c *Operator) sync(ctx context.Context, key string) error {
// Ensure we have a StatefulSet running Prometheus deployed and that StatefulSet names are created correctly.
expected := prompkg.ExpectedStatefulSetShardNames(p)
for shard, ssetName := range expected {
-logger := log.With(logger, "statefulset", ssetName, "shard", fmt.Sprintf("%d", shard))
-level.Debug(logger).Log("msg", "reconciling statefulset")
+logger := logger.With("statefulset", ssetName, "shard", fmt.Sprintf("%d", shard))
+logger.Debug("reconciling statefulset")
obj, err := c.ssetInfs.Get(prompkg.KeyToStatefulSetKey(p, key, shard))
exists := !apierrors.IsNotFound(err)
@@ -891,8 +898,8 @@ func (c *Operator) sync(ctx context.Context, key string) error {
operator.SanitizeSTS(sset)
if !exists {
-level.Debug(logger).Log("msg", "no current statefulset found")
-level.Debug(logger).Log("msg", "creating statefulset")
+logger.Debug("no current statefulset found")
+logger.Debug("creating statefulset")
if _, err := ssetClient.Create(ctx, sset, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("creating statefulset failed: %w", err)
}
@@ -900,12 +907,12 @@ func (c *Operator) sync(ctx context.Context, key string) error {
}
if newSSetInputHash == existingStatefulSet.ObjectMeta.Annotations[operator.InputHashAnnotationName] {
-level.Debug(logger).Log("msg", "new statefulset generation inputs match current, skipping any actions")
+logger.Debug("new statefulset generation inputs match current, skipping any actions")
continue
}
-level.Debug(logger).Log(
-"msg", "updating current statefulset because of hash divergence",
+logger.Debug(
+"updating current statefulset because of hash divergence",
"new_hash", newSSetInputHash,
"existing_hash", existingStatefulSet.ObjectMeta.Annotations[operator.InputHashAnnotationName],
)
@@ -922,7 +929,7 @@ func (c *Operator) sync(ctx context.Context, key string) error {
failMsg[i] = cause.Message
}
-level.Info(logger).Log("msg", "recreating StatefulSet because the update operation wasn't possible", "reason", strings.Join(failMsg, ", "))
+logger.Info("recreating StatefulSet because the update operation wasn't possible", "reason", strings.Join(failMsg, ", "))
propagationPolicy := metav1.DeletePropagationForeground
if err := ssetClient.Delete(ctx, sset.GetName(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil {
@@ -956,7 +963,7 @@ func (c *Operator) sync(ctx context.Context, key string) error {
propagationPolicy := metav1.DeletePropagationForeground
if err := ssetClient.Delete(ctx, s.GetName(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil {
level.Error(c.logger).Log("err", err, "name", s.GetName(), "namespace", s.GetNamespace())
c.logger.Error("", "err", err, "name", s.GetName(), "namespace", s.GetNamespace())
}
})
if err != nil {
@@ -997,7 +1004,7 @@ func (c *Operator) UpdateStatus(ctx context.Context, key string) error {
p.Status.Shards = ptr.Deref(p.Spec.Shards, 1)
if _, err = c.mclient.MonitoringV1().Prometheuses(p.Namespace).ApplyStatus(ctx, prompkg.ApplyConfigurationFromPrometheus(p, true), metav1.ApplyOptions{FieldManager: operator.PrometheusOperatorFieldManager, Force: true}); err != nil {
-level.Info(c.logger).Log("msg", "failed to apply prometheus status subresource, trying again without scale fields", "err", err)
+c.logger.Info("failed to apply prometheus status subresource, trying again without scale fields", "err", err)
// Try again, but this time does not update scale subresource.
if _, err = c.mclient.MonitoringV1().Prometheuses(p.Namespace).ApplyStatus(ctx, prompkg.ApplyConfigurationFromPrometheus(p, false), metav1.ApplyOptions{FieldManager: operator.PrometheusOperatorFieldManager, Force: true}); err != nil {
return fmt.Errorf("failed to apply prometheus status subresource: %w", err)
@@ -1007,44 +1014,44 @@ func (c *Operator) UpdateStatus(ctx context.Context, key string) error {
return nil
}
-func logDeprecatedFields(logger log.Logger, p *monitoringv1.Prometheus) {
+func logDeprecatedFields(logger *slog.Logger, p *monitoringv1.Prometheus) {
deprecationWarningf := "field %q is deprecated, field %q should be used instead"
//nolint:staticcheck // Ignore SA1019 this field is marked as deprecated.
if p.Spec.BaseImage != "" {
-level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, "spec.baseImage", "spec.image"))
+logger.Warn(fmt.Sprintf(deprecationWarningf, "spec.baseImage", "spec.image"))
}
//nolint:staticcheck // Ignore SA1019 this field is marked as deprecated.
if p.Spec.Tag != "" {
-level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, "spec.tag", "spec.image"))
+logger.Warn(fmt.Sprintf(deprecationWarningf, "spec.tag", "spec.image"))
}
//nolint:staticcheck // Ignore SA1019 this field is marked as deprecated.
if p.Spec.SHA != "" {
-level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, "spec.sha", "spec.image"))
+logger.Warn(fmt.Sprintf(deprecationWarningf, "spec.sha", "spec.image"))
}
if p.Spec.Thanos != nil {
//nolint:staticcheck // Ignore SA1019 this field is marked as deprecated.
if p.Spec.BaseImage != "" {
-level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, "spec.thanos.baseImage", "spec.thanos.image"))
+logger.Warn(fmt.Sprintf(deprecationWarningf, "spec.thanos.baseImage", "spec.thanos.image"))
}
//nolint:staticcheck // Ignore SA1019 this field is marked as deprecated.
if p.Spec.Tag != "" {
-level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, "spec.thanos.tag", "spec.thanos.image"))
+logger.Warn(fmt.Sprintf(deprecationWarningf, "spec.thanos.tag", "spec.thanos.image"))
}
//nolint:staticcheck // Ignore SA1019 this field is marked as deprecated.
if p.Spec.SHA != "" {
-level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, "spec.thanos.sha", "spec.thanos.image"))
+logger.Warn(fmt.Sprintf(deprecationWarningf, "spec.thanos.sha", "spec.thanos.image"))
}
}
if p.Spec.ServiceMonitorSelector == nil && p.Spec.PodMonitorSelector == nil && p.Spec.ProbeSelector == nil && p.Spec.ScrapeConfigSelector == nil {
-level.Warn(logger).Log("msg", "neither serviceMonitorSelector nor podMonitorSelector, nor probeSelector specified. Custom configuration is deprecated, use additionalScrapeConfigs instead")
+logger.Warn("neither serviceMonitorSelector nor podMonitorSelector, nor probeSelector specified. Custom configuration is deprecated, use additionalScrapeConfigs instead")
}
}
@@ -1102,7 +1109,7 @@ func (c *Operator) createOrUpdateConfigurationSecret(ctx context.Context, p *mon
// exist.
if p.Spec.ServiceMonitorSelector == nil && p.Spec.PodMonitorSelector == nil &&
p.Spec.ProbeSelector == nil && p.Spec.ScrapeConfigSelector == nil {
-level.Debug(c.logger).Log("msg", "neither ServiceMonitor nor PodMonitor, nor Probe selector specified, leaving configuration unmanaged", "prometheus", p.Name, "namespace", p.Namespace)
+c.logger.Debug("neither ServiceMonitor nor PodMonitor, nor Probe selector specified, leaving configuration unmanaged", "prometheus", p.Name, "namespace", p.Namespace)
// make an empty secret
s, err := prompkg.MakeConfigurationSecret(p, c.config, nil)
@@ -1123,7 +1130,7 @@ func (c *Operator) createOrUpdateConfigurationSecret(ctx context.Context, p *mon
return nil
}
-resourceSelector, err := prompkg.NewResourceSelector(c.logger, p, store, c.nsMonInf, c.metrics, c.eventRecorder)
+resourceSelector, err := prompkg.NewResourceSelector(c.goKitLogger, p, store, c.nsMonInf, c.metrics, c.eventRecorder)
if err != nil {
return err
}
@@ -1182,15 +1189,15 @@ func (c *Operator) createOrUpdateConfigurationSecret(ctx context.Context, p *mon
}
sClient := c.kclient.CoreV1().Secrets(p.Namespace)
-additionalScrapeConfigs, err := k8sutil.LoadSecretRef(ctx, c.logger, sClient, p.Spec.AdditionalScrapeConfigs)
+additionalScrapeConfigs, err := k8sutil.LoadSecretRef(ctx, c.goKitLogger, sClient, p.Spec.AdditionalScrapeConfigs)
if err != nil {
return fmt.Errorf("loading additional scrape configs from Secret failed: %w", err)
}
-additionalAlertRelabelConfigs, err := k8sutil.LoadSecretRef(ctx, c.logger, sClient, p.Spec.AdditionalAlertRelabelConfigs)
+additionalAlertRelabelConfigs, err := k8sutil.LoadSecretRef(ctx, c.goKitLogger, sClient, p.Spec.AdditionalAlertRelabelConfigs)
if err != nil {
return fmt.Errorf("loading additional alert relabel configs from Secret failed: %w", err)
}
-additionalAlertManagerConfigs, err := k8sutil.LoadSecretRef(ctx, c.logger, sClient, p.Spec.AdditionalAlertManagerConfigs)
+additionalAlertManagerConfigs, err := k8sutil.LoadSecretRef(ctx, c.goKitLogger, sClient, p.Spec.AdditionalAlertManagerConfigs)
if err != nil {
return fmt.Errorf("loading additional alert manager configs from Secret failed: %w", err)
}
@@ -1224,7 +1231,7 @@ func (c *Operator) createOrUpdateConfigurationSecret(ctx context.Context, p *mon
return fmt.Errorf("creating compressed secret failed: %w", err)
}
-level.Debug(c.logger).Log("msg", "updating Prometheus configuration secret")
+c.logger.Debug("updating Prometheus configuration secret")
return k8sutil.CreateOrUpdateSecret(ctx, sClient, s)
}


@@ -64,7 +64,7 @@ func (c *Operator) createOrUpdateRuleConfigMaps(ctx context.Context, p *monitori
true,
)
-logger := log.With(c.logger, "prometheus", p.Name, "namespace", p.Namespace)
+logger := log.With(c.goKitLogger, "prometheus", p.Name, "namespace", p.Namespace)
promVersion := operator.StringValOrDefault(p.GetCommonPrometheusFields().Version, operator.DefaultPrometheusVersion)
promRuleSelector, err := operator.NewPrometheusRuleSelector(operator.PrometheusFormat, promVersion, p.Spec.RuleSelector, nsLabeler, c.ruleInfs, c.eventRecorder, logger)
@@ -97,7 +97,7 @@ func (c *Operator) createOrUpdateRuleConfigMaps(ctx context.Context, p *monitori
equal := reflect.DeepEqual(newRules, currentRules)
if equal && len(currentConfigMaps) != 0 {
-level.Debug(c.logger).Log(
+level.Debug(c.goKitLogger).Log(
"msg", "no PrometheusRule changes",
"namespace", p.Namespace,
"prometheus", p.Name,
@@ -125,7 +125,7 @@ func (c *Operator) createOrUpdateRuleConfigMaps(ctx context.Context, p *monitori
}
if len(currentConfigMaps) == 0 {
-level.Debug(c.logger).Log(
+level.Debug(c.goKitLogger).Log(
"msg", "no PrometheusRule configmap found, creating new one",
"namespace", p.Namespace,
"prometheus", p.Name,
@@ -148,7 +148,7 @@ func (c *Operator) createOrUpdateRuleConfigMaps(ctx context.Context, p *monitori
}
}
-level.Debug(c.logger).Log(
+level.Debug(c.goKitLogger).Log(
"msg", "updating PrometheusRule",
"namespace", p.Namespace,
"prometheus", p.Name,
@@ -185,7 +185,7 @@ func (c *Operator) selectRuleNamespaces(p *monitoringv1.Prometheus) ([]string, e
}
}
-level.Debug(c.logger).Log(
+level.Debug(c.goKitLogger).Log(
"msg", "selected RuleNamespaces",
"namespaces", strings.Join(namespaces, ","),
"namespace", p.Namespace,