Mirror of https://github.com/coreos/prometheus-operator.git, synced 2026-02-05 15:46:31 +01:00
*: fix golint errors (#3924)
Also fix the missing Thanos Ruler and Alertmanager collector metrics.

Signed-off-by: Simon Pasquier <spasquie@redhat.com>
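For context, here is a minimal, self-contained sketch of the collector pattern this diff converges on: each collector holds one cache.Store per namespace informer and iterates over all of them in Collect, which is what restores the previously missing Alertmanager and Thanos Ruler metrics. This is not the upstream code; the type names and the metric below are illustrative only, and golint separately drives the renames visible in the diff (Json to JSON, Url to URL, unexported constructors).

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"k8s.io/client-go/tools/cache"
)

// exampleDesc is a hypothetical metric used for illustration; the real
// collectors emit per-object metrics instead.
var exampleDesc = prometheus.NewDesc(
	"example_objects",
	"Number of objects seen across all informer stores.",
	nil, nil,
)

// multiStoreCollector mirrors the shape this commit gives the Prometheus,
// Alertmanager, and Thanos Ruler collectors: a slice of stores, one per
// watched namespace, instead of a single store.
type multiStoreCollector struct {
	stores []cache.Store
}

// newMultiStoreCollectorForStores follows the new unexported
// *CollectorForStores constructors in the diff.
func newMultiStoreCollectorForStores(s ...cache.Store) *multiStoreCollector {
	return &multiStoreCollector{stores: s}
}

// Describe implements the prometheus.Collector interface.
func (c *multiStoreCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- exampleDesc
}

// Collect implements the prometheus.Collector interface. Walking every
// store, rather than only one, is the behavioral fix.
func (c *multiStoreCollector) Collect(ch chan<- prometheus.Metric) {
	n := 0
	for _, s := range c.stores {
		n += len(s.List())
	}
	ch <- prometheus.MustNewConstMetric(exampleDesc, prometheus.GaugeValue, float64(n))
}

func main() {
	// In the operators, one store per namespace comes from
	// informer.Informer().GetStore(); plain stores stand in here.
	reg := prometheus.NewRegistry()
	reg.MustRegister(newMultiStoreCollectorForStores(
		cache.NewStore(cache.MetaNamespaceKeyFunc),
		cache.NewStore(cache.MetaNamespaceKeyFunc),
	))
}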
@@ -4,5 +4,6 @@ run:
 linters:
   disable-all: true
   enable:
+  - golint
   - govet
   - unused
@@ -63,7 +63,7 @@ const (
 
 const (
 	logFormatLogfmt = "logfmt"
-	logFormatJson   = "json"
+	logFormatJSON   = "json"
 )
 
 const (
@@ -140,7 +140,7 @@ var (
 	}
 	availableLogFormats = []string{
 		logFormatLogfmt,
-		logFormatJson,
+		logFormatJSON,
 	}
 	cfg             = operator.Config{}
 	rcCPU, rcMemory string
@@ -214,7 +214,7 @@ func Main() int {
 	}
 
 	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
-	if cfg.LogFormat == logFormatJson {
+	if cfg.LogFormat == logFormatJSON {
 		logger = log.NewJSONLogger(log.NewSyncWriter(os.Stdout))
 	}
 	switch cfg.LogLevel {
@@ -182,7 +182,7 @@ func validateRules(content []byte) error {
 	rule := &admission.PrometheusRules{}
 	err := yaml.Unmarshal(content, rule)
 	if err != nil {
-		return errors.New(fmt.Sprintf("unable load prometheus rule %v", err))
+		return fmt.Errorf("unable load prometheus rule: %w", err)
 	}
 	rules, errorsArray := rulefmt.Parse(rule.Spec.Raw)
 	if len(errorsArray) != 0 {
@@ -196,7 +196,7 @@ func validateRules(content []byte) error {
 	}
 	for _, group := range rules.Groups {
 		if len(group.Rules) == 0 {
-			return errors.New(fmt.Sprintf("no rules found in group: %s ", group.Name))
+			return fmt.Errorf("no rules found in group: %s: %w", group.Name, err)
 		}
 	}
 	return nil
@@ -37,7 +37,7 @@ import (
 
 const (
 	logFormatLogfmt = "logfmt"
-	logFormatJson   = "json"
+	logFormatJSON   = "json"
 
 	logLevelDebug = "debug"
 	logLevelInfo  = "info"
@@ -56,7 +56,7 @@ const (
 var (
 	availableLogFormats = []string{
 		logFormatLogfmt,
-		logFormatJson,
+		logFormatJSON,
 	}
 	availableLogLevels = []string{
 		logLevelDebug,
@@ -118,7 +118,7 @@ func main() {
 
 	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
 
-	if *logFormat == logFormatJson {
+	if *logFormat == logFormatJSON {
 		logger = log.NewJSONLogger(log.NewSyncWriter(os.Stdout))
 	}
 
@@ -33,11 +33,11 @@ var (
 )
 
 type alertmanagerCollector struct {
-	store cache.Store
+	stores []cache.Store
 }
 
-func NewAlertmanagerCollector(s cache.Store) *alertmanagerCollector {
-	return &alertmanagerCollector{store: s}
+func newAlertmanagerCollectorForStores(s ...cache.Store) *alertmanagerCollector {
+	return &alertmanagerCollector{stores: s}
 }
 
 // Describe implements the prometheus.Collector interface.
@@ -47,8 +47,10 @@ func (c *alertmanagerCollector) Describe(ch chan<- *prometheus.Desc) {
 
 // Collect implements the prometheus.Collector interface.
 func (c *alertmanagerCollector) Collect(ch chan<- prometheus.Metric) {
-	for _, p := range c.store.List() {
-		c.collectAlertmanager(ch, p.(*v1.Alertmanager))
+	for _, s := range c.stores {
+		for _, p := range s.List() {
+			c.collectAlertmanager(ch, p.(*v1.Alertmanager))
+		}
 	}
 }
@@ -161,6 +161,12 @@ func (c *Operator) bootstrap(ctx context.Context) error {
 		return errors.Wrap(err, "error creating alertmanager informers")
 	}
 
+	var alertmanagerStores []cache.Store
+	for _, informer := range c.alrtInfs.GetInformers() {
+		alertmanagerStores = append(alertmanagerStores, informer.Informer().GetStore())
+	}
+	c.metrics.MustRegister(newAlertmanagerCollectorForStores(alertmanagerStores...))
+
 	c.alrtCfgInfs, err = informers.NewInformersForResource(
 		informers.NewMonitoringInformerFactories(
 			c.config.Namespaces.AllowList,
@@ -1355,7 +1361,7 @@ func ListOptions(name string) metav1.ListOptions {
 	}
 }
 
-func AlertmanagerStatus(ctx context.Context, kclient kubernetes.Interface, a *monitoringv1.Alertmanager) (*monitoringv1.AlertmanagerStatus, []v1.Pod, error) {
+func Status(ctx context.Context, kclient kubernetes.Interface, a *monitoringv1.Alertmanager) (*monitoringv1.AlertmanagerStatus, []v1.Pod, error) {
 	res := &monitoringv1.AlertmanagerStatus{Paused: a.Spec.Paused}
 
 	pods, err := kclient.CoreV1().Pods(a.Namespace).List(ctx, ListOptions(a.Name))
@@ -80,7 +80,7 @@ type objectReference struct {
 	namespace string
 }
 
-func parsePrometheusStatusUrl(path string) objectReference {
+func parsePrometheusStatusURL(path string) objectReference {
 	matches := prometheusRoute.FindAllStringSubmatch(path, -1)
 	ns := ""
 	name := ""
@@ -98,7 +98,7 @@ func parsePrometheusStatusUrl(path string) objectReference {
 }
 
 func (api *API) prometheusStatus(w http.ResponseWriter, req *http.Request) {
-	or := parsePrometheusStatusUrl(req.URL.Path)
+	or := parsePrometheusStatusURL(req.URL.Path)
 
 	p, err := api.mclient.MonitoringV1().Prometheuses(or.namespace).Get(req.Context(), or.name, metav1.GetOptions{})
 	if err != nil {
@@ -109,7 +109,7 @@ func (api *API) prometheusStatus(w http.ResponseWriter, req *http.Request) {
 		return
 	}
 
-	p.Status, _, err = prometheus.PrometheusStatus(req.Context(), api.kclient, p)
+	p.Status, _, err = prometheus.Status(req.Context(), api.kclient, p)
 	if err != nil {
 		api.logger.Log("error", err)
 	}
@@ -69,7 +69,7 @@ func NewClusterConfig(host string, tlsInsecure bool, tlsConfig *rest.TLSClientCo
 	if kubeconfigFile != "" {
 		cfg, err = clientcmd.BuildConfigFromFlags("", kubeconfigFile)
 		if err != nil {
-			return nil, fmt.Errorf("Error creating config from specified file: %s %v\n", kubeconfigFile, err)
+			return nil, fmt.Errorf("error creating config from %s: %w", kubeconfigFile, err)
 		}
 	} else {
 		if len(host) == 0 {
@@ -82,7 +82,7 @@ func NewClusterConfig(host string, tlsInsecure bool, tlsConfig *rest.TLSClientCo
 		}
 		hostURL, err := url.Parse(host)
 		if err != nil {
-			return nil, fmt.Errorf("error parsing host url %s : %v", host, err)
+			return nil, fmt.Errorf("error parsing host url %s: %w", host, err)
 		}
 		if hostURL.Scheme == "https" {
 			cfg.TLSClientConfig = *tlsConfig
@@ -43,11 +43,7 @@ type prometheusCollector struct {
 	stores []cache.Store
 }
 
-func NewPrometheusCollector(s cache.Store) *prometheusCollector {
-	return &prometheusCollector{stores: []cache.Store{s}}
-}
-
-func NewPrometheusCollectorForStores(s ...cache.Store) *prometheusCollector {
+func newPrometheusCollectorForStores(s ...cache.Store) *prometheusCollector {
 	return &prometheusCollector{stores: s}
 }
 
@@ -177,7 +177,7 @@ func New(ctx context.Context, conf operator.Config, logger log.Logger, r prometh
 	for _, informer := range c.promInfs.GetInformers() {
 		promStores = append(promStores, informer.Informer().GetStore())
 	}
-	c.metrics.MustRegister(NewPrometheusCollectorForStores(promStores...))
+	c.metrics.MustRegister(newPrometheusCollectorForStores(promStores...))
 
 	c.smonInfs, err = informers.NewInformersForResource(
 		informers.NewMonitoringInformerFactories(
@@ -1327,10 +1327,10 @@ func ListOptions(name string) metav1.ListOptions {
 	}
 }
 
-// PrometheusStatus evaluates the current status of a Prometheus deployment with
-// respect to its specified resource object. It return the status and a list of
+// Status evaluates the current status of a Prometheus deployment with
+// respect to its specified resource object. It returns the status and a list of
 // pods that are not updated.
-func PrometheusStatus(ctx context.Context, kclient kubernetes.Interface, p *monitoringv1.Prometheus) (*monitoringv1.PrometheusStatus, []v1.Pod, error) {
+func Status(ctx context.Context, kclient kubernetes.Interface, p *monitoringv1.Prometheus) (*monitoringv1.PrometheusStatus, []v1.Pod, error) {
 	res := &monitoringv1.PrometheusStatus{Paused: p.Spec.Paused}
 
 	pods, err := kclient.CoreV1().Pods(p.Namespace).List(ctx, ListOptions(p.Name))
@@ -32,12 +32,12 @@ var (
 )
 
 type thanosRulerCollector struct {
-	store cache.Store
+	stores []cache.Store
 }
 
-// NewThanosRulerCollector creates a thanosRulerCollector initialized with the given cache store
-func NewThanosRulerCollector(s cache.Store) *thanosRulerCollector {
-	return &thanosRulerCollector{store: s}
+// newThanosRulerCollectorForStores creates a thanosRulerCollector initialized with the given cache store
+func newThanosRulerCollectorForStores(s ...cache.Store) *thanosRulerCollector {
+	return &thanosRulerCollector{stores: s}
 }
 
 // Describe implements the prometheus.Collector interface.
@@ -47,8 +47,10 @@ func (c *thanosRulerCollector) Describe(ch chan<- *prometheus.Desc) {
 
 // Collect implements the prometheus.Collector interface.
 func (c *thanosRulerCollector) Collect(ch chan<- prometheus.Metric) {
-	for _, tr := range c.store.List() {
-		c.collectThanos(ch, tr.(*v1.ThanosRuler))
+	for _, s := range c.stores {
+		for _, tr := range s.List() {
+			c.collectThanos(ch, tr.(*v1.ThanosRuler))
+		}
 	}
 }
@@ -163,6 +163,12 @@ func New(ctx context.Context, conf operator.Config, logger log.Logger, r prometh
 		return nil, errors.Wrap(err, "error creating thanosruler informers")
 	}
 
+	var thanosStores []cache.Store
+	for _, informer := range o.thanosRulerInfs.GetInformers() {
+		thanosStores = append(thanosStores, informer.Informer().GetStore())
+	}
+	o.metrics.MustRegister(newThanosRulerCollectorForStores(thanosStores...))
+
 	o.ruleInfs, err = informers.NewInformersForResource(
 		informers.NewMonitoringInformerFactories(
 			o.config.Namespaces.AllowList,
@@ -711,10 +717,10 @@ func ListOptions(name string) metav1.ListOptions {
 	}
 }
 
-// ThanosRulerStatus evaluates the current status of a ThanosRuler deployment with
-// respect to its specified resource object. It return the status and a list of
+// RulerStatus evaluates the current status of a ThanosRuler deployment with
+// respect to its specified resource object. It returns the status and a list of
 // pods that are not updated.
-func ThanosRulerStatus(ctx context.Context, kclient kubernetes.Interface, tr *monitoringv1.ThanosRuler) (*monitoringv1.ThanosRulerStatus, []v1.Pod, error) {
+func RulerStatus(ctx context.Context, kclient kubernetes.Interface, tr *monitoringv1.ThanosRuler) (*monitoringv1.ThanosRulerStatus, []v1.Pod, error) {
 	res := &monitoringv1.ThanosRulerStatus{Paused: tr.Spec.Paused}
 
 	pods, err := kclient.CoreV1().Pods(tr.Namespace).List(ctx, ListOptions(tr.Name))
@@ -29,7 +29,7 @@ import (
 	testFramework "github.com/prometheus-operator/prometheus-operator/test/framework"
 )
 
-func testAlertmanagerInstanceNamespaces_AllNs(t *testing.T) {
+func testAlertmanagerInstanceNamespacesAllNs(t *testing.T) {
 	ctx := framework.NewTestCtx(t)
 	defer ctx.Cleanup(t)
 
@@ -72,7 +72,7 @@ func testAlertmanagerInstanceNamespaces_AllNs(t *testing.T) {
 	}
 }
 
-func testAlertmanagerInstanceNamespaces_DenyNs(t *testing.T) {
+func testAlertmanagerInstanceNamespacesDenyNs(t *testing.T) {
 	ctx := framework.NewTestCtx(t)
 	defer ctx.Cleanup(t)
 
@@ -101,7 +101,7 @@ func testAlertmanagerInstanceNamespaces_DenyNs(t *testing.T) {
 	}
 }
 
-func testAlertmanagerInstanceNamespaces_AllowList(t *testing.T) {
+func testAlertmanagerInstanceNamespacesAllowList(t *testing.T) {
 	ctx := framework.NewTestCtx(t)
 	defer ctx.Cleanup(t)
 
@@ -315,7 +315,7 @@ func testAMClusterGossipSilences(t *testing.T) {
 		}
 	}
 
-	silId, err := framework.CreateSilence(ns, "alertmanager-test-0")
+	silID, err := framework.CreateSilence(ns, "alertmanager-test-0")
 	if err != nil {
 		t.Fatalf("failed to create silence: %v", err)
 	}
@@ -331,8 +331,8 @@ func testAMClusterGossipSilences(t *testing.T) {
 			return false, nil
 		}
 
-		if *silences[0].ID != silId {
-			return false, errors.Errorf("expected silence id on alertmanager %v to match id of created silence '%v' but got %v", i, silId, *silences[0].ID)
+		if *silences[0].ID != silID {
+			return false, errors.Errorf("expected silence id on alertmanager %v to match id of created silence '%v' but got %v", i, silID, *silences[0].ID)
 		}
 		return true, nil
 	})
@@ -760,7 +760,7 @@ func testAlertmanagerConfigCRD(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	slackApiURLSecret := &v1.Secret{
+	slackAPIURLSecret := &v1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "s-receiver-api-url",
 		},
@@ -768,7 +768,7 @@ func testAlertmanagerConfigCRD(t *testing.T) {
 			"api-url": []byte("http://slack.example.com"),
 		},
 	}
-	if _, err := framework.KubeClient.CoreV1().Secrets(ns).Create(context.TODO(), slackApiURLSecret, metav1.CreateOptions{}); err != nil {
+	if _, err := framework.KubeClient.CoreV1().Secrets(ns).Create(context.TODO(), slackAPIURLSecret, metav1.CreateOptions{}); err != nil {
 		t.Fatal(err)
 	}
 
@@ -244,10 +244,10 @@ func TestDenylist(t *testing.T) {
 func TestPromInstanceNs(t *testing.T) {
 	skipPrometheusTests(t)
 	testFuncs := map[string]func(t *testing.T){
-		"AllNs":             testPrometheusInstanceNamespaces_AllNs,
-		"AllowList":         testPrometheusInstanceNamespaces_AllowList,
-		"DenyList":          testPrometheusInstanceNamespaces_DenyList,
-		"NamespaceNotFound": testPrometheusInstanceNamespaces_NamespaceNotFound,
+		"AllNs":             testPrometheusInstanceNamespacesAllNs,
+		"AllowList":         testPrometheusInstanceNamespacesAllowList,
+		"DenyList":          testPrometheusInstanceNamespacesDenyList,
+		"NamespaceNotFound": testPrometheusInstanceNamespacesNamespaceNotFound,
 	}
 
 	for name, f := range testFuncs {
@@ -259,9 +259,9 @@ func TestPromInstanceNs(t *testing.T) {
 func TestAlertmanagerInstanceNs(t *testing.T) {
 	skipAlertmanagerTests(t)
 	testFuncs := map[string]func(t *testing.T){
-		"AllNs":     testAlertmanagerInstanceNamespaces_AllNs,
-		"AllowList": testAlertmanagerInstanceNamespaces_AllowList,
-		"DenyNs":    testAlertmanagerInstanceNamespaces_DenyNs,
+		"AllNs":     testAlertmanagerInstanceNamespacesAllNs,
+		"AllowList": testAlertmanagerInstanceNamespacesAllowList,
+		"DenyNs":    testAlertmanagerInstanceNamespacesDenyNs,
 	}
 
 	for name, f := range testFuncs {
@@ -25,7 +25,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-func testPrometheusInstanceNamespaces_AllNs(t *testing.T) {
+func testPrometheusInstanceNamespacesAllNs(t *testing.T) {
 	ctx := framework.NewTestCtx(t)
 	defer ctx.Cleanup(t)
 
@@ -58,7 +58,7 @@ func testPrometheusInstanceNamespaces_AllNs(t *testing.T) {
 	}
 }
 
-func testPrometheusInstanceNamespaces_DenyList(t *testing.T) {
+func testPrometheusInstanceNamespacesDenyList(t *testing.T) {
 	ctx := framework.NewTestCtx(t)
 	defer ctx.Cleanup(t)
 
@@ -182,7 +182,7 @@ func testPrometheusInstanceNamespaces_DenyList(t *testing.T) {
 	}
 }
 
-func testPrometheusInstanceNamespaces_AllowList(t *testing.T) {
+func testPrometheusInstanceNamespacesAllowList(t *testing.T) {
 	ctx := framework.NewTestCtx(t)
 	defer ctx.Cleanup(t)
 
@@ -315,11 +315,11 @@ func testPrometheusInstanceNamespaces_AllowList(t *testing.T) {
 	}
 }
 
-// testPrometheusInstanceNamespaces_NamespaceNotFound verifies that the
+// testPrometheusInstanceNamespacesNamespaceNotFound verifies that the
 // operator can reconcile Prometheus and associated resources even when
 // it's configured to watch namespaces that don't exist.
 // See https://github.com/prometheus-operator/prometheus-operator/issues/3347
-func testPrometheusInstanceNamespaces_NamespaceNotFound(t *testing.T) {
+func testPrometheusInstanceNamespacesNamespaceNotFound(t *testing.T) {
 	ctx := framework.NewTestCtx(t)
 	defer ctx.Cleanup(t)
 
@@ -3338,10 +3338,10 @@ func testPromStaticProbe(t *testing.T) {
 	group := "probe-test"
 	svc := framework.MakePrometheusService(prometheusName, group, v1.ServiceTypeClusterIP)
 
-	proberUrl := blackboxExporterName + ":9115"
+	proberURL := blackboxExporterName + ":9115"
 	targets := []string{svc.Name + ":9090"}
 
-	probe := framework.MakeBasicStaticProbe(group, proberUrl, targets)
+	probe := framework.MakeBasicStaticProbe(group, proberURL, targets)
 	if _, err := framework.MonClientV1.Probes(ns).Create(context.TODO(), probe, metav1.CreateOptions{}); err != nil {
 		t.Fatal("Creating Probe failed: ", err)
 	}
@@ -3362,7 +3362,7 @@ func testPromStaticProbe(t *testing.T) {
 		ctx.AddFinalizerFn(finalizerFn)
 	}
 
-	expectedURL := url.URL{Host: proberUrl, Scheme: "http", Path: "/probe"}
+	expectedURL := url.URL{Host: proberURL, Scheme: "http", Path: "/probe"}
 	q := expectedURL.Query()
 	q.Set("module", "http_2xx")
 	q.Set("target", targets[0])
@@ -24,7 +24,7 @@ import (
 	"k8s.io/client-go/kubernetes"
 )
 
-func CreateMutatingHook(kubeClient kubernetes.Interface, certBytes []byte, namespace, yamlPath string) (finalizerFn, error) {
+func createMutatingHook(kubeClient kubernetes.Interface, certBytes []byte, namespace, yamlPath string) (FinalizerFn, error) {
 	h, err := parseMutatingHookYaml(yamlPath)
 	if err != nil {
 		return nil, errors.Wrap(err, fmt.Sprintf("Failed parsing mutating webhook"))
@@ -38,12 +38,12 @@ func CreateMutatingHook(kubeClient kubernetes.Interface, certBytes []byte, names
 		return nil, errors.Wrap(err, fmt.Sprintf("failed to create mutating webhook %s", h.Name))
 	}
 
-	finalizerFn := func() error { return DeleteMutatingWebhook(kubeClient, h.Name) }
+	finalizerFn := func() error { return deleteMutatingWebhook(kubeClient, h.Name) }
 
 	return finalizerFn, nil
 }
 
-func CreateValidatingHook(kubeClient kubernetes.Interface, certBytes []byte, namespace, yamlPath string) (finalizerFn, error) {
+func createValidatingHook(kubeClient kubernetes.Interface, certBytes []byte, namespace, yamlPath string) (FinalizerFn, error) {
 	h, err := parseValidatingHookYaml(yamlPath)
 	if err != nil {
 		return nil, errors.Wrap(err, fmt.Sprintf("Failed parsing mutating webhook"))
@@ -57,16 +57,16 @@ func CreateValidatingHook(kubeClient kubernetes.Interface, certBytes []byte, nam
 		return nil, errors.Wrap(err, fmt.Sprintf("failed to create validating webhook %s", h.Name))
 	}
 
-	finalizerFn := func() error { return DeleteValidatingWebhook(kubeClient, h.Name) }
+	finalizerFn := func() error { return deleteValidatingWebhook(kubeClient, h.Name) }
 
 	return finalizerFn, nil
 }
 
-func DeleteMutatingWebhook(kubeClient kubernetes.Interface, name string) error {
+func deleteMutatingWebhook(kubeClient kubernetes.Interface, name string) error {
 	return kubeClient.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), name, metav1.DeleteOptions{})
 }
 
-func DeleteValidatingWebhook(kubeClient kubernetes.Interface, name string) error {
+func deleteValidatingWebhook(kubeClient kubernetes.Interface, name string) error {
 	return kubeClient.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(context.TODO(), name, metav1.DeleteOptions{})
 }
 
@@ -22,7 +22,7 @@ import (
 	"k8s.io/client-go/kubernetes"
 )
 
-func CreateClusterRoleBinding(kubeClient kubernetes.Interface, ns string, relativePath string) (finalizerFn, error) {
+func createClusterRoleBinding(kubeClient kubernetes.Interface, ns string, relativePath string) (FinalizerFn, error) {
 	finalizerFn := func() error { return DeleteClusterRoleBinding(kubeClient, ns, relativePath) }
 	clusterRoleBinding, err := parseClusterRoleBindingYaml(relativePath)
 	if err != nil {
@@ -25,10 +25,10 @@ import (
 
 type TestCtx struct {
 	ID         string
-	cleanUpFns []finalizerFn
+	cleanUpFns []FinalizerFn
 }
 
-type finalizerFn func() error
+type FinalizerFn func() error
 
 func (f *Framework) NewTestCtx(t *testing.T) TestCtx {
 	// TestCtx is used among others for namespace names where '/' is forbidden
@@ -68,6 +68,6 @@ func (ctx *TestCtx) Cleanup(t *testing.T) {
 	}
 }
 
-func (ctx *TestCtx) AddFinalizerFn(fn finalizerFn) {
+func (ctx *TestCtx) AddFinalizerFn(fn FinalizerFn) {
 	ctx.cleanUpFns = append(ctx.cleanUpFns, fn)
 }
@@ -174,11 +174,11 @@ func (f *Framework) MakeEchoDeployment(group string) *appsv1.Deployment {
 // Returns the CA, which can bs used to access the operator over TLS
 func (f *Framework) CreatePrometheusOperator(ns, opImage string, namespaceAllowlist,
 	namespaceDenylist, prometheusInstanceNamespaces, alertmanagerInstanceNamespaces []string,
-	createRuleAdmissionHooks, createClusterRoleBindings bool) ([]finalizerFn, error) {
+	createRuleAdmissionHooks, createClusterRoleBindings bool) ([]FinalizerFn, error) {
 
-	var finalizers []finalizerFn
+	var finalizers []FinalizerFn
 
-	_, err := CreateServiceAccount(
+	_, err := createServiceAccount(
 		f.KubeClient,
 		ns,
 		"../../example/rbac/prometheus-operator/prometheus-operator-service-account.yaml",
@@ -199,7 +199,7 @@ func (f *Framework) CreatePrometheusOperator(ns, opImage string, namespaceAllowl
 	}
 
 	if createClusterRoleBindings {
-		if _, err := CreateClusterRoleBinding(f.KubeClient, ns, "../../example/rbac/prometheus-operator/prometheus-operator-cluster-role-binding.yaml"); err != nil && !apierrors.IsAlreadyExists(err) {
+		if _, err := createClusterRoleBinding(f.KubeClient, ns, "../../example/rbac/prometheus-operator/prometheus-operator-cluster-role-binding.yaml"); err != nil && !apierrors.IsAlreadyExists(err) {
 			return nil, errors.Wrap(err, "failed to create prometheus cluster role binding")
 		}
 	} else {
@@ -382,17 +382,17 @@ func (f *Framework) CreatePrometheusOperator(ns, opImage string, namespaceAllowl
 	}
 
 	if createRuleAdmissionHooks {
-		if finalizer, err := CreateMutatingHook(f.KubeClient, certBytes, ns, "../../test/framework/resources/prometheus-operator-mutatingwebhook.yaml"); err != nil {
+		finalizer, err := createMutatingHook(f.KubeClient, certBytes, ns, "../../test/framework/resources/prometheus-operator-mutatingwebhook.yaml")
+		if err != nil {
 			return nil, errors.Wrap(err, "failed to create mutating webhook")
-		} else {
-			finalizers = append(finalizers, finalizer)
 		}
+		finalizers = append(finalizers, finalizer)
 
-		if finalizer, err := CreateValidatingHook(f.KubeClient, certBytes, ns, "../../test/framework/resources/prometheus-operator-validatingwebhook.yaml"); err != nil {
+		finalizer, err = createValidatingHook(f.KubeClient, certBytes, ns, "../../test/framework/resources/prometheus-operator-validatingwebhook.yaml")
+		if err != nil {
 			return nil, errors.Wrap(err, "failed to create validating webhook")
-		} else {
-			finalizers = append(finalizers, finalizer)
 		}
+		finalizers = append(finalizers, finalizer)
 	}
 
 	return finalizers, nil
@@ -402,7 +402,7 @@ func (ctx *TestCtx) SetupPrometheusRBAC(t *testing.T, ns string, kubeClient kube
 	if _, err := CreateClusterRole(kubeClient, "../../example/rbac/prometheus/prometheus-cluster-role.yaml"); err != nil && !apierrors.IsAlreadyExists(err) {
 		t.Fatalf("failed to create prometheus cluster role: %v", err)
 	}
-	if finalizerFn, err := CreateServiceAccount(kubeClient, ns, "../../example/rbac/prometheus/prometheus-service-account.yaml"); err != nil {
+	if finalizerFn, err := createServiceAccount(kubeClient, ns, "../../example/rbac/prometheus/prometheus-service-account.yaml"); err != nil {
 		t.Fatal(errors.Wrap(err, "failed to create prometheus service account"))
 	} else {
 		ctx.AddFinalizerFn(finalizerFn)
@@ -419,13 +419,13 @@ func (ctx *TestCtx) SetupPrometheusRBACGlobal(t *testing.T, ns string, kubeClien
 	if _, err := CreateClusterRole(kubeClient, "../../example/rbac/prometheus/prometheus-cluster-role.yaml"); err != nil && !apierrors.IsAlreadyExists(err) {
 		t.Fatalf("failed to create prometheus cluster role: %v", err)
 	}
-	if finalizerFn, err := CreateServiceAccount(kubeClient, ns, "../../example/rbac/prometheus/prometheus-service-account.yaml"); err != nil {
+	if finalizerFn, err := createServiceAccount(kubeClient, ns, "../../example/rbac/prometheus/prometheus-service-account.yaml"); err != nil {
 		t.Fatal(errors.Wrap(err, "failed to create prometheus service account"))
 	} else {
 		ctx.AddFinalizerFn(finalizerFn)
 	}
 
-	if finalizerFn, err := CreateClusterRoleBinding(kubeClient, ns, "../../example/rbac/prometheus/prometheus-cluster-role-binding.yaml"); err != nil && !apierrors.IsAlreadyExists(err) {
+	if finalizerFn, err := createClusterRoleBinding(kubeClient, ns, "../../example/rbac/prometheus/prometheus-cluster-role-binding.yaml"); err != nil && !apierrors.IsAlreadyExists(err) {
 		t.Fatal(errors.Wrap(err, "failed to create prometheus cluster role binding"))
 	} else {
 		ctx.AddFinalizerFn(finalizerFn)
@@ -298,7 +298,7 @@ func (f *Framework) WaitForPrometheusReady(p *monitoringv1.Prometheus, timeout t
 	var pollErr error
 
 	err := wait.Poll(2*time.Second, timeout, func() (bool, error) {
-		st, _, pollErr := prometheus.PrometheusStatus(context.Background(), f.KubeClient, p)
+		st, _, pollErr := prometheus.Status(context.Background(), f.KubeClient, p)
 
 		if pollErr != nil {
 			return false, nil
@@ -23,7 +23,7 @@ import (
 	"k8s.io/client-go/kubernetes"
 )
 
-func CreateRoleBinding(kubeClient kubernetes.Interface, ns string, relativePath string) (finalizerFn, error) {
+func CreateRoleBinding(kubeClient kubernetes.Interface, ns string, relativePath string) (FinalizerFn, error) {
 	finalizerFn := func() error { return DeleteRoleBinding(kubeClient, ns, relativePath) }
 	roleBinding, err := parseRoleBindingYaml(relativePath)
 	if err != nil {
@@ -34,7 +34,7 @@ func CreateRoleBinding(kubeClient kubernetes.Interface, ns string, relativePath
 	return finalizerFn, err
 }
 
-func CreateRoleBindingForSubjectNamespace(kubeClient kubernetes.Interface, ns, subjectNs string, relativePath string) (finalizerFn, error) {
+func CreateRoleBindingForSubjectNamespace(kubeClient kubernetes.Interface, ns, subjectNs string, relativePath string) (FinalizerFn, error) {
 	finalizerFn := func() error { return DeleteRoleBinding(kubeClient, ns, relativePath) }
 	roleBinding, err := parseRoleBindingYaml(relativePath)
@@ -31,7 +31,7 @@ func MakeSecretWithCert(kubeClient kubernetes.Interface, ns, name string, keyLis
 		Data: map[string][]byte{},
 	}
 
-	for i, _ := range keyList {
+	for i := range keyList {
 		secret.Data[keyList[i]] = dataList[i]
 	}
 
@@ -40,7 +40,7 @@ func MakeService(pathToYaml string) (*v1.Service, error) {
 	return &resource, nil
 }
 
-func CreateServiceAndWaitUntilReady(kubeClient kubernetes.Interface, namespace string, service *v1.Service) (finalizerFn, error) {
+func CreateServiceAndWaitUntilReady(kubeClient kubernetes.Interface, namespace string, service *v1.Service) (FinalizerFn, error) {
 	finalizerFn := func() error { return DeleteServiceAndWaitUntilGone(kubeClient, namespace, service.Name) }
 
 	if _, err := kubeClient.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}); err != nil {
@@ -22,7 +22,7 @@ import (
 	"k8s.io/client-go/kubernetes"
 )
 
-func CreateServiceAccount(kubeClient kubernetes.Interface, namespace string, relativPath string) (finalizerFn, error) {
+func createServiceAccount(kubeClient kubernetes.Interface, namespace string, relativPath string) (FinalizerFn, error) {
 	finalizerFn := func() error { return DeleteServiceAccount(kubeClient, namespace, relativPath) }
 
 	serviceAccount, err := parseServiceAccountYaml(relativPath)
@@ -69,7 +69,7 @@ func (f *Framework) WaitForThanosRulerReady(tr *monitoringv1.ThanosRuler, timeou
 	var pollErr error
 
 	err := wait.Poll(2*time.Second, timeout, func() (bool, error) {
-		st, _, pollErr := thanos.ThanosRulerStatus(context.Background(), f.KubeClient, tr)
+		st, _, pollErr := thanos.RulerStatus(context.Background(), f.KubeClient, tr)
 
 		if pollErr != nil {
 			return false, nil