Mirror of https://github.com/coreos/prometheus-operator.git, synced 2026-02-05 06:45:27 +01:00
chore: reduce e2e test timeouts
This change decreases all e2e test timeouts of 10 minutes or more.

Signed-off-by: Simon Pasquier <spasquie@redhat.com>
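Every hunk below tunes the same pattern: wait.PollUntilContextTimeout (from k8s.io/apimachinery/pkg/util/wait) polls a condition at a fixed interval, the closure records the most recent failure in pollErr, and the caller surfaces pollErr when the (now shorter) timeout expires. A minimal standalone sketch of that pattern using the commit's new 5-second/5-minute values; checkReady is a hypothetical stand-in for the real status checks, while the wait API is the real one:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// checkReady stands in for a real readiness probe, such as reading a
// DaemonSet's status; it is not part of this commit.
func checkReady() error {
	return fmt.Errorf("not ready yet")
}

func main() {
	var pollErr error
	// 5*time.Second / 5*time.Minute mirror the reduced values below; the
	// final false means the first check waits one interval instead of
	// running immediately. Since checkReady never succeeds here, this
	// sketch polls for the full five minutes before giving up.
	err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 5*time.Minute, false,
		func(ctx context.Context) (bool, error) {
			if err := checkReady(); err != nil {
				pollErr = err     // remember why the last attempt failed
				return false, nil // nil error: keep polling
			}
			return true, nil // condition met, stop polling
		})
	if err != nil {
		// err only says the wait ended (e.g. deadline exceeded);
		// pollErr carries the actionable reason.
		fmt.Printf("wait failed: %v (last poll error: %v)\n", err, pollErr)
	}
}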
@@ -2433,7 +2433,7 @@ func testPromAlertmanagerDiscovery(t *testing.T) {
 		t.Fatal(fmt.Errorf("creating Alertmanager service failed: %w", err))
 	}
 
-	err = wait.PollUntilContextTimeout(context.Background(), time.Second, 18*time.Minute, false, isAlertmanagerDiscoveryWorking(ns, svc.Name, alertmanagerName))
+	err = wait.PollUntilContextTimeout(context.Background(), time.Second, 5*time.Minute, false, isAlertmanagerDiscoveryWorking(ns, svc.Name, alertmanagerName))
 	if err != nil {
 		t.Fatal(fmt.Errorf("validating Prometheus Alertmanager discovery failed: %w", err))
 	}
@@ -241,7 +241,7 @@ func testPromAgentDaemonSetResourceUpdate(t *testing.T) {
 	require.NoError(t, err)
 
 	var pollErr error
-	err = wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 20*time.Minute, false, func(ctx context.Context) (bool, error) {
+	err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
 		dms, err = framework.KubeClient.AppsV1().DaemonSets(ns).Get(ctx, dmsName, metav1.GetOptions{})
 		if err != nil {
 			pollErr = fmt.Errorf("failed to get Prometheus Agent DaemonSet: %w", err)
@@ -304,7 +304,7 @@ func testPromAgentReconcileDaemonSetResourceUpdate(t *testing.T) {
 	framework.KubeClient.AppsV1().DaemonSets(ns).Update(ctx, dms, metav1.UpdateOptions{})
 
 	var pollErr error
-	err = wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 20*time.Minute, false, func(ctx context.Context) (bool, error) {
+	err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
 		dms, err = framework.KubeClient.AppsV1().DaemonSets(ns).Get(ctx, dmsName, metav1.GetOptions{})
 		if err != nil {
 			pollErr = fmt.Errorf("failed to get Prometheus Agent DaemonSet: %w", err)
@@ -351,17 +351,6 @@ func testPromAgentReconcileDaemonSetResourceDelete(t *testing.T) {
 	dmsName := fmt.Sprintf("prom-agent-%s", p.Name)
 	framework.KubeClient.AppsV1().DaemonSets(ns).Delete(ctx, dmsName, metav1.DeleteOptions{})
 
-	var pollErr error
-	err = wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 20*time.Minute, false, func(ctx context.Context) (bool, error) {
-		dms, _ := framework.KubeClient.AppsV1().DaemonSets(ns).Get(ctx, dmsName, metav1.GetOptions{})
-		if dms.Status.NumberAvailable == 0 {
-			pollErr = fmt.Errorf("no Prometheus Agent DaemonSet available: %w", err)
-			return false, nil
-		}
-
-		return true, nil
-	})
-	require.NoError(t, pollErr)
 	err = framework.WaitForPrometheusAgentDSReady(ctx, ns, prometheusAgentDSCRD)
 	require.NoError(t, err)
 }
@@ -108,7 +108,7 @@ func (f *Framework) MakeCRD(source string) (*v1.CustomResourceDefinition, error)
 
 // WaitForCRDReady waits for a Custom Resource Definition to be available for use.
 func WaitForCRDReady(listFunc func(opts metav1.ListOptions) (runtime.Object, error)) error {
-	err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 10*time.Minute, false, func(_ context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(context.Background(), time.Second, 2*time.Minute, false, func(_ context.Context) (bool, error) {
 		_, err := listFunc(metav1.ListOptions{})
 		if err != nil {
 			if se, ok := err.(*apierrors.StatusError); ok {
@@ -142,7 +142,7 @@ func (f *Framework) WaitForPrometheusAgentReady(ctx context.Context, p *monitori
 
 func (f *Framework) WaitForPrometheusAgentDSReady(ctx context.Context, ns string, p *monitoringv1alpha1.PrometheusAgent) error {
 	var pollErr error
-	if err := wait.PollUntilContextTimeout(ctx, 20*time.Second, 20*time.Minute, true, func(ctx context.Context) (bool, error) {
+	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
 		name := fmt.Sprintf("prom-agent-%s", p.Name)
 		// TODO: Implement UpdateStatus() for DaemonSet and check status instead of using Get().
 		dms, err := f.KubeClient.AppsV1().DaemonSets(ns).Get(ctx, name, metav1.GetOptions{})
@@ -150,10 +150,17 @@ func (f *Framework) WaitForPrometheusAgentDSReady(ctx context.Context, ns string
 			pollErr = fmt.Errorf("failed to get Prometheus Agent DaemonSet: %w", err)
 			return false, nil
 		}
+
+		if dms.ObjectMeta.DeletionTimestamp != nil {
+			pollErr = fmt.Errorf("Prometheus Agent DaemonSet deletion in progress")
+			return false, nil
+		}
+
 		if dms.Status.NumberUnavailable > 0 {
 			pollErr = fmt.Errorf("Prometheus Agent DaemonSet is not available")
 			return false, nil
 		}
+
 		if dms.Status.NumberReady == 0 {
 			pollErr = fmt.Errorf("Prometheus Agent DaemonSet is not ready")
 			return false, nil
@@ -196,7 +203,7 @@ func (f *Framework) DeletePrometheusAgentDSAndWaitUntilGone(ctx context.Context,
 	}
 
 	var pollErr error
-	if err := wait.PollUntilContextTimeout(ctx, 20*time.Second, 20*time.Minute, true, func(ctx context.Context) (bool, error) {
+	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
 		dmsName := fmt.Sprintf("prom-agent-%s", p.Name)
 		dms, _ := f.KubeClient.AppsV1().DaemonSets(ns).Get(ctx, dmsName, metav1.GetOptions{})
 		if dms.Status.NumberAvailable != 0 {
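The WaitForPrometheusAgentDSReady checks in the hunks above gate readiness on fields of the DaemonSet status. A minimal sketch of that readiness logic in isolation: the field names are the real appsv1.DaemonSetStatus ones from k8s.io/api/apps/v1, while the values and the combined ready expression are illustrative only, not code from this commit:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	// Illustrative status only; in the framework this comes from
	// KubeClient.AppsV1().DaemonSets(ns).Get(...).
	status := appsv1.DaemonSetStatus{
		DesiredNumberScheduled: 3, // pods the DaemonSet should be running
		NumberReady:            2, // pods whose readiness probes pass
		NumberUnavailable:      1, // desired pods not yet available
	}

	// Same ordering as the checks in the commit: any unavailable pod
	// fails the poll attempt, and at least one pod must be ready.
	ready := status.NumberUnavailable == 0 && status.NumberReady > 0
	fmt.Println("ready:", ready) // ready: false
}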