mirror of
https://github.com/openshift/installer.git
synced 2026-02-06 18:47:19 +01:00
pkg/destroy/aws: Don't give up on Route 53 rate limits
We've been hitting Route 53 rate limits in the busy CI account: level=debug msg="Deleting Route53 zones (map[openshiftClusterID:5b0921a0-5e21-4ebf-a5f9-396a92526ec1])" level=debug msg="Deleting Route53 zones (map[kubernetes.io/cluster/ci-op-piz2m00h-1d3f3:owned])" level=debug msg="error converting r53Zones to native AWS objects: Throttling: Rate exceeded\n\tstatus code: 400, request id: 80e10c03-0306-11e9-b9b6-abeb053f0218" level=debug msg="Exiting deleting Route53 zones (map[kubernetes.io/cluster/ci-op-piz2m00h-1d3f3:owned])" level=debug msg="error converting r53Zones to native AWS objects: Throttling: Rate exceeded\n\tstatus code: 400, request id: 81cd4026-0306-11e9-9710-21e3250d9953" level=debug msg="Exiting deleting Route53 zones (map[openshiftClusterID:5b0921a0-5e21-4ebf-a5f9-396a92526ec1])" We've had trouble with Route 53 rate limits before; see discussion in openshift/hive@f945dbb3 (awstagdeprovision: Ignore more errors, 2018-11-27, openshift/hive#113). With this commit, instead of bailing part way through listing tags for all the hosted zones, we just retry that particular zone until it goes through and keep going on tags for the whole list. This should reduce our overall load on the Route 53 APIs.
This commit is contained in:
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/autoscaling"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
@@ -1192,16 +1193,27 @@ func deleteS3Buckets(session *session.Session, filter AWSFilter, clusterName str
|
||||
}
|
||||
|
||||
// r53ZonesToAWSObjects will create a list of awsObjectsWithTags for the provided list of route53.HostedZone values
|
||||
func r53ZonesToAWSObjects(zones []*route53.HostedZone, r53Client *route53.Route53) ([]awsObjectWithTags, error) {
|
||||
func r53ZonesToAWSObjects(zones []*route53.HostedZone, r53Client *route53.Route53, logger log.FieldLogger) ([]awsObjectWithTags, error) {
|
||||
zonesAsAWSObjects := []awsObjectWithTags{}
|
||||
|
||||
var result *route53.ListTagsForResourceOutput
|
||||
var err error
|
||||
for _, zone := range zones {
|
||||
result, err := r53Client.ListTagsForResource(&route53.ListTagsForResourceInput{
|
||||
ResourceType: aws.String("hostedzone"),
|
||||
ResourceId: zone.Id,
|
||||
})
|
||||
if err != nil {
|
||||
return zonesAsAWSObjects, err
|
||||
for {
|
||||
result, err = r53Client.ListTagsForResource(&route53.ListTagsForResourceInput{
|
||||
ResourceType: aws.String("hostedzone"),
|
||||
ResourceId: zone.Id,
|
||||
})
|
||||
if err != nil {
|
||||
if request.IsErrorThrottle(err) {
|
||||
logger.Debugf("sleeping before trying to resolve tags for zone %s: %v", zone.Id, err)
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
return zonesAsAWSObjects, err
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
tagsToMap, err := tagsToMap(result.ResourceTagSet.Tags)
|
||||
@@ -1370,7 +1382,7 @@ func deleteRoute53(session *session.Session, filters AWSFilter, clusterName stri
|
||||
return false, nil
|
||||
}
|
||||
|
||||
awsZones, err := r53ZonesToAWSObjects(allZones.HostedZones, r53Client)
|
||||
awsZones, err := r53ZonesToAWSObjects(allZones.HostedZones, r53Client, logger)
|
||||
if err != nil {
|
||||
logger.Debugf("error converting r53Zones to native AWS objects: %v", err)
|
||||
return false, nil
|
||||
|
||||
Reference in New Issue
Block a user