backend/s3: Configure AWS Client MaxRetries and provide enhanced S3 NoSuchBucket error message

The AWS Go SDK provides a default request retryer with exponential backoff. It is enabled by setting `MaxRetries`; leaving the value `nil` defaults to 3 retries. The terraform-aws-provider `config.Client()` sets `MaxRetries` to 0 unless it is explicitly configured above 0. Previously, we were not setting this configuration and therefore never invoked the default request retryer.

The default retryer already handles HTTP 500-level error codes, including S3's InternalError response, so the hand-rolled retry handling can be removed. This also enables automatic retries in many additional cases, such as temporary networking issues and other retryable AWS service responses.
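For illustration only (not part of this change), a minimal standalone Go sketch of how `MaxRetries` drives the SDK's default retryer; the region value is a placeholder:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // Setting MaxRetries enables the SDK's default exponential-backoff
        // retryer. A nil value falls back to the SDK default (3 retries for
        // most services); an explicit 0 disables retrying entirely.
        sess, err := session.NewSession(&aws.Config{
            Region:     aws.String("us-east-1"), // placeholder
            MaxRetries: aws.Int(5),
        })
        if err != nil {
            log.Fatal(err)
        }

        // Calls made through this client now automatically retry retryable
        // failures such as HTTP 5xx responses and throttling errors.
        _ = s3.New(sess)
    }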

Changes:
* s3/backend: Add `max_retries` argument
* s3/backend: Enhance S3 NoSuchBucket error to include additional information
Brian Flad 2019-01-09 13:01:37 -05:00
parent e997373f44
commit ed37d07632
4 changed files with 55 additions and 56 deletions

backend/remote-state/s3/backend.go

@@ -219,6 +219,13 @@ func New() backend.Backend {
                 Optional:    true,
                 Description: "Force s3 to use path style api.",
                 Default:     false,
             },
+
+            "max_retries": {
+                Type:        schema.TypeInt,
+                Optional:    true,
+                Description: "The maximum number of times an AWS API request is retried on retryable failure.",
+                Default:     5,
+            },
         },
     }
@@ -285,6 +292,7 @@ func (b *Backend) configure(ctx context.Context) error {
         SkipRequestingAccountId: data.Get("skip_requesting_account_id").(bool),
         SkipMetadataApiCheck:    data.Get("skip_metadata_api_check").(bool),
         S3ForcePathStyle:        data.Get("force_path_style").(bool),
+        MaxRetries:              data.Get("max_retries").(int),
     }

     client, err := cfg.Client()

backend/remote-state/s3/backend_state.go

@@ -7,6 +7,7 @@ import (
     "strings"

     "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/awserr"
     "github.com/aws/aws-sdk-go/service/s3"
     "github.com/hashicorp/terraform/backend"
@@ -29,6 +30,9 @@ func (b *Backend) Workspaces() ([]string, error) {
     resp, err := b.s3Client.ListObjects(params)
     if err != nil {
+        if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == s3.ErrCodeNoSuchBucket {
+            return nil, fmt.Errorf(errS3NoSuchBucket, err)
+        }
         return nil, err
     }

backend/remote-state/s3/client.go

@@ -98,30 +98,21 @@ func (c *RemoteClient) get() (*remote.Payload, error) {
     var output *s3.GetObjectOutput
     var err error

-    // we immediately retry on an internal error, as those are usually transient
-    maxRetries := 2
-    for retryCount := 0; ; retryCount++ {
-        output, err = c.s3Client.GetObject(&s3.GetObjectInput{
-            Bucket: &c.bucketName,
-            Key:    &c.path,
-        })
-
-        if err != nil {
-            if awserr, ok := err.(awserr.Error); ok {
-                switch awserr.Code() {
-                case s3.ErrCodeNoSuchKey:
-                    return nil, nil
-                case s3ErrCodeInternalError:
-                    if retryCount > maxRetries {
-                        return nil, err
-                    }
-                    log.Println("[WARN] s3 internal error, retrying...")
-                    continue
-                }
-            }
-            return nil, err
-        }
-        break
+    output, err = c.s3Client.GetObject(&s3.GetObjectInput{
+        Bucket: &c.bucketName,
+        Key:    &c.path,
+    })
+
+    if err != nil {
+        if awserr, ok := err.(awserr.Error); ok {
+            switch awserr.Code() {
+            case s3.ErrCodeNoSuchBucket:
+                return nil, fmt.Errorf(errS3NoSuchBucket, err)
+            case s3.ErrCodeNoSuchKey:
+                return nil, nil
+            }
+        }
+        return nil, err
     }

     defer output.Body.Close()
@@ -149,46 +140,32 @@ func (c *RemoteClient) Put(data []byte) error {
     contentType := "application/json"
     contentLength := int64(len(data))

-    // we immediately retry on an internal error, as those are usually transient
-    maxRetries := 2
-    for retryCount := 0; ; retryCount++ {
-        i := &s3.PutObjectInput{
-            ContentType:   &contentType,
-            ContentLength: &contentLength,
-            Body:          bytes.NewReader(data),
-            Bucket:        &c.bucketName,
-            Key:           &c.path,
-        }
-
-        if c.serverSideEncryption {
-            if c.kmsKeyID != "" {
-                i.SSEKMSKeyId = &c.kmsKeyID
-                i.ServerSideEncryption = aws.String("aws:kms")
-            } else {
-                i.ServerSideEncryption = aws.String("AES256")
-            }
-        }
-
-        if c.acl != "" {
-            i.ACL = aws.String(c.acl)
-        }
-
-        log.Printf("[DEBUG] Uploading remote state to S3: %#v", i)
-
-        _, err := c.s3Client.PutObject(i)
-        if err != nil {
-            if awserr, ok := err.(awserr.Error); ok {
-                if awserr.Code() == s3ErrCodeInternalError {
-                    if retryCount > maxRetries {
-                        return fmt.Errorf("failed to upload state: %s", err)
-                    }
-                    log.Println("[WARN] s3 internal error, retrying...")
-                    continue
-                }
-            }
-            return fmt.Errorf("failed to upload state: %s", err)
-        }
-        break
+    i := &s3.PutObjectInput{
+        ContentType:   &contentType,
+        ContentLength: &contentLength,
+        Body:          bytes.NewReader(data),
+        Bucket:        &c.bucketName,
+        Key:           &c.path,
+    }
+
+    if c.serverSideEncryption {
+        if c.kmsKeyID != "" {
+            i.SSEKMSKeyId = &c.kmsKeyID
+            i.ServerSideEncryption = aws.String("aws:kms")
+        } else {
+            i.ServerSideEncryption = aws.String("AES256")
+        }
+    }
+
+    if c.acl != "" {
+        i.ACL = aws.String(c.acl)
+    }
+
+    log.Printf("[DEBUG] Uploading remote state to S3: %#v", i)
+
+    _, err := c.s3Client.PutObject(i)
+    if err != nil {
+        return fmt.Errorf("failed to upload state: %s", err)
     }

     sum := md5.Sum(data)
@@ -414,3 +391,12 @@ persists, and neither S3 nor DynamoDB are experiencing an outage, you may need
 to manually verify the remote state and update the Digest value stored in the
 DynamoDB table to the following value: %x
 `
+
+const errS3NoSuchBucket = `S3 bucket does not exist.
+
+The referenced S3 bucket must have been previously created. If the S3 bucket
+was created within the last minute, please wait for a minute or two and try
+again.
+
+Error: %s
+`

website/docs/backends/types/s3.html.md

@@ -180,6 +180,7 @@ The following configuration options or environment variables are supported:
 * `skip_region_validation` - (Optional) Skip validation of provided region name.
 * `skip_requesting_account_id` - (Optional) Skip requesting the account ID.
 * `skip_metadata_api_check` - (Optional) Skip the AWS Metadata API check.
+* `max_retries` - (Optional) The maximum number of times an AWS API request is retried on retryable failure. Defaults to 5.

 ## Multi-account AWS Architecture
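For example, a minimal backend configuration overriding the default retry count might look like the following sketch (bucket, key, and region values are placeholders):

    terraform {
      backend "s3" {
        bucket      = "mybucket"
        key         = "path/to/my/key"
        region      = "us-east-1"
        max_retries = 10
      }
    }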