Add ability to set Request Payer in aws_s3_bucket. (#8065)

An S3 bucket owner may wish to share data without incurring the charges associated
with others accessing that data. This commit adds an optional "request_payer"
attribute to the aws_s3_bucket resource so that the owner of the S3 bucket can
specify who should bear the cost of Amazon S3 data transfer.
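
As a usage sketch (the bucket name and ACL here are illustrative, not part of this
change), the attribute is set directly on the resource:

    resource "aws_s3_bucket" "example" {
      bucket        = "my-shared-data-bucket"   # illustrative name
      acl           = "private"
      request_payer = "Requester"               # or "BucketOwner", the S3 default
    }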

Signed-off-by: Krzysztof Wilczynski <krzysztof.wilczynski@linux.com>
Authored by Krzysztof Wilczynski on 2016-08-10 08:01:17 +09:00; committed by Paul Stack
parent f18e2546b3
commit 92d75b263c
3 changed files with 174 additions and 4 deletions


@@ -286,8 +286,6 @@ func resourceAwsS3Bucket() *schema.Resource {
},
},
"tags": tagsSchema(),
"force_destroy": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
@@ -300,6 +298,15 @@
Computed: true,
ValidateFunc: validateS3BucketAccelerationStatus,
},
"request_payer": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validateS3BucketRequestPayerType,
},
"tags": tagsSchema(),
},
}
}
@@ -408,6 +415,12 @@ func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error {
}
}
if d.HasChange("request_payer") {
if err := resourceAwsS3BucketRequestPayerUpdate(s3conn, d); err != nil {
return err
}
}
return resourceAwsS3BucketRead(d, meta)
}
@@ -568,6 +581,20 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
d.Set("acceleration_status", accelerate.Status)
}
	// Read the request payer configuration.
	payer, err := s3conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] S3 Bucket: %s, read request payer: %v", d.Id(), payer)

	if payer.Payer != nil {
		if err := d.Set("request_payer", *payer.Payer); err != nil {
			return err
		}
	}
// Read the logging configuration
logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{
Bucket: aws.String(d.Id()),
@@ -575,6 +602,7 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
if err != nil {
return err
}
log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging)
if v := logging.LoggingEnabled; v != nil {
lcl := make([]map[string]interface{}, 0, 1)
@@ -1163,6 +1191,26 @@ func resourceAwsS3BucketAccelerationUpdate(s3conn *s3.S3, d *schema.ResourceData
return nil
}
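
// resourceAwsS3BucketRequestPayerUpdate pushes the configured "request_payer"
// value for the bucket to S3 via the PutBucketRequestPayment API call.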
func resourceAwsS3BucketRequestPayerUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	payer := d.Get("request_payer").(string)

	i := &s3.PutBucketRequestPaymentInput{
		Bucket: aws.String(bucket),
		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
			Payer: aws.String(payer),
		},
	}
	log.Printf("[DEBUG] S3 put bucket request payer: %#v", i)

	_, err := s3conn.PutBucketRequestPayment(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 request payer: %s", err)
	}

	return nil
}
func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
bucket := d.Get("bucket").(string)
@@ -1370,6 +1418,16 @@ func validateS3BucketAccelerationStatus(v interface{}, k string) (ws []string, e
return
}
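
// validateS3BucketRequestPayerType ensures the configured value is one of the
// payer types S3 accepts: "Requester" or "BucketOwner".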
func validateS3BucketRequestPayerType(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if value != s3.PayerRequester && value != s3.PayerBucketOwner {
		errors = append(errors, fmt.Errorf(
			"%q contains an invalid Request Payer type %q. Valid types are either %q or %q",
			k, value, s3.PayerRequester, s3.PayerBucketOwner))
	}
	return
}
func expirationHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})


@@ -77,6 +77,72 @@ func TestAccAWSS3Bucket_acceleration(t *testing.T) {
})
}
func TestAccAWSS3Bucket_RequestPayer(t *testing.T) {
	rInt := acctest.RandInt()
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSS3BucketDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSS3BucketConfigRequestPayerBucketOwner(rInt),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"),
					resource.TestCheckResourceAttr(
						"aws_s3_bucket.bucket",
						"request_payer",
						"BucketOwner"),
					testAccCheckAWSS3RequestPayer(
						"aws_s3_bucket.bucket",
						"BucketOwner"),
				),
			},
			resource.TestStep{
				Config: testAccAWSS3BucketConfigRequestPayerRequester(rInt),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"),
					resource.TestCheckResourceAttr(
						"aws_s3_bucket.bucket",
						"request_payer",
						"Requester"),
					testAccCheckAWSS3RequestPayer(
						"aws_s3_bucket.bucket",
						"Requester"),
				),
			},
		},
	})
}
func TestResourceAWSS3BucketRequestPayer_validation(t *testing.T) {
	_, errors := validateS3BucketRequestPayerType("incorrect", "request_payer")
	if len(errors) == 0 {
		t.Fatalf("Expected to trigger a validation error")
	}

	var testCases = []struct {
		Value    string
		ErrCount int
	}{
		{
			Value:    "Requester",
			ErrCount: 0,
		},
		{
			Value:    "BucketOwner",
			ErrCount: 0,
		},
	}

	for _, tc := range testCases {
		_, errors := validateS3BucketRequestPayerType(tc.Value, "request_payer")
		if len(errors) != tc.ErrCount {
			t.Fatalf("Expected not to trigger a validation error")
		}
	}
}
func TestAccAWSS3Bucket_Policy(t *testing.T) {
rInt := acctest.RandInt()
@@ -689,6 +755,28 @@ func testAccCheckAWSS3BucketCors(n string, corsRules []*s3.CORSRule) resource.Te
}
}
func testAccCheckAWSS3RequestPayer(n, expectedPayer string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, _ := s.RootModule().Resources[n]
		conn := testAccProvider.Meta().(*AWSClient).s3conn

		out, err := conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{
			Bucket: aws.String(rs.Primary.ID),
		})
		if err != nil {
			return fmt.Errorf("GetBucketRequestPayment error: %v", err)
		}

		if *out.Payer != expectedPayer {
			return fmt.Errorf("bad request payer type, expected: %v, got %v",
				expectedPayer, *out.Payer)
		}

		return nil
	}
}
func testAccCheckAWSS3BucketLogging(n, b, p string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, _ := s.RootModule().Resources[n]
@@ -844,6 +932,26 @@ resource "aws_s3_bucket" "bucket" {
`, randInt)
}
func testAccAWSS3BucketConfigRequestPayerBucketOwner(randInt int) string {
	return fmt.Sprintf(`
resource "aws_s3_bucket" "bucket" {
	bucket = "tf-test-bucket-%d"
	acl = "public-read"
	request_payer = "BucketOwner"
}
`, randInt)
}

func testAccAWSS3BucketConfigRequestPayerRequester(randInt int) string {
	return fmt.Sprintf(`
resource "aws_s3_bucket" "bucket" {
	bucket = "tf-test-bucket-%d"
	acl = "public-read"
	request_payer = "Requester"
}
`, randInt)
}
func testAccAWSS3BucketConfigWithPolicy(randInt int) string {
return fmt.Sprintf(`
resource "aws_s3_bucket" "bucket" {


@@ -173,8 +173,12 @@ The following arguments are supported:
* `logging` - (Optional) A configuration of [bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) (documented below).
* `lifecycle_rule` - (Optional) A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) (documented below).
* `acceleration_status` - (Optional) Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`.
* `request_payer` - (Optional) Specifies who should bear the cost of Amazon S3 data transfer.
Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket incurs
the costs of any data transfer. See the [Requester Pays Buckets](http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html)
developer guide for more information, and the example below.
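
A minimal, illustrative configuration enabling Requester Pays (the bucket name and
ACL are placeholders):

```
resource "aws_s3_bucket" "example" {
  bucket        = "my-requester-pays-bucket"
  acl           = "private"
  request_payer = "Requester"
}
```
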
~> **NOTE:** You cannot use `acceleration_status` in `cn-north-1` or `us-gov-west-1`
The `website` object supports the following:
@@ -218,7 +222,7 @@ The `expiration` object supports the following
* `date` (Optional) Specifies the date after which you want the corresponding action to take effect.
* `days` (Optional) Specifies the number of days after object creation when the specific rule action takes effect.
* `expired_object_delete_marker` (Optional) On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Amazon S3 to delete expired object delete markers.
The `transition` object supports the following