Merge pull request #5305 from TimeIncOSS/f-s3-obj-update-and-versioning

provider/aws: Enable updates & versioning for s3_bucket_object
Radek Simko 2016-02-25 12:19:34 +00:00
commit 4b8726dc2b
3 changed files with 234 additions and 55 deletions
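For context, the change removes ForceNew from the object's content-related arguments, so a changed source file or inline content is now re-uploaded in place instead of forcing a replacement, and the new computed version_id attribute records the version S3 returns when the target bucket has versioning enabled. A minimal configuration exercising both behaviours, modelled on the acceptance-test configs added below (the bucket name, key, and file path are illustrative), might look like:

resource "aws_s3_bucket" "example" {
  bucket = "tf-object-test-bucket-12345"

  versioning {
    enabled = true
  }
}

resource "aws_s3_bucket_object" "example" {
  bucket = "${aws_s3_bucket.example.bucket}"
  key    = "updateable-key"
  source = "path/to/file"

  # Re-upload (writing a new S3 version) whenever the local file changes.
  etag = "${md5(file("path/to/file"))}"
}

On each update the object's etag and version_id are refreshed from the PutObject response, and on destroy every stored version of the key is deleted.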

@@ -6,6 +6,7 @@ import (
"io"
"log"
"os"
"strings"
"github.com/hashicorp/terraform/helper/schema"
"github.com/mitchellh/go-homedir"
@@ -19,6 +20,7 @@ func resourceAwsS3BucketObject() *schema.Resource {
return &schema.Resource{
Create: resourceAwsS3BucketObjectPut,
Read: resourceAwsS3BucketObjectRead,
Update: resourceAwsS3BucketObjectPut,
Delete: resourceAwsS3BucketObjectDelete,
Schema: map[string]*schema.Schema{
@@ -31,31 +33,26 @@ func resourceAwsS3BucketObject() *schema.Resource {
"cache_control": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"content_disposition": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"content_encoding": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"content_language": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"content_type": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Computed: true,
},
@@ -68,18 +65,25 @@ func resourceAwsS3BucketObject() *schema.Resource {
"source": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ConflictsWith: []string{"content"},
},
"content": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ConflictsWith: []string{"source"},
},
"etag": &schema.Schema{
Type: schema.TypeString,
// This will conflict with SSE-C and SSE-KMS encryption and multi-part upload
// if/when it's actually implemented. The Etag then won't match raw-file MD5.
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
Optional: true,
Computed: true,
},
"version_id": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
@@ -110,9 +114,9 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro
content := v.(string)
body = bytes.NewReader([]byte(content))
} else {
return fmt.Errorf("Must specify \"source\" or \"content\" field")
}
putInput := &s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
@@ -144,9 +148,12 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro
return fmt.Errorf("Error putting object in S3 bucket (%s): %s", bucket, err)
}
d.Set("etag", resp.ETag)
// See https://forums.aws.amazon.com/thread.jspa?threadID=44003
d.Set("etag", strings.Trim(*resp.ETag, `"`))
d.Set("version_id", resp.VersionId)
d.SetId(key)
return nil
return resourceAwsS3BucketObjectRead(d, meta)
}
func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) error {
@@ -178,6 +185,7 @@ func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) err
d.Set("content_encoding", resp.ContentEncoding)
d.Set("content_language", resp.ContentLanguage)
d.Set("content_type", resp.ContentType)
d.Set("version_id", resp.VersionId)
log.Printf("[DEBUG] Reading S3 Bucket Object meta: %s", resp)
return nil
@@ -189,13 +197,40 @@ func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) e
bucket := d.Get("bucket").(string)
key := d.Get("key").(string)
_, err := s3conn.DeleteObject(
&s3.DeleteObjectInput{
if _, ok := d.GetOk("version_id"); ok {
// Bucket is versioned, we need to delete all versions
vInput := s3.ListObjectVersionsInput{
Bucket: aws.String(bucket),
Prefix: aws.String(key),
}
out, err := s3conn.ListObjectVersions(&vInput)
if err != nil {
return fmt.Errorf("Failed listing S3 object versions: %s", err)
}
for _, v := range out.Versions {
input := s3.DeleteObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
VersionId: v.VersionId,
}
_, err := s3conn.DeleteObject(&input)
if err != nil {
return fmt.Errorf("Error deleting S3 object version of %s:\n %s:\n %s",
key, v, err)
}
}
} else {
// Just delete the object
input := s3.DeleteObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
})
if err != nil {
return fmt.Errorf("Error deleting S3 bucket object: %s", err)
}
_, err := s3conn.DeleteObject(&input)
if err != nil {
return fmt.Errorf("Error deleting S3 bucket object: %s", err)
}
}
return nil
}

@@ -14,25 +14,29 @@ import (
"github.com/aws/aws-sdk-go/service/s3"
)
var tf, err = ioutil.TempFile("", "tf")
func TestAccAWSS3BucketObject_source(t *testing.T) {
tmpFile, err := ioutil.TempFile("", "tf-acc-s3-obj-source")
if err != nil {
t.Fatal(err)
}
defer os.Remove(tmpFile.Name())
rInt := acctest.RandInt()
// first write some data to the tempfile just so it's not 0 bytes.
ioutil.WriteFile(tf.Name(), []byte("{anything will do }"), 0644)
err = ioutil.WriteFile(tmpFile.Name(), []byte("{anything will do }"), 0644)
if err != nil {
t.Fatal(err)
}
var obj s3.GetObjectOutput
resource.Test(t, resource.TestCase{
PreCheck: func() {
if err != nil {
panic(err)
}
testAccPreCheck(t)
},
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSS3BucketObjectDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSS3BucketObjectConfigSource(rInt),
Check: testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object"),
Config: testAccAWSS3BucketObjectConfigSource(rInt, tmpFile.Name()),
Check: testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &obj),
},
},
})
@@ -40,42 +44,47 @@ func TestAccAWSS3BucketObject_source(t *testing.T) {
func TestAccAWSS3BucketObject_content(t *testing.T) {
rInt := acctest.RandInt()
var obj s3.GetObjectOutput
resource.Test(t, resource.TestCase{
PreCheck: func() {
if err != nil {
panic(err)
}
testAccPreCheck(t)
},
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSS3BucketObjectDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSS3BucketObjectConfigContent(rInt),
Check: testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object"),
PreConfig: func() {},
Config: testAccAWSS3BucketObjectConfigContent(rInt),
Check: testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &obj),
},
},
})
}
func TestAccAWSS3BucketObject_withContentCharacteristics(t *testing.T) {
tmpFile, err := ioutil.TempFile("", "tf-acc-s3-obj-content-characteristics")
if err != nil {
t.Fatal(err)
}
defer os.Remove(tmpFile.Name())
rInt := acctest.RandInt()
// first write some data to the tempfile just so it's not 0 bytes.
ioutil.WriteFile(tf.Name(), []byte("{anything will do }"), 0644)
err = ioutil.WriteFile(tmpFile.Name(), []byte("{anything will do }"), 0644)
if err != nil {
t.Fatal(err)
}
var obj s3.GetObjectOutput
resource.Test(t, resource.TestCase{
PreCheck: func() {
if err != nil {
panic(err)
}
testAccPreCheck(t)
},
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSS3BucketObjectDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSS3BucketObjectConfig_withContentCharacteristics(rInt),
Config: testAccAWSS3BucketObjectConfig_withContentCharacteristics(rInt, tmpFile.Name()),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object"),
testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &obj),
resource.TestCheckResourceAttr(
"aws_s3_bucket_object.object", "content_type", "binary/octet-stream"),
),
@@ -84,6 +93,111 @@ func TestAccAWSS3BucketObject_withContentCharacteristics(t *testing.T) {
})
}
func TestAccAWSS3BucketObject_updates(t *testing.T) {
tmpFile, err := ioutil.TempFile("", "tf-acc-s3-obj-updates")
if err != nil {
t.Fatal(err)
}
defer os.Remove(tmpFile.Name())
rInt := acctest.RandInt()
err = ioutil.WriteFile(tmpFile.Name(), []byte("initial object state"), 0644)
if err != nil {
t.Fatal(err)
}
var obj s3.GetObjectOutput
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSS3BucketObjectDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSS3BucketObjectConfig_updates(rInt, tmpFile.Name()),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &obj),
resource.TestCheckResourceAttr("aws_s3_bucket_object.object", "etag", "647d1d58e1011c743ec67d5e8af87b53"),
),
},
resource.TestStep{
PreConfig: func() {
err = ioutil.WriteFile(tmpFile.Name(), []byte("modified object"), 0644)
if err != nil {
t.Fatal(err)
}
},
Config: testAccAWSS3BucketObjectConfig_updates(rInt, tmpFile.Name()),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &obj),
resource.TestCheckResourceAttr("aws_s3_bucket_object.object", "etag", "1c7fd13df1515c2a13ad9eb068931f09"),
),
},
},
})
}
func TestAccAWSS3BucketObject_updatesWithVersioning(t *testing.T) {
tmpFile, err := ioutil.TempFile("", "tf-acc-s3-obj-updates-w-versions")
if err != nil {
t.Fatal(err)
}
defer os.Remove(tmpFile.Name())
rInt := acctest.RandInt()
err = ioutil.WriteFile(tmpFile.Name(), []byte("initial versioned object state"), 0644)
if err != nil {
t.Fatal(err)
}
var originalObj, modifiedObj s3.GetObjectOutput
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSS3BucketObjectDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSS3BucketObjectConfig_updatesWithVersioning(rInt, tmpFile.Name()),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &originalObj),
resource.TestCheckResourceAttr("aws_s3_bucket_object.object", "etag", "cee4407fa91906284e2a5e5e03e86b1b"),
),
},
resource.TestStep{
PreConfig: func() {
err = ioutil.WriteFile(tmpFile.Name(), []byte("modified versioned object"), 0644)
if err != nil {
t.Fatal(err)
}
},
Config: testAccAWSS3BucketObjectConfig_updatesWithVersioning(rInt, tmpFile.Name()),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &modifiedObj),
resource.TestCheckResourceAttr("aws_s3_bucket_object.object", "etag", "00b8c73b1b50e7cc932362c7225b8e29"),
testAccCheckAWSS3BucketObjectVersionIdDiffers(&originalObj, &modifiedObj),
),
},
},
})
}
func testAccCheckAWSS3BucketObjectVersionIdDiffers(first, second *s3.GetObjectOutput) resource.TestCheckFunc {
return func(s *terraform.State) error {
if first.VersionId == nil {
return fmt.Errorf("Expected first object to have VersionId: %s", first)
}
if second.VersionId == nil {
return fmt.Errorf("Expected second object to have VersionId: %s", second)
}
if *first.VersionId == *second.VersionId {
return fmt.Errorf("Expected Version IDs to differ, but they are equal (%s)", *first.VersionId)
}
return nil
}
}
func testAccCheckAWSS3BucketObjectDestroy(s *terraform.State) error {
s3conn := testAccProvider.Meta().(*AWSClient).s3conn
@@ -105,11 +219,8 @@ func testAccCheckAWSS3BucketObjectDestroy(s *terraform.State) error {
return nil
}
func testAccCheckAWSS3BucketObjectExists(n string) resource.TestCheckFunc {
func testAccCheckAWSS3BucketObjectExists(n string, obj *s3.GetObjectOutput) resource.TestCheckFunc {
return func(s *terraform.State) error {
defer os.Remove(tf.Name())
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not Found: %s", n)
@@ -120,7 +231,7 @@ func testAccCheckAWSS3BucketObjectExists(n string) resource.TestCheckFunc {
}
s3conn := testAccProvider.Meta().(*AWSClient).s3conn
_, err := s3conn.GetObject(
out, err := s3conn.GetObject(
&s3.GetObjectInput{
Bucket: aws.String(rs.Primary.Attributes["bucket"]),
Key: aws.String(rs.Primary.Attributes["key"]),
@@ -129,11 +240,14 @@ func testAccCheckAWSS3BucketObjectExists(n string) resource.TestCheckFunc {
if err != nil {
return fmt.Errorf("S3Bucket Object error: %s", err)
}
*obj = *out
return nil
}
}
func testAccAWSS3BucketObjectConfigSource(randInt int) string {
func testAccAWSS3BucketObjectConfigSource(randInt int, source string) string {
return fmt.Sprintf(`
resource "aws_s3_bucket" "object_bucket" {
bucket = "tf-object-test-bucket-%d"
@@ -144,10 +258,10 @@ resource "aws_s3_bucket_object" "object" {
source = "%s"
content_type = "binary/octet-stream"
}
`, randInt, tf.Name())
`, randInt, source)
}
func testAccAWSS3BucketObjectConfig_withContentCharacteristics(randInt int) string {
func testAccAWSS3BucketObjectConfig_withContentCharacteristics(randInt int, source string) string {
return fmt.Sprintf(`
resource "aws_s3_bucket" "object_bucket_2" {
bucket = "tf-object-test-bucket-%d"
@@ -160,7 +274,7 @@ resource "aws_s3_bucket_object" "object" {
content_language = "en"
content_type = "binary/octet-stream"
}
`, randInt, tf.Name())
`, randInt, source)
}
func testAccAWSS3BucketObjectConfigContent(randInt int) string {
@@ -175,3 +289,36 @@ resource "aws_s3_bucket_object" "object" {
}
`, randInt)
}
func testAccAWSS3BucketObjectConfig_updates(randInt int, source string) string {
return fmt.Sprintf(`
resource "aws_s3_bucket" "object_bucket_3" {
bucket = "tf-object-test-bucket-%d"
}
resource "aws_s3_bucket_object" "object" {
bucket = "${aws_s3_bucket.object_bucket_3.bucket}"
key = "updateable-key"
source = "%s"
etag = "${md5(file("%s"))}"
}
`, randInt, source, source)
}
func testAccAWSS3BucketObjectConfig_updatesWithVersioning(randInt int, source string) string {
return fmt.Sprintf(`
resource "aws_s3_bucket" "object_bucket_3" {
bucket = "tf-object-test-bucket-%d"
versioning {
enabled = true
}
}
resource "aws_s3_bucket_object" "object" {
bucket = "${aws_s3_bucket.object_bucket_3.bucket}"
key = "updateable-key"
source = "%s"
etag = "${md5(file("%s"))}"
}
`, randInt, source, source)
}

@@ -565,9 +565,6 @@ func TestRulesMixedMatching(t *testing.T) {
for i, c := range cases {
saves := matchRules("ingress", c.local, c.remote)
log.Printf("\n======\n\nSaves:\n%#v\n\nCS Saves:\n%#v\n\n======\n", saves, c.saves)
if err != nil {
t.Fatal(err)
}
log.Printf("\n\tTest %d:\n", i)
if len(saves) != len(c.saves) {