Merge #6860: More robust handling of AWS Lambda function archives on S3

Martin Atkins 2016-08-27 13:00:59 -07:00
commit 7c4a3bf967
3 changed files with 121 additions and 20 deletions


@@ -10,6 +10,7 @@ FEATURES:
 IMPROVEMENTS:
 * provider/aws: Add MemoryReservation To `aws_ecs_container_definition` data source [GH-8437]
 * provider/aws: Export `arn` of `aws_autoscaling_group` [GH-8503]
+* provider/aws: More robust handling of Lambda function archives hosted on S3 [GH-6860]
 
 BUG FIXES:
 * core: JSON configuration with resources with a single key parse properly [GH-8485]

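For reference, the S3-hosted deployment style the new entry refers to is driven by the `s3_bucket`, `s3_key`, and optional `s3_object_version` arguments of `aws_lambda_function`. A minimal sketch of such a configuration (bucket name, key, and resource labels here are illustrative, not taken from this commit):

resource "aws_lambda_function" "example_from_s3" {
    function_name = "example_from_s3"
    role          = "${aws_iam_role.iam_for_lambda.arn}"
    handler       = "exports.example"

    s3_bucket = "my-artifact-bucket"
    s3_key    = "lambda-func.zip"

    # s3_object_version can point at a specific object version in a versioned
    # bucket; for unversioned buckets it is simply omitted.
}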

@@ -301,29 +301,31 @@ func resourceAwsLambdaFunctionUpdate(d *schema.ResourceData, meta interface{}) e
 	d.Partial(true)
 
-	codeReq := &lambda.UpdateFunctionCodeInput{
-		FunctionName: aws.String(d.Id()),
-	}
-
-	codeUpdate := false
-	if d.HasChange("filename") || d.HasChange("source_code_hash") {
-		name := d.Get("filename").(string)
-		file, err := loadFileContent(name)
-		if err != nil {
-			return fmt.Errorf("Unable to load %q: %s", name, err)
-		}
-		codeReq.ZipFile = file
-		codeUpdate = true
-	}
-
-	if d.HasChange("s3_bucket") || d.HasChange("s3_key") || d.HasChange("s3_object_version") {
-		codeReq.S3Bucket = aws.String(d.Get("s3_bucket").(string))
-		codeReq.S3Key = aws.String(d.Get("s3_key").(string))
-		codeReq.S3ObjectVersion = aws.String(d.Get("s3_object_version").(string))
-		codeUpdate = true
-	}
-
-	if codeUpdate {
+	if d.HasChange("filename") || d.HasChange("source_code_hash") || d.HasChange("s3_bucket") || d.HasChange("s3_key") || d.HasChange("s3_object_version") {
+		codeReq := &lambda.UpdateFunctionCodeInput{
+			FunctionName: aws.String(d.Id()),
+		}
+
+		if v, ok := d.GetOk("filename"); ok {
+			file, err := loadFileContent(v.(string))
+			if err != nil {
+				return fmt.Errorf("Unable to load %q: %s", v.(string), err)
+			}
+			codeReq.ZipFile = file
+		} else {
+			s3Bucket, _ := d.GetOk("s3_bucket")
+			s3Key, _ := d.GetOk("s3_key")
+			s3ObjectVersion, versionOk := d.GetOk("s3_object_version")
+
+			codeReq.S3Bucket = aws.String(s3Bucket.(string))
+			codeReq.S3Key = aws.String(s3Key.(string))
+
+			if versionOk {
+				codeReq.S3ObjectVersion = aws.String(s3ObjectVersion.(string))
+			}
+		}
+
 		log.Printf("[DEBUG] Send Update Lambda Function Code request: %#v", codeReq)
+
 		_, err := conn.UpdateFunctionCode(codeReq)
 		if err != nil {
 			return fmt.Errorf("Error modifying Lambda Function Code %s: %s", d.Id(), err)

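The reworked update path above reads the S3 location with `GetOk`, so `s3_object_version` is only included in the `UpdateFunctionCode` request when it is actually set; an archive in an unversioned bucket no longer requires a version at all. For contrast with the unversioned test fixture added below, a versioned setup would look roughly like the following sketch (illustrative only, not part of this commit; it assumes bucket versioning is enabled and wires `s3_object_version` to the bucket object's `version_id` attribute):

resource "aws_s3_bucket" "artifacts" {
    bucket        = "my-artifact-bucket"
    acl           = "private"
    force_destroy = true

    versioning {
        enabled = true
    }
}

resource "aws_s3_bucket_object" "o" {
    bucket = "${aws_s3_bucket.artifacts.bucket}"
    key    = "lambda-func.zip"
    source = "lambda-func.zip"
    etag   = "${md5(file("lambda-func.zip"))}"
}

resource "aws_lambda_function" "lambda_function_s3_versioned" {
    s3_bucket         = "${aws_s3_bucket_object.o.bucket}"
    s3_key            = "${aws_s3_bucket_object.o.key}"
    s3_object_version = "${aws_s3_bucket_object.o.version_id}"
    function_name     = "tf_acc_lambda_name_s3_versioned"
    role              = "${aws_iam_role.iam_for_lambda.arn}"
    handler           = "exports.example"
}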

@@ -230,6 +230,60 @@ func TestAccAWSLambdaFunction_s3Update(t *testing.T) {
 	})
 }
 
+func TestAccAWSLambdaFunction_s3Update_unversioned(t *testing.T) {
+	var conf lambda.GetFunctionOutput
+
+	path, zipFile, err := createTempFile("lambda_s3Update")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(path)
+
+	bucketName := fmt.Sprintf("tf-acc-lambda-s3-deployments-%d", randomInteger)
+	key := "lambda-func.zip"
+	key2 := "lambda-func-modified.zip"
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckLambdaFunctionDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				PreConfig: func() {
+					// Upload 1st version
+					testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.js": "lambda.js"}, zipFile)
+				},
+				Config: genAWSLambdaFunctionConfig_s3_unversioned(bucketName, key, path),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", "tf_acc_lambda_name_s3_unversioned", &conf),
+					testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_s3_unversioned"),
+					testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, "tf_acc_lambda_name_s3_unversioned"),
+					testAccCheckAwsLambdaSourceCodeHash(&conf, "un6qF9S9hKvXbWwJ6m2EYaVCWjcr0PCZWiTV3h4zB0I="),
+				),
+			},
+			resource.TestStep{
+				ExpectNonEmptyPlan: true,
+				PreConfig: func() {
+					// Upload 2nd version
+					testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.js": "lambda.js"}, zipFile)
+				},
+				Config: genAWSLambdaFunctionConfig_s3_unversioned(bucketName, key2, path),
+			},
+			// Extra step because of missing ComputedWhen
+			// See https://github.com/hashicorp/terraform/pull/4846 & https://github.com/hashicorp/terraform/pull/5330
+			resource.TestStep{
+				Config: genAWSLambdaFunctionConfig_s3_unversioned(bucketName, key2, path),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", "tf_acc_lambda_name_s3_unversioned", &conf),
+					testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_s3_unversioned"),
+					testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, "tf_acc_lambda_name_s3_unversioned"),
+					testAccCheckAwsLambdaSourceCodeHash(&conf, "Y5Jf4Si63UDy1wKNfPs+U56ZL0NxsieKPt9EwRl4GQM="),
+				),
+			},
+		},
+	})
+}
+
 func testAccCheckLambdaFunctionDestroy(s *terraform.State) error {
 	conn := testAccProvider.Meta().(*AWSClient).lambdaconn
@@ -632,3 +686,47 @@ func genAWSLambdaFunctionConfig_s3(bucket, key, path string) string {
 	return fmt.Sprintf(testAccAWSLambdaFunctionConfig_s3_tpl,
 		bucket, key, path, path)
 }
+
+const testAccAWSLambdaFunctionConfig_s3_unversioned_tpl = `
+resource "aws_s3_bucket" "artifacts" {
+    bucket = "%s"
+    acl = "private"
+    force_destroy = true
+}
+resource "aws_s3_bucket_object" "o" {
+    bucket = "${aws_s3_bucket.artifacts.bucket}"
+    key = "%s"
+    source = "%s"
+    etag = "${md5(file("%s"))}"
+}
+resource "aws_iam_role" "iam_for_lambda" {
+    name = "iam_for_lambda"
+    assume_role_policy = <<EOF
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Action": "sts:AssumeRole",
+      "Principal": {
+        "Service": "lambda.amazonaws.com"
+      },
+      "Effect": "Allow",
+      "Sid": ""
+    }
+  ]
+}
+EOF
+}
+resource "aws_lambda_function" "lambda_function_s3" {
+    s3_bucket = "${aws_s3_bucket_object.o.bucket}"
+    s3_key = "${aws_s3_bucket_object.o.key}"
+    function_name = "tf_acc_lambda_name_s3_unversioned"
+    role = "${aws_iam_role.iam_for_lambda.arn}"
+    handler = "exports.example"
+}
+`
+
+func genAWSLambdaFunctionConfig_s3_unversioned(bucket, key, path string) string {
+	return fmt.Sprintf(testAccAWSLambdaFunctionConfig_s3_unversioned_tpl,
+		bucket, key, path, path)
+}