Removing the instance_type check in the ElastiCache cluster creation. We now allow the error to bubble up to the user when the wrong instance type is used. The limitation that t2 instance types do not allow snapshotting is also now documented.

This commit is contained in:
stack72 2015-11-06 11:16:51 +00:00
parent ca2ea80af3
commit 350f91ec06
3 changed files with 21 additions and 52 deletions

View File

@ -205,14 +205,12 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{
req.CacheParameterGroupName = aws.String(v.(string))
}
if !strings.Contains(d.Get("node_type").(string), "cache.t2") {
if v, ok := d.GetOk("snapshot_retention_limit"); ok {
req.SnapshotRetentionLimit = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("snapshot_retention_limit"); ok {
req.SnapshotRetentionLimit = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("snapshot_window"); ok {
req.SnapshotWindow = aws.String(v.(string))
}
if v, ok := d.GetOk("snapshot_window"); ok {
req.SnapshotWindow = aws.String(v.(string))
}
if v, ok := d.GetOk("maintenance_window"); ok {
@ -289,12 +287,8 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{})
d.Set("security_group_ids", c.SecurityGroups)
d.Set("parameter_group_name", c.CacheParameterGroup)
d.Set("maintenance_window", c.PreferredMaintenanceWindow)
if c.SnapshotWindow != nil {
d.Set("snapshot_window", c.SnapshotWindow)
}
if c.SnapshotRetentionLimit != nil {
d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit)
}
d.Set("snapshot_window", c.SnapshotWindow)
d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit)
if c.NotificationConfiguration != nil {
if *c.NotificationConfiguration.TopicStatus == "active" {
d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn)
@ -377,16 +371,15 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
req.EngineVersion = aws.String(d.Get("engine_version").(string))
requestUpdate = true
}
if !strings.Contains(d.Get("node_type").(string), "cache.t2") {
if d.HasChange("snapshot_window") {
req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string))
requestUpdate = true
}
if d.HasChange("snapshot_retention_limit") {
req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int)))
requestUpdate = true
}
if d.HasChange("snapshot_window") {
req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string))
requestUpdate = true
}
if d.HasChange("snapshot_retention_limit") {
req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int)))
requestUpdate = true
}
if d.HasChange("num_cache_nodes") {

View File

@ -33,37 +33,12 @@ func TestAccAWSElasticacheCluster_basic(t *testing.T) {
})
}
// TestAccAWSElasticacheCluster_snapshots is an acceptance test: it applies the
// testAccAWSElasticacheClusterConfig_snapshots configuration and verifies that
// the resulting cluster exposes the expected snapshot_window and
// snapshot_retention_limit attributes.
// NOTE(review): this block is quoted from a commit diff; the diff rendering
// stripped the original indentation, so the code lines are preserved verbatim.
func TestAccAWSElasticacheCluster_snapshots(t *testing.T) {
var ec elasticache.CacheCluster
// Random suffix used to build unique resource names across test runs.
ri := genRandInt()
config := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: config,
// Confirm both the security group and the cluster exist, then pin the
// snapshot attribute values set in the test configuration.
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "snapshot_window", "05:00-09:00"),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "snapshot_retention_limit", "3"),
),
},
},
})
}
func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) {
var ec elasticache.CacheCluster
ri := genRandInt()
preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri)
postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri)
preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri, ri, ri)
postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri, ri, ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -221,7 +196,7 @@ provider "aws" {
region = "us-east-1"
}
resource "aws_security_group" "bar" {
name = "tf-test-security-group"
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
ingress {
from_port = -1
@ -232,7 +207,7 @@ resource "aws_security_group" "bar" {
}
resource "aws_elasticache_security_group" "bar" {
name = "tf-test-security-group"
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
security_group_names = ["${aws_security_group.bar.name}"]
}
@ -240,7 +215,7 @@ resource "aws_elasticache_security_group" "bar" {
resource "aws_elasticache_cluster" "bar" {
cluster_id = "tf-test-%03d"
engine = "redis"
node_type = "cache.m1.small"
node_type = "cache.t2.small"
num_cache_nodes = 1
port = 6379
parameter_group_name = "default.redis2.8"

View File

@ -88,6 +88,7 @@ SNS topic to send ElastiCache notifications to. Example:
* `tags` - (Optional) A mapping of tags to assign to the resource.
~> **NOTE:** Snapshotting functionality is not compatible with t2 instance types.
## Attributes Reference