convert S3 remote state to a backend

Move the S3 state from a legacy remote state client to an official backend.

This increases test coverage, uses a set schema for configuration, and
will allow new backend features to be implemented for the S3 state, e.g.
"environments".
James Bardin 2017-03-21 13:43:31 -04:00
parent b7152c4405
commit 9f5cf2b105
8 changed files with 511 additions and 339 deletions
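For orientation before the diffs: the sketch below is not part of the commit, but shows roughly how a schema-based backend such as the new S3 one is driven once it is registered under "s3". The caller turns user-supplied settings into a ResourceConfig, lets the backend validate and configure itself against the schema declared in the new backend.go, then asks it for a state. The config.NewRawConfig/terraform.NewResourceConfig wiring and the example bucket, key, and region values are illustrative assumptions, not taken from this change.

package main

import (
	"log"

	"github.com/hashicorp/terraform/backend"
	backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3"
	"github.com/hashicorp/terraform/config"
	"github.com/hashicorp/terraform/terraform"
)

func main() {
	// Example settings only; bucket, key, and region values are made up.
	raw, err := config.NewRawConfig(map[string]interface{}{
		"bucket": "example-terraform-state",
		"key":    "global/terraform.tfstate",
		"region": "us-west-2",
	})
	if err != nil {
		log.Fatal(err)
	}
	cfg := terraform.NewResourceConfig(raw)

	b := backendS3.New()

	// Validation and configuration are driven by the helper/schema fields
	// declared in the new backend.go below.
	if _, errs := b.Validate(cfg); len(errs) > 0 {
		log.Fatalf("invalid backend config: %v", errs)
	}
	if err := b.Configure(cfg); err != nil {
		log.Fatal(err)
	}

	// Only the default state is available until "environments" support lands.
	s, err := b.State(backend.DefaultStateName)
	if err != nil {
		log.Fatal(err)
	}
	if err := s.RefreshState(); err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded state from S3: %v", s.State() != nil)
}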


@@ -12,6 +12,7 @@ import (
backendlocal "github.com/hashicorp/terraform/backend/local"
backendconsul "github.com/hashicorp/terraform/backend/remote-state/consul"
backendinmem "github.com/hashicorp/terraform/backend/remote-state/inmem"
backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3"
)
// backends is the list of available backends. This is a global variable
@@ -36,6 +37,7 @@ func init() {
"local": func() backend.Backend { return &backendlocal.Local{} },
"consul": func() backend.Backend { return backendconsul.New() },
"inmem": func() backend.Backend { return backendinmem.New() },
"s3": func() backend.Backend { return backendS3.New() },
}
// Add the legacy remote backends that haven't yet been converted to


@@ -0,0 +1,199 @@
package s3
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/s3"
cleanhttp "github.com/hashicorp/go-cleanhttp"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/helper/schema"
terraformAWS "github.com/hashicorp/terraform/builtin/providers/aws"
)
// New creates a new backend for S3 remote state.
func New() backend.Backend {
s := &schema.Backend{
Schema: map[string]*schema.Schema{
"bucket": &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: "The name of the S3 bucket",
},
"key": &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: "The path to the state file inside the bucket",
},
"region": &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: "The region of the S3 bucket.",
DefaultFunc: schema.EnvDefaultFunc("AWS_DEFAULT_REGION", nil),
},
"endpoint": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "A custom endpoint for the S3 API",
DefaultFunc: schema.EnvDefaultFunc("AWS_S3_ENDPOINT", ""),
},
"encrypt": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Description: "Whether to enable server side encryption of the state file",
Default: false,
},
"acl": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "Canned ACL to be applied to the state file",
Default: "",
},
"access_key": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "AWS access key",
Default: "",
},
"secret_key": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "AWS secret key",
Default: "",
},
"kms_key_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "The ARN of a KMS Key to use for encrypting the state",
Default: "",
},
"lock_table": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "DynamoDB table for state locking",
Default: "",
},
"profile": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "AWS profile name",
Default: "",
},
"shared_credentials_file": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "Path to a shared credentials file",
Default: "",
},
"token": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "MFA token",
Default: "",
},
"role_arn": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "The role to be assumed",
Default: "",
},
},
}
result := &Backend{Backend: s}
result.Backend.ConfigureFunc = result.configure
return result
}
type Backend struct {
*schema.Backend
// The fields below are set from configure
client *S3Client
}
func (b *Backend) configure(ctx context.Context) error {
if b.client != nil {
return nil
}
// Grab the resource data
data := schema.FromContextBackendConfig(ctx)
bucketName := data.Get("bucket").(string)
keyName := data.Get("key").(string)
endpoint := data.Get("endpoint").(string)
region := data.Get("region").(string)
serverSideEncryption := data.Get("encrypt").(bool)
acl := data.Get("acl").(string)
kmsKeyID := data.Get("kms_key_id").(string)
lockTable := data.Get("lock_table").(string)
var errs []error
creds, err := terraformAWS.GetCredentials(&terraformAWS.Config{
AccessKey: data.Get("access_key").(string),
SecretKey: data.Get("secret_key").(string),
Token: data.Get("token").(string),
Profile: data.Get("profile").(string),
CredsFilename: data.Get("shared_credentials_file").(string),
AssumeRoleARN: data.Get("role_arn").(string),
})
if err != nil {
return err
}
// Call Get to check for credential provider. If nothing found, we'll get an
// error, and we can present it nicely to the user
_, err = creds.Get()
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
errs = append(errs, fmt.Errorf(`No valid credential sources found for AWS S3 remote.
Please see https://www.terraform.io/docs/state/remote/s3.html for more information on
providing credentials for the AWS S3 remote`))
} else {
errs = append(errs, fmt.Errorf("Error loading credentials for AWS S3 remote: %s", err))
}
return &multierror.Error{Errors: errs}
}
awsConfig := &aws.Config{
Credentials: creds,
Endpoint: aws.String(endpoint),
Region: aws.String(region),
HTTPClient: cleanhttp.DefaultClient(),
}
sess := session.New(awsConfig)
nativeClient := s3.New(sess)
dynClient := dynamodb.New(sess)
b.client = &S3Client{
nativeClient: nativeClient,
bucketName: bucketName,
keyName: keyName,
serverSideEncryption: serverSideEncryption,
acl: acl,
kmsKeyID: kmsKeyID,
dynClient: dynClient,
lockTable: lockTable,
}
return nil
}


@@ -0,0 +1,27 @@
package s3
import (
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
)
const (
keyEnvPrefix = "-env:"
)
func (b *Backend) States() ([]string, error) {
return nil, backend.ErrNamedStatesNotSupported
}
func (b *Backend) DeleteState(name string) error {
return backend.ErrNamedStatesNotSupported
}
func (b *Backend) State(name string) (state.State, error) {
if name != backend.DefaultStateName {
return nil, backend.ErrNamedStatesNotSupported
}
return &remote.State{Client: b.client}, nil
}
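Since the stubs above reject anything but the default state, a caller probing for "environments" support has to fall back. An illustrative helper (not part of the commit) might look like this:

package s3

import (
	"github.com/hashicorp/terraform/backend"
	"github.com/hashicorp/terraform/state"
)

// defaultOrNamedState is illustrative only: ask for a named state and fall back
// to the default when the backend reports that named states ("environments")
// are not supported, as the stubs above currently do.
func defaultOrNamedState(b backend.Backend, name string) (state.State, error) {
	s, err := b.State(name)
	if err == backend.ErrNamedStatesNotSupported {
		return b.State(backend.DefaultStateName)
	}
	return s, err
}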


@@ -0,0 +1,202 @@
package s3
import (
"fmt"
"os"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/terraform/backend"
)
// verify that we are doing ACC tests or the S3 tests specifically
func testACC(t *testing.T) {
skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_S3_TEST") == ""
if skip {
t.Log("s3 backend tests require setting TF_ACC or TF_S3_TEST")
t.Skip()
}
if os.Getenv("AWS_DEFAULT_REGION") == "" {
os.Setenv("AWS_DEFAULT_REGION", "us-west-2")
}
}
func TestBackend_impl(t *testing.T) {
var _ backend.Backend = new(Backend)
}
func TestBackendConfig(t *testing.T) {
// This test just instantiates the client. Shouldn't make any actual
// requests nor incur any costs.
config := map[string]interface{}{
"region": "us-west-1",
"bucket": "tf-test",
"key": "state",
"encrypt": true,
"access_key": "ACCESS_KEY",
"secret_key": "SECRET_KEY",
"lock_table": "dynamoTable",
}
b := backend.TestBackendConfig(t, New(), config).(*Backend)
if *b.client.nativeClient.Config.Region != "us-west-1" {
t.Fatalf("Incorrect region was populated")
}
if b.client.bucketName != "tf-test" {
t.Fatalf("Incorrect bucketName was populated")
}
if b.client.keyName != "state" {
t.Fatalf("Incorrect keyName was populated")
}
credentials, err := b.client.nativeClient.Config.Credentials.Get()
if err != nil {
t.Fatalf("Error when requesting credentials")
}
if credentials.AccessKeyID != "ACCESS_KEY" {
t.Fatalf("Incorrect Access Key Id was populated")
}
if credentials.SecretAccessKey != "SECRET_KEY" {
t.Fatalf("Incorrect Secret Access Key was populated")
}
}
func TestBackend(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
keyName := "testState"
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
"bucket": bucketName,
"key": keyName,
"encrypt": true,
}).(*Backend)
createS3Bucket(t, b.client, bucketName)
defer deleteS3Bucket(t, b.client, bucketName)
backend.TestBackend(t, b, nil)
}
func TestBackendLocked(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
keyName := "testState"
b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
"bucket": bucketName,
"key": keyName,
"encrypt": true,
"lock_table": bucketName,
}).(*Backend)
b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
"bucket": bucketName,
"key": keyName,
"encrypt": true,
"lock_table": bucketName,
}).(*Backend)
createS3Bucket(t, b1.client, bucketName)
defer deleteS3Bucket(t, b1.client, bucketName)
createDynamoDBTable(t, b1.client, bucketName)
defer deleteDynamoDBTable(t, b1.client, bucketName)
backend.TestBackend(t, b1, b2)
}
func createS3Bucket(t *testing.T, c *S3Client, bucketName string) {
createBucketReq := &s3.CreateBucketInput{
Bucket: &bucketName,
}
// Be clear about what we're doing in case the user needs to clean
// this up later.
t.Logf("creating S3 bucket %s in %s", bucketName, *c.nativeClient.Config.Region)
_, err := c.nativeClient.CreateBucket(createBucketReq)
if err != nil {
t.Fatal("failed to create test S3 bucket:", err)
}
}
func deleteS3Bucket(t *testing.T, c *S3Client, bucketName string) {
deleteBucketReq := &s3.DeleteBucketInput{
Bucket: &bucketName,
}
_, err := c.nativeClient.DeleteBucket(deleteBucketReq)
if err != nil {
t.Logf("WARNING: Failed to delete the test S3 bucket. It may have been left in your AWS account and may incur storage charges. (error was %s)", err)
}
}
// create the dynamoDB table, and wait until we can query it.
func createDynamoDBTable(t *testing.T, c *S3Client, tableName string) {
createInput := &dynamodb.CreateTableInput{
AttributeDefinitions: []*dynamodb.AttributeDefinition{
{
AttributeName: aws.String("LockID"),
AttributeType: aws.String("S"),
},
},
KeySchema: []*dynamodb.KeySchemaElement{
{
AttributeName: aws.String("LockID"),
KeyType: aws.String("HASH"),
},
},
ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
ReadCapacityUnits: aws.Int64(5),
WriteCapacityUnits: aws.Int64(5),
},
TableName: aws.String(tableName),
}
_, err := c.dynClient.CreateTable(createInput)
if err != nil {
t.Fatal(err)
}
// now wait until it's ACTIVE
start := time.Now()
time.Sleep(time.Second)
describeInput := &dynamodb.DescribeTableInput{
TableName: aws.String(tableName),
}
for {
resp, err := c.dynClient.DescribeTable(describeInput)
if err != nil {
t.Fatal(err)
}
if *resp.Table.TableStatus == "ACTIVE" {
return
}
if time.Since(start) > time.Minute {
t.Fatalf("timed out creating DynamoDB table %s", tableName)
}
time.Sleep(3 * time.Second)
}
}
func deleteDynamoDBTable(t *testing.T, c *S3Client, tableName string) {
params := &dynamodb.DeleteTableInput{
TableName: aws.String(tableName),
}
_, err := c.dynClient.DeleteTable(params)
if err != nil {
t.Logf("WARNING: Failed to delete the test DynamoDB table %q. It has been left in your AWS account and may incur charges. (error was %s)", tableName, err)
}
}


@@ -1,4 +1,4 @@
package remote
package s3
import (
"bytes"
@@ -6,112 +6,17 @@ import (
"fmt"
"io"
"log"
"os"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-multierror"
multierror "github.com/hashicorp/go-multierror"
uuid "github.com/hashicorp/go-uuid"
terraformAws "github.com/hashicorp/terraform/builtin/providers/aws"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
)
func s3Factory(conf map[string]string) (Client, error) {
bucketName, ok := conf["bucket"]
if !ok {
return nil, fmt.Errorf("missing 'bucket' configuration")
}
keyName, ok := conf["key"]
if !ok {
return nil, fmt.Errorf("missing 'key' configuration")
}
endpoint, ok := conf["endpoint"]
if !ok {
endpoint = os.Getenv("AWS_S3_ENDPOINT")
}
regionName, ok := conf["region"]
if !ok {
regionName = os.Getenv("AWS_DEFAULT_REGION")
if regionName == "" {
return nil, fmt.Errorf(
"missing 'region' configuration or AWS_DEFAULT_REGION environment variable")
}
}
serverSideEncryption := false
if raw, ok := conf["encrypt"]; ok {
v, err := strconv.ParseBool(raw)
if err != nil {
return nil, fmt.Errorf(
"'encrypt' field couldn't be parsed as bool: %s", err)
}
serverSideEncryption = v
}
acl := ""
if raw, ok := conf["acl"]; ok {
acl = raw
}
kmsKeyID := conf["kms_key_id"]
var errs []error
creds, err := terraformAws.GetCredentials(&terraformAws.Config{
AccessKey: conf["access_key"],
SecretKey: conf["secret_key"],
Token: conf["token"],
Profile: conf["profile"],
CredsFilename: conf["shared_credentials_file"],
AssumeRoleARN: conf["role_arn"],
})
if err != nil {
return nil, err
}
// Call Get to check for credential provider. If nothing found, we'll get an
// error, and we can present it nicely to the user
_, err = creds.Get()
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
errs = append(errs, fmt.Errorf(`No valid credential sources found for AWS S3 remote.
Please see https://www.terraform.io/docs/state/remote/s3.html for more information on
providing credentials for the AWS S3 remote`))
} else {
errs = append(errs, fmt.Errorf("Error loading credentials for AWS S3 remote: %s", err))
}
return nil, &multierror.Error{Errors: errs}
}
awsConfig := &aws.Config{
Credentials: creds,
Endpoint: aws.String(endpoint),
Region: aws.String(regionName),
HTTPClient: cleanhttp.DefaultClient(),
}
sess := session.New(awsConfig)
nativeClient := s3.New(sess)
dynClient := dynamodb.New(sess)
return &S3Client{
nativeClient: nativeClient,
bucketName: bucketName,
keyName: keyName,
serverSideEncryption: serverSideEncryption,
acl: acl,
kmsKeyID: kmsKeyID,
dynClient: dynClient,
lockTable: conf["lock_table"],
}, nil
}
type S3Client struct {
nativeClient *s3.S3
bucketName string
@@ -123,7 +28,7 @@ type S3Client struct {
lockTable string
}
func (c *S3Client) Get() (*Payload, error) {
func (c *S3Client) Get() (*remote.Payload, error) {
output, err := c.nativeClient.GetObject(&s3.GetObjectInput{
Bucket: &c.bucketName,
Key: &c.keyName,
@@ -148,7 +53,7 @@ func (c *S3Client) Get() (*Payload, error) {
return nil, fmt.Errorf("Failed to read remote state: %s", err)
}
payload := &Payload{
payload := &remote.Payload{
Data: buf.Bytes(),
}
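
The signature changes above (returning *remote.Payload rather than the formerly package-local *Payload) follow directly from moving the client out of state/remote. For reference, this is roughly the shape of the state/remote types the relocated S3Client keeps satisfying; it is paraphrased from that package and is not part of this diff:

// Paraphrased from the state/remote package of this era; shown for reference only.
package remote

// Payload is what Get returns: the raw state bytes plus an MD5 checksum.
type Payload struct {
	MD5  []byte
	Data []byte
}

// Client is the minimal contract a remote state client must meet.
type Client interface {
	Get() (*Payload, error)
	Put(data []byte) error
	Delete() error
}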


@@ -0,0 +1,76 @@
package s3
import (
"fmt"
"testing"
"time"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state/remote"
)
func TestRemoteClient_impl(t *testing.T) {
var _ remote.Client = new(S3Client)
var _ remote.ClientLocker = new(S3Client)
}
func TestRemoteClient(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
keyName := "testState"
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
"bucket": bucketName,
"key": keyName,
"encrypt": true,
}).(*Backend)
state, err := b.State(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
createS3Bucket(t, b.client, bucketName)
defer deleteS3Bucket(t, b.client, bucketName)
remote.TestClient(t, state.(*remote.State).Client)
}
func TestRemoteClientLocks(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
keyName := "testState"
b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
"bucket": bucketName,
"key": keyName,
"encrypt": true,
"lock_table": bucketName,
}).(*Backend)
b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
"bucket": bucketName,
"key": keyName,
"encrypt": true,
"lock_table": bucketName,
}).(*Backend)
s1, err := b1.State(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
s2, err := b2.State(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
createS3Bucket(t, b1.client, bucketName)
defer deleteS3Bucket(t, b1.client, bucketName)
createDynamoDBTable(t, b1.client, bucketName)
defer deleteDynamoDBTable(t, b1.client, bucketName)
remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
}


@@ -51,7 +51,6 @@ var BuiltinClients = map[string]Factory{
"gcs": gcsFactory,
"http": httpFactory,
"local": fileFactory,
"s3": s3Factory,
"swift": swiftFactory,
"manta": mantaFactory,
}


@@ -1,238 +0,0 @@
package remote
import (
"fmt"
"os"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/s3"
)
func TestS3Client_impl(t *testing.T) {
var _ Client = new(S3Client)
var _ ClientLocker = new(S3Client)
}
func TestS3Factory(t *testing.T) {
// This test just instantiates the client. Shouldn't make any actual
// requests nor incur any costs.
config := make(map[string]string)
// Empty config is an error
_, err := s3Factory(config)
if err == nil {
t.Fatalf("Empty config should be error")
}
config["region"] = "us-west-1"
config["bucket"] = "foo"
config["key"] = "bar"
config["encrypt"] = "1"
// For this test we'll provide the credentials as config. The
// acceptance tests implicitly test passing credentials as
// environment variables.
config["access_key"] = "bazkey"
config["secret_key"] = "bazsecret"
client, err := s3Factory(config)
if err != nil {
t.Fatalf("Error for valid config")
}
s3Client := client.(*S3Client)
if *s3Client.nativeClient.Config.Region != "us-west-1" {
t.Fatalf("Incorrect region was populated")
}
if s3Client.bucketName != "foo" {
t.Fatalf("Incorrect bucketName was populated")
}
if s3Client.keyName != "bar" {
t.Fatalf("Incorrect keyName was populated")
}
credentials, err := s3Client.nativeClient.Config.Credentials.Get()
if err != nil {
t.Fatalf("Error when requesting credentials")
}
if credentials.AccessKeyID != "bazkey" {
t.Fatalf("Incorrect Access Key Id was populated")
}
if credentials.SecretAccessKey != "bazsecret" {
t.Fatalf("Incorrect Secret Access Key was populated")
}
}
func TestS3Client(t *testing.T) {
// This test creates a bucket in S3 and populates it.
// It may incur costs, so it will only run if AWS credential environment
// variables are present.
accessKeyId := os.Getenv("AWS_ACCESS_KEY_ID")
if accessKeyId == "" {
t.Skipf("skipping; AWS_ACCESS_KEY_ID must be set")
}
regionName := os.Getenv("AWS_DEFAULT_REGION")
if regionName == "" {
regionName = "us-west-2"
}
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
keyName := "testState"
testData := []byte(`testing data`)
config := make(map[string]string)
config["region"] = regionName
config["bucket"] = bucketName
config["key"] = keyName
config["encrypt"] = "1"
client, err := s3Factory(config)
if err != nil {
t.Fatalf("Error for valid config")
}
s3Client := client.(*S3Client)
nativeClient := s3Client.nativeClient
createBucketReq := &s3.CreateBucketInput{
Bucket: &bucketName,
}
// Be clear about what we're doing in case the user needs to clean
// this up later.
t.Logf("Creating S3 bucket %s in %s", bucketName, regionName)
_, err = nativeClient.CreateBucket(createBucketReq)
if err != nil {
t.Skipf("Failed to create test S3 bucket, so skipping")
}
// Ensure we can perform a PUT request with the encryption header
err = s3Client.Put(testData)
if err != nil {
t.Logf("WARNING: Failed to send test data to S3 bucket. (error was %s)", err)
}
defer func() {
deleteBucketReq := &s3.DeleteBucketInput{
Bucket: &bucketName,
}
_, err := nativeClient.DeleteBucket(deleteBucketReq)
if err != nil {
t.Logf("WARNING: Failed to delete the test S3 bucket. It may have been left in your AWS account and may incur storage charges. (error was %s)", err)
}
}()
testClient(t, client)
}
func TestS3ClientLocks(t *testing.T) {
// This test creates a DynamoDB table.
// It may incur costs, so it will only run if AWS credential environment
// variables are present.
accessKeyId := os.Getenv("AWS_ACCESS_KEY_ID")
if accessKeyId == "" {
t.Skipf("skipping; AWS_ACCESS_KEY_ID must be set")
}
regionName := os.Getenv("AWS_DEFAULT_REGION")
if regionName == "" {
regionName = "us-west-2"
}
bucketName := fmt.Sprintf("terraform-remote-s3-lock-%x", time.Now().Unix())
keyName := "testState"
config := make(map[string]string)
config["region"] = regionName
config["bucket"] = bucketName
config["key"] = keyName
config["encrypt"] = "1"
config["lock_table"] = bucketName
client, err := s3Factory(config)
if err != nil {
t.Fatalf("Error for valid config")
}
s3Client := client.(*S3Client)
// set this up before we try to create the table, in case we time out creating it.
defer deleteDynaboDBTable(t, s3Client, bucketName)
createDynamoDBTable(t, s3Client, bucketName)
TestRemoteLocks(t, client, client)
}
// create the dynamoDB table, and wait until we can query it.
func createDynamoDBTable(t *testing.T, c *S3Client, tableName string) {
createInput := &dynamodb.CreateTableInput{
AttributeDefinitions: []*dynamodb.AttributeDefinition{
{
AttributeName: aws.String("LockID"),
AttributeType: aws.String("S"),
},
},
KeySchema: []*dynamodb.KeySchemaElement{
{
AttributeName: aws.String("LockID"),
KeyType: aws.String("HASH"),
},
},
ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
ReadCapacityUnits: aws.Int64(5),
WriteCapacityUnits: aws.Int64(5),
},
TableName: aws.String(tableName),
}
_, err := c.dynClient.CreateTable(createInput)
if err != nil {
t.Fatal(err)
}
// now wait until it's ACTIVE
start := time.Now()
time.Sleep(time.Second)
describeInput := &dynamodb.DescribeTableInput{
TableName: aws.String(tableName),
}
for {
resp, err := c.dynClient.DescribeTable(describeInput)
if err != nil {
t.Fatal(err)
}
if *resp.Table.TableStatus == "ACTIVE" {
return
}
if time.Since(start) > time.Minute {
t.Fatalf("timed out creating DynamoDB table %s", tableName)
}
time.Sleep(3 * time.Second)
}
}
func deleteDynaboDBTable(t *testing.T, c *S3Client, tableName string) {
params := &dynamodb.DeleteTableInput{
TableName: aws.String(tableName),
}
_, err := c.dynClient.DeleteTable(params)
if err != nil {
t.Logf("WARNING: Failed to delete the test DynamoDB table %q. It has been left in your AWS account and may incur charges. (error was %s)", tableName, err)
}
}