remove unused helper packages

helper/schema has been moved into internal/legacy/helper.
helper/resource and helper/validation are no longer used.
James Bardin 2020-11-18 10:08:50 -05:00
parent 75bbf0b62b
commit 29c342b020
63 changed files with 0 additions and 34440 deletions

View File

@@ -1,79 +0,0 @@
package resource
import (
"fmt"
"strings"
"time"
)
type NotFoundError struct {
LastError error
LastRequest interface{}
LastResponse interface{}
Message string
Retries int
}
func (e *NotFoundError) Error() string {
if e.Message != "" {
return e.Message
}
if e.Retries > 0 {
return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
}
return "couldn't find resource"
}
// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
type UnexpectedStateError struct {
LastError error
State string
ExpectedState []string
}
func (e *UnexpectedStateError) Error() string {
return fmt.Sprintf(
"unexpected state '%s', wanted target '%s'. last error: %s",
e.State,
strings.Join(e.ExpectedState, ", "),
e.LastError,
)
}
// TimeoutError is returned when WaitForState times out
type TimeoutError struct {
LastError error
LastState string
Timeout time.Duration
ExpectedState []string
}
func (e *TimeoutError) Error() string {
expectedState := "resource to be gone"
if len(e.ExpectedState) > 0 {
expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
}
extraInfo := make([]string, 0)
if e.LastState != "" {
extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
}
if e.Timeout > 0 {
extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
}
suffix := ""
if len(extraInfo) > 0 {
suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
}
if e.LastError != nil {
return fmt.Sprintf("timeout while waiting for %s%s: %s",
expectedState, suffix, e.LastError)
}
return fmt.Sprintf("timeout while waiting for %s%s",
expectedState, suffix)
}
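
For orientation (not part of the original commit), a small example-style sketch of the messages these error types render, assuming it sits alongside the types above with fmt and time imported:

func ExampleTimeoutError() {
    nf := &NotFoundError{Retries: 3}
    fmt.Println(nf.Error())

    te := &TimeoutError{
        LastState:     "pending",
        Timeout:       5 * time.Minute,
        ExpectedState: []string{"running"},
    }
    fmt.Println(te.Error())

    // Output:
    // couldn't find resource (3 retries)
    // timeout while waiting for state to become 'running' (last state: 'pending', timeout: 5m0s)
}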

View File

@@ -1,43 +0,0 @@
package resource
import (
"context"
"net"
"time"
"github.com/hashicorp/terraform/helper/plugin"
proto "github.com/hashicorp/terraform/internal/tfplugin5"
tfplugin "github.com/hashicorp/terraform/plugin"
"github.com/hashicorp/terraform/providers"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/grpc"
"google.golang.org/grpc/test/bufconn"
)
// GRPCTestProvider takes a legacy ResourceProvider, wraps it in the new GRPC
// shim and starts it in a grpc server using an inmem connection. It returns a
// GRPCClient for this new server to test the shimmed resource provider.
func GRPCTestProvider(rp terraform.ResourceProvider) providers.Interface {
listener := bufconn.Listen(256 * 1024)
grpcServer := grpc.NewServer()
p := plugin.NewGRPCProviderServerShim(rp)
proto.RegisterProviderServer(grpcServer, p)
go grpcServer.Serve(listener)
conn, err := grpc.Dial("", grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
return listener.Dial()
}), grpc.WithInsecure())
if err != nil {
panic(err)
}
var pp tfplugin.GRPCProviderPlugin
client, _ := pp.GRPCClient(context.Background(), nil, conn)
grpcClient := client.(*tfplugin.GRPCProvider)
grpcClient.TestServer = grpcServer
return grpcClient
}

View File

@@ -1,45 +0,0 @@
package resource
import (
"fmt"
"strings"
"sync"
"time"
)
const UniqueIdPrefix = `terraform-`
// idCounter is a monotonic counter for generating ordered unique ids.
var idMutex sync.Mutex
var idCounter uint32
// Helper for a resource to generate a unique identifier w/ default prefix
func UniqueId() string {
return PrefixedUniqueId(UniqueIdPrefix)
}
// UniqueIDSuffixLength is the string length of the suffix generated by
// PrefixedUniqueId. This can be used by length validation functions to
// ensure prefixes are the correct length for the target field.
const UniqueIDSuffixLength = 26
// Helper for a resource to generate a unique identifier w/ given prefix
//
// After the prefix, the ID consists of a 26-character value (to match the
// previous timestamp-only output): a timestamp followed by an incrementing
// 8-hex-digit counter. The timestamp means that multiple IDs
// created with the same prefix will sort in the order of their creation, even
// across multiple terraform executions, as long as the clock is not turned back
// between calls, and as long as any given terraform execution generates fewer
// than 4 billion IDs.
func PrefixedUniqueId(prefix string) string {
// Be precise to 4 digits of fractional seconds, but remove the dot before the
// fractional seconds.
timestamp := strings.Replace(
time.Now().UTC().Format("20060102150405.0000"), ".", "", 1)
idMutex.Lock()
defer idMutex.Unlock()
idCounter++
return fmt.Sprintf("%s%s%08x", prefix, timestamp, idCounter)
}
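
As a quick illustration (not part of the commit), a generated ID is the prefix, an 18-digit UTC timestamp, and an 8-hex-digit counter; the helper names below come from this file, while "example-" is just an illustrative prefix:

func exampleIDs() {
    fmt.Println(UniqueId())                   // e.g. terraform-20201118150405123400000001
    fmt.Println(PrefixedUniqueId("example-")) // e.g. example-20201118150405123400000002
}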

View File

@@ -1,66 +0,0 @@
package resource
import (
"regexp"
"strings"
"testing"
"time"
)
var allDigits = regexp.MustCompile(`^\d+$`)
var allHex = regexp.MustCompile(`^[a-f0-9]+$`)
func TestUniqueId(t *testing.T) {
split := func(rest string) (timestamp, increment string) {
return rest[:18], rest[18:]
}
iterations := 10000
ids := make(map[string]struct{})
var id, lastId string
for i := 0; i < iterations; i++ {
id = UniqueId()
if _, ok := ids[id]; ok {
t.Fatalf("Got duplicated id! %s", id)
}
if !strings.HasPrefix(id, UniqueIdPrefix) {
t.Fatalf("Unique ID didn't have terraform- prefix! %s", id)
}
rest := strings.TrimPrefix(id, UniqueIdPrefix)
if len(rest) != UniqueIDSuffixLength {
t.Fatalf("PrefixedUniqueId is out of sync with UniqueIDSuffixLength, post-prefix part has wrong length! %s", rest)
}
timestamp, increment := split(rest)
if !allDigits.MatchString(timestamp) {
t.Fatalf("Timestamp not all digits! %s", timestamp)
}
if !allHex.MatchString(increment) {
t.Fatalf("Increment part not all hex! %s", increment)
}
if lastId != "" && lastId >= id {
t.Fatalf("IDs not ordered! %s vs %s", lastId, id)
}
ids[id] = struct{}{}
lastId = id
}
id1 := UniqueId()
time.Sleep(time.Millisecond)
id2 := UniqueId()
timestamp1, _ := split(strings.TrimPrefix(id1, UniqueIdPrefix))
timestamp2, _ := split(strings.TrimPrefix(id2, UniqueIdPrefix))
if timestamp1 == timestamp2 {
t.Fatalf("Timestamp part should update at least once a millisecond %s %s",
id1, id2)
}
}

View File

@@ -1,259 +0,0 @@
package resource
import (
"log"
"time"
)
var refreshGracePeriod = 30 * time.Second
// StateRefreshFunc is a function type used for StateChangeConf that is
// responsible for refreshing the item being watched for a state change.
//
// It returns three results. `result` is any object that will be returned
// as the final object after waiting for state change. This allows you to
// return the final updated object, for example an EC2 instance after refreshing
// it.
//
// `state` is the latest state of that object. And `err` is any error that
// may have happened while refreshing the state.
type StateRefreshFunc func() (result interface{}, state string, err error)
// StateChangeConf is the configuration struct used for `WaitForState`.
type StateChangeConf struct {
Delay time.Duration // Wait this time before starting checks
Pending []string // States that are "allowed" and will continue trying
Refresh StateRefreshFunc // Refreshes the current state
Target []string // Target state
Timeout time.Duration // The amount of time to wait before timeout
MinTimeout time.Duration // Smallest time to wait before refreshes
PollInterval time.Duration // Override MinTimeout/backoff and only poll this often
NotFoundChecks int // Number of times to allow not found
// This is to work around inconsistent APIs
ContinuousTargetOccurence int // Number of times the Target state has to occur continuously
}
// WaitForState watches an object and waits for it to achieve the state
// specified in the configuration using the specified Refresh() func,
// waiting the number of seconds specified in the timeout configuration.
//
// If the Refresh function returns an error, exit immediately with that error.
//
// If the Refresh function returns a state other than the Target state or one
// listed in Pending, return immediately with an error.
//
// If the Timeout is exceeded before reaching the Target state, return an
// error.
//
// Otherwise, the result is the result of the first call to the Refresh function to
// reach the target state.
func (conf *StateChangeConf) WaitForState() (interface{}, error) {
log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target)
notfoundTick := 0
targetOccurence := 0
// Set a default for times to check for not found
if conf.NotFoundChecks == 0 {
conf.NotFoundChecks = 20
}
if conf.ContinuousTargetOccurence == 0 {
conf.ContinuousTargetOccurence = 1
}
type Result struct {
Result interface{}
State string
Error error
Done bool
}
// Read every result from the refresh loop, waiting for a positive result.Done.
resCh := make(chan Result, 1)
// cancellation channel for the refresh loop
cancelCh := make(chan struct{})
result := Result{}
go func() {
defer close(resCh)
time.Sleep(conf.Delay)
// start with 0 delay for the first loop
var wait time.Duration
for {
// store the last result
resCh <- result
// wait and watch for cancellation
select {
case <-cancelCh:
return
case <-time.After(wait):
// first round had no wait
if wait == 0 {
wait = 100 * time.Millisecond
}
}
res, currentState, err := conf.Refresh()
result = Result{
Result: res,
State: currentState,
Error: err,
}
if err != nil {
resCh <- result
return
}
// If we're waiting for the absence of a thing, then return
if res == nil && len(conf.Target) == 0 {
targetOccurence++
if conf.ContinuousTargetOccurence == targetOccurence {
result.Done = true
resCh <- result
return
}
continue
}
if res == nil {
// If we didn't find the resource, check if we have been
// not finding it for a while, and if so, report an error.
notfoundTick++
if notfoundTick > conf.NotFoundChecks {
result.Error = &NotFoundError{
LastError: err,
Retries: notfoundTick,
}
resCh <- result
return
}
} else {
// Reset the counter for when a resource isn't found
notfoundTick = 0
found := false
for _, allowed := range conf.Target {
if currentState == allowed {
found = true
targetOccurence++
if conf.ContinuousTargetOccurence == targetOccurence {
result.Done = true
resCh <- result
return
}
continue
}
}
for _, allowed := range conf.Pending {
if currentState == allowed {
found = true
targetOccurence = 0
break
}
}
if !found && len(conf.Pending) > 0 {
result.Error = &UnexpectedStateError{
LastError: err,
State: result.State,
ExpectedState: conf.Target,
}
resCh <- result
return
}
}
// Wait between refreshes using exponential backoff, except when
// waiting for the target state to reoccur.
if targetOccurence == 0 {
wait *= 2
}
// If a poll interval has been specified, choose that interval.
// Otherwise bound the default value.
if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
wait = conf.PollInterval
} else {
if wait < conf.MinTimeout {
wait = conf.MinTimeout
} else if wait > 10*time.Second {
wait = 10 * time.Second
}
}
log.Printf("[TRACE] Waiting %s before next try", wait)
}
}()
// store the last value result from the refresh loop
lastResult := Result{}
timeout := time.After(conf.Timeout)
for {
select {
case r, ok := <-resCh:
// channel closed, so return the last result
if !ok {
return lastResult.Result, lastResult.Error
}
// we reached the intended state
if r.Done {
return r.Result, r.Error
}
// still waiting, store the last result
lastResult = r
case <-timeout:
log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout)
log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)
// cancel the goroutine and start our grace period timer
close(cancelCh)
timeout := time.After(refreshGracePeriod)
// we need a for loop and a label to break on, because we may have
// an extra response value to read, but still want to wait for the
// channel to close.
forSelect:
for {
select {
case r, ok := <-resCh:
if r.Done {
// the last refresh loop reached the desired state
return r.Result, r.Error
}
if !ok {
// the goroutine returned
break forSelect
}
// target state not reached, save the result for the
// TimeoutError and wait for the channel to close
lastResult = r
case <-timeout:
log.Println("[ERROR] WaitForState exceeded refresh grace period")
break forSelect
}
}
return nil, &TimeoutError{
LastError: lastResult.Error,
LastState: lastResult.State,
Timeout: conf.Timeout,
ExpectedState: conf.Target,
}
}
}
}
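
For context (not part of the commit), a minimal sketch of how provider code typically drives WaitForState; describeThing and its Status field are hypothetical stand-ins for a real API:

// Hypothetical usage sketch of StateChangeConf.
func waitForThingRunning(id string, timeout time.Duration) (interface{}, error) {
    conf := &StateChangeConf{
        Pending:    []string{"pending", "starting"},
        Target:     []string{"running"},
        Timeout:    timeout,
        MinTimeout: 3 * time.Second,
        Refresh: func() (interface{}, string, error) {
            thing, err := describeThing(id) // hypothetical API call
            if err != nil {
                return nil, "", err
            }
            if thing == nil {
                // a nil result counts toward NotFoundChecks
                return nil, "", nil
            }
            return thing, thing.Status, nil
        },
    }
    return conf.WaitForState()
}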

View File

@@ -1,218 +0,0 @@
package resource
import (
"encoding/json"
"fmt"
"github.com/hashicorp/terraform/addrs"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/configs/hcl2shim"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/terraform"
)
// shimNewState takes a new *states.State and converts it to a legacy state for the provider acceptance tests
func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) {
state := terraform.NewState()
// in the odd case of a nil state, let the helper packages handle it
if newState == nil {
return nil, nil
}
for _, newMod := range newState.Modules {
mod := state.AddModule(newMod.Addr)
for name, out := range newMod.OutputValues {
outputType := ""
val := hcl2shim.ConfigValueFromHCL2(out.Value)
ty := out.Value.Type()
switch {
case ty == cty.String:
outputType = "string"
case ty.IsTupleType() || ty.IsListType():
outputType = "list"
case ty.IsMapType():
outputType = "map"
}
mod.Outputs[name] = &terraform.OutputState{
Type: outputType,
Value: val,
Sensitive: out.Sensitive,
}
}
for _, res := range newMod.Resources {
resType := res.Addr.Resource.Type
providerType := res.ProviderConfig.Provider.Type
resource := getResource(providers, providerType, res.Addr.Resource)
for key, i := range res.Instances {
resState := &terraform.ResourceState{
Type: resType,
Provider: legacyProviderConfigString(res.ProviderConfig),
}
// We should always have a Current instance here, but be safe about checking.
if i.Current != nil {
flatmap, err := shimmedAttributes(i.Current, resource)
if err != nil {
return nil, fmt.Errorf("error decoding state for %q: %s", resType, err)
}
var meta map[string]interface{}
if i.Current.Private != nil {
err := json.Unmarshal(i.Current.Private, &meta)
if err != nil {
return nil, err
}
}
resState.Primary = &terraform.InstanceState{
ID: flatmap["id"],
Attributes: flatmap,
Tainted: i.Current.Status == states.ObjectTainted,
Meta: meta,
}
if i.Current.SchemaVersion != 0 {
if resState.Primary.Meta == nil {
resState.Primary.Meta = map[string]interface{}{}
}
resState.Primary.Meta["schema_version"] = i.Current.SchemaVersion
}
// convert the indexes to the old style flatmap indexes
idx := ""
switch key.(type) {
case addrs.IntKey:
// don't add a numeric index to resources with only a single instance
if len(res.Instances) > 1 {
idx = fmt.Sprintf(".%d", key)
}
case addrs.StringKey:
idx = "." + key.String()
}
mod.Resources[res.Addr.Resource.String()+idx] = resState
}
// add any deposed instances
for _, dep := range i.Deposed {
flatmap, err := shimmedAttributes(dep, resource)
if err != nil {
return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err)
}
var meta map[string]interface{}
if dep.Private != nil {
err := json.Unmarshal(dep.Private, &meta)
if err != nil {
return nil, err
}
}
deposed := &terraform.InstanceState{
ID: flatmap["id"],
Attributes: flatmap,
Tainted: dep.Status == states.ObjectTainted,
Meta: meta,
}
if dep.SchemaVersion != 0 {
deposed.Meta = map[string]interface{}{
"schema_version": dep.SchemaVersion,
}
}
resState.Deposed = append(resState.Deposed, deposed)
}
}
}
}
return state, nil
}
func getResource(providers map[string]terraform.ResourceProvider, providerName string, addr addrs.Resource) *schema.Resource {
p := providers[providerName]
if p == nil {
panic(fmt.Sprintf("provider %q not found in test step", providerName))
}
// this is only for tests, so we should only see *schema.Provider values here
provider := p.(*schema.Provider)
switch addr.Mode {
case addrs.ManagedResourceMode:
resource := provider.ResourcesMap[addr.Type]
if resource != nil {
return resource
}
case addrs.DataResourceMode:
resource := provider.DataSourcesMap[addr.Type]
if resource != nil {
return resource
}
}
panic(fmt.Sprintf("resource %s not found in test step", addr.Type))
}
func shimmedAttributes(instance *states.ResourceInstanceObjectSrc, res *schema.Resource) (map[string]string, error) {
flatmap := instance.AttrsFlat
if flatmap != nil {
return flatmap, nil
}
// if we have json attrs, they need to be decoded
rio, err := instance.Decode(res.CoreConfigSchema().ImpliedType())
if err != nil {
return nil, err
}
instanceState, err := res.ShimInstanceStateFromValue(rio.Value)
if err != nil {
return nil, err
}
return instanceState.Attributes, nil
}
func shimLegacyState(legacy *terraform.State) (*states.State, error) {
state, err := terraform.ShimLegacyState(legacy)
if err != nil {
return nil, err
}
if state.HasResources() {
for _, module := range state.Modules {
for name, resource := range module.Resources {
module.Resources[name].ProviderConfig.Provider = addrs.ImpliedProviderForUnqualifiedType(resource.Addr.Resource.ImpliedProvider())
}
}
}
return state, err
}
// legacyProviderConfigString was copied from addrs.Provider.LegacyString() to
// create a legacy-style string from a non-legacy provider. This is only
// necessary as this package shims back and forth between legacy and modern
// state, neither of which encode the addrs.Provider for a resource.
func legacyProviderConfigString(pc addrs.AbsProviderConfig) string {
if pc.Alias != "" {
if len(pc.Module) == 0 {
return fmt.Sprintf("%s.%s.%s", "provider", pc.Provider.Type, pc.Alias)
} else {
return fmt.Sprintf("%s.%s.%s.%s", pc.Module.String(), "provider", pc.Provider.LegacyString(), pc.Alias)
}
}
if len(pc.Module) == 0 {
return fmt.Sprintf("%s.%s", "provider", pc.Provider.Type)
}
return fmt.Sprintf("%s.%s.%s", pc.Module.String(), "provider", pc.Provider.Type)
}
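
As a worked example (illustrative, and it would live in a test file alongside this code), the legacy provider strings produced above for a provider named "test":

func TestLegacyProviderConfigStringSketch(t *testing.T) {
    root := addrs.AbsProviderConfig{
        Provider: addrs.NewDefaultProvider("test"),
        Module:   addrs.RootModule,
    }
    child := addrs.AbsProviderConfig{
        Provider: addrs.NewDefaultProvider("test"),
        Module:   addrs.Module{"child"},
    }
    if got := legacyProviderConfigString(root); got != "provider.test" {
        t.Fatalf("unexpected root provider string: %s", got)
    }
    if got := legacyProviderConfigString(child); got != "module.child.provider.test" {
        t.Fatalf("unexpected child provider string: %s", got)
    }
}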

View File

@@ -1,387 +0,0 @@
package resource
import (
"testing"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/terraform"
"github.com/zclconf/go-cty/cty"
)
// TestStateShim is meant to be a fairly comprehensive test, checking root outputs, managed and data resources, deposed instances, and indexed instances across root and child modules
func TestStateShim(t *testing.T) {
state := states.NewState()
rootModule := state.RootModule()
if rootModule == nil {
t.Errorf("root module is nil; want valid object")
}
rootModule.SetOutputValue("bar", cty.ListVal([]cty.Value{cty.StringVal("bar"), cty.StringVal("value")}), false)
rootModule.SetOutputValue("secret", cty.StringVal("secret value"), true)
rootModule.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_thing",
Name: "foo",
}.Instance(addrs.NoKey),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsFlat: map[string]string{"id": "foo", "bazzle": "dazzle"},
SchemaVersion: 7,
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: addrs.RootModule,
},
)
rootModule.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_thing",
Name: "baz",
}.Instance(addrs.NoKey),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsFlat: map[string]string{"id": "baz", "bazzle": "dazzle"},
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: addrs.RootModule,
},
)
childInstance := addrs.RootModuleInstance.Child("child", addrs.NoKey)
childModule := state.EnsureModule(childInstance)
childModule.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.DataResourceMode,
Type: "test_data_thing",
Name: "foo",
}.Instance(addrs.NoKey),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id": "bar", "fuzzle":"wuzzle"}`),
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: childInstance.Module(),
},
)
childModule.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_thing",
Name: "baz",
}.Instance(addrs.NoKey),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id": "bar", "fizzle":"wizzle"}`),
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: childInstance.Module(),
},
)
childModule.SetResourceInstanceDeposed(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_thing",
Name: "baz",
}.Instance(addrs.NoKey),
"00000001",
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsFlat: map[string]string{"id": "old", "fizzle": "wizzle"},
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: childInstance.Module(),
},
)
childModule.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_thing",
Name: "lots",
}.Instance(addrs.IntKey(0)),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsFlat: map[string]string{"id": "0", "bazzle": "dazzle"},
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: childInstance.Module(),
},
)
childModule.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_thing",
Name: "lots",
}.Instance(addrs.IntKey(1)),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectTainted,
AttrsFlat: map[string]string{"id": "1", "bazzle": "dazzle"},
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: childInstance.Module(),
},
)
childModule.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_thing",
Name: "single_count",
}.Instance(addrs.IntKey(0)),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id": "single", "bazzle":"dazzle"}`),
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: childInstance.Module(),
},
)
expected := &terraform.State{
Version: 3,
Modules: []*terraform.ModuleState{
&terraform.ModuleState{
Path: []string{"root"},
Outputs: map[string]*terraform.OutputState{
"bar": {
Type: "list",
Value: []interface{}{"bar", "value"},
},
"secret": {
Sensitive: true,
Type: "string",
Value: "secret value",
},
},
Resources: map[string]*terraform.ResourceState{
"test_thing.baz": &terraform.ResourceState{
Type: "test_thing",
Provider: "provider.test",
Primary: &terraform.InstanceState{
ID: "baz",
Attributes: map[string]string{
"id": "baz",
"bazzle": "dazzle",
},
},
},
"test_thing.foo": &terraform.ResourceState{
Type: "test_thing",
Provider: "provider.test",
Primary: &terraform.InstanceState{
ID: "foo",
Attributes: map[string]string{
"id": "foo",
"bazzle": "dazzle",
},
Meta: map[string]interface{}{
"schema_version": 7,
},
},
},
},
},
&terraform.ModuleState{
Path: []string{"root", "child"},
Resources: map[string]*terraform.ResourceState{
"test_thing.baz": &terraform.ResourceState{
Type: "test_thing",
Provider: "module.child.provider.test",
Primary: &terraform.InstanceState{
ID: "bar",
Attributes: map[string]string{
"id": "bar",
"fizzle": "wizzle",
},
},
Deposed: []*terraform.InstanceState{
{
ID: "old",
Attributes: map[string]string{
"id": "old",
"fizzle": "wizzle",
},
},
},
},
"data.test_data_thing.foo": &terraform.ResourceState{
Type: "test_data_thing",
Provider: "module.child.provider.test",
Primary: &terraform.InstanceState{
ID: "bar",
Attributes: map[string]string{
"id": "bar",
"fuzzle": "wuzzle",
},
},
},
"test_thing.lots.0": &terraform.ResourceState{
Type: "test_thing",
Provider: "module.child.provider.test",
Primary: &terraform.InstanceState{
ID: "0",
Attributes: map[string]string{
"id": "0",
"bazzle": "dazzle",
},
},
},
"test_thing.lots.1": &terraform.ResourceState{
Type: "test_thing",
Provider: "module.child.provider.test",
Primary: &terraform.InstanceState{
ID: "1",
Attributes: map[string]string{
"id": "1",
"bazzle": "dazzle",
},
Tainted: true,
},
},
"test_thing.single_count": &terraform.ResourceState{
Type: "test_thing",
Provider: "module.child.provider.test",
Primary: &terraform.InstanceState{
ID: "single",
Attributes: map[string]string{
"id": "single",
"bazzle": "dazzle",
},
},
},
},
},
},
}
providers := map[string]terraform.ResourceProvider{
"test": &schema.Provider{
ResourcesMap: map[string]*schema.Resource{
"test_thing": &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {Type: schema.TypeString, Computed: true},
"fizzle": {Type: schema.TypeString, Optional: true},
"bazzle": {Type: schema.TypeString, Optional: true},
},
},
},
DataSourcesMap: map[string]*schema.Resource{
"test_data_thing": &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {Type: schema.TypeString, Computed: true},
"fuzzle": {Type: schema.TypeString, Optional: true},
},
},
},
},
}
shimmed, err := shimNewState(state, providers)
if err != nil {
t.Fatal(err)
}
if !expected.Equal(shimmed) {
t.Fatalf("wrong result state\ngot:\n%s\n\nwant:\n%s", shimmed, expected)
}
}
// TestShimLegacyState only checks the functionality unique to this func: adding
// the implied provider FQN
func TestShimLegacyState(t *testing.T) {
input := &terraform.State{
Version: 3,
Modules: []*terraform.ModuleState{
&terraform.ModuleState{
Path: []string{"root"},
Resources: map[string]*terraform.ResourceState{
"test_thing.baz": &terraform.ResourceState{
Type: "test_thing",
Provider: "provider.test",
Primary: &terraform.InstanceState{
ID: "baz",
Attributes: map[string]string{
"id": "baz",
"bazzle": "dazzle",
},
},
},
},
},
&terraform.ModuleState{
Path: []string{"root", "child"},
Resources: map[string]*terraform.ResourceState{
"test_thing.bar": &terraform.ResourceState{
Type: "test_thing",
Provider: "module.child.provider.test",
Primary: &terraform.InstanceState{
ID: "bar",
Attributes: map[string]string{
"id": "bar",
"fizzle": "wizzle",
},
},
},
},
},
},
}
expected := states.NewState()
root := expected.EnsureModule(addrs.RootModuleInstance)
root.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_thing",
Name: "baz",
}.Instance(addrs.NoKey),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsFlat: map[string]string{"id": "baz", "bazzle": "dazzle"},
Dependencies: []addrs.ConfigResource{},
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: addrs.RootModule,
},
)
child := expected.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey))
child.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_thing",
Name: "bar",
}.Instance(addrs.NoKey),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsFlat: map[string]string{"id": "bar", "fizzle": "wizzle"},
Dependencies: []addrs.ConfigResource{},
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: child.Addr.Module(),
},
)
got, err := shimLegacyState(input)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if !got.Equal(expected) {
t.Fatal("wrong result")
}
}

View File

@@ -1,329 +0,0 @@
package resource
import (
"errors"
"strings"
"sync/atomic"
"testing"
"time"
)
func FailedStateRefreshFunc() StateRefreshFunc {
return func() (interface{}, string, error) {
return nil, "", errors.New("failed")
}
}
func TimeoutStateRefreshFunc() StateRefreshFunc {
return func() (interface{}, string, error) {
time.Sleep(100 * time.Second)
return nil, "", errors.New("failed")
}
}
func SuccessfulStateRefreshFunc() StateRefreshFunc {
return func() (interface{}, string, error) {
return struct{}{}, "running", nil
}
}
type StateGenerator struct {
position int
stateSequence []string
}
func (r *StateGenerator) NextState() (int, string, error) {
p, v := r.position, ""
if len(r.stateSequence)-1 >= p {
v = r.stateSequence[p]
} else {
return -1, "", errors.New("No more states available")
}
r.position += 1
return p, v, nil
}
func NewStateGenerator(sequence []string) *StateGenerator {
r := &StateGenerator{}
r.stateSequence = sequence
return r
}
func InconsistentStateRefreshFunc() StateRefreshFunc {
sequence := []string{
"done", "replicating",
"done", "done", "done",
"replicating",
"done", "done", "done",
}
r := NewStateGenerator(sequence)
return func() (interface{}, string, error) {
idx, s, err := r.NextState()
if err != nil {
return nil, "", err
}
return idx, s, nil
}
}
func UnknownPendingStateRefreshFunc() StateRefreshFunc {
sequence := []string{
"unknown1", "unknown2", "done",
}
r := NewStateGenerator(sequence)
return func() (interface{}, string, error) {
idx, s, err := r.NextState()
if err != nil {
return nil, "", err
}
return idx, s, nil
}
}
func TestWaitForState_inconsistent_positive(t *testing.T) {
conf := &StateChangeConf{
Pending: []string{"replicating"},
Target: []string{"done"},
Refresh: InconsistentStateRefreshFunc(),
Timeout: 90 * time.Millisecond,
PollInterval: 10 * time.Millisecond,
ContinuousTargetOccurence: 3,
}
idx, err := conf.WaitForState()
if err != nil {
t.Fatalf("err: %s", err)
}
if idx != 4 {
t.Fatalf("Expected index 4, given %d", idx.(int))
}
}
func TestWaitForState_inconsistent_negative(t *testing.T) {
refreshCount := int64(0)
f := InconsistentStateRefreshFunc()
refresh := func() (interface{}, string, error) {
atomic.AddInt64(&refreshCount, 1)
return f()
}
conf := &StateChangeConf{
Pending: []string{"replicating"},
Target: []string{"done"},
Refresh: refresh,
Timeout: 85 * time.Millisecond,
PollInterval: 10 * time.Millisecond,
ContinuousTargetOccurence: 4,
}
_, err := conf.WaitForState()
if err == nil {
t.Fatal("Expected timeout error. No error returned.")
}
// we can't guarantee the exact number of refresh calls in the tests by
// timing them, but we want to make sure the test at least went through the
// required states.
if atomic.LoadInt64(&refreshCount) < 6 {
t.Fatal("refreshed called too few times")
}
expectedErr := "timeout while waiting for state to become 'done'"
if !strings.HasPrefix(err.Error(), expectedErr) {
t.Fatalf("error prefix doesn't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error())
}
}
func TestWaitForState_timeout(t *testing.T) {
old := refreshGracePeriod
refreshGracePeriod = 5 * time.Millisecond
defer func() {
refreshGracePeriod = old
}()
conf := &StateChangeConf{
Pending: []string{"pending", "incomplete"},
Target: []string{"running"},
Refresh: TimeoutStateRefreshFunc(),
Timeout: 1 * time.Millisecond,
}
obj, err := conf.WaitForState()
if err == nil {
t.Fatal("Expected timeout error. No error returned.")
}
expectedErr := "timeout while waiting for state to become 'running' (timeout: 1ms)"
if err.Error() != expectedErr {
t.Fatalf("Errors don't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error())
}
if obj != nil {
t.Fatalf("should not return obj")
}
}
// Make sure a timeout actually cancels the refresh goroutine and waits for its
// return.
func TestWaitForState_cancel(t *testing.T) {
// make this refresh func block until we cancel it
cancel := make(chan struct{})
refresh := func() (interface{}, string, error) {
<-cancel
return nil, "pending", nil
}
conf := &StateChangeConf{
Pending: []string{"pending", "incomplete"},
Target: []string{"running"},
Refresh: refresh,
Timeout: 10 * time.Millisecond,
PollInterval: 10 * time.Second,
}
var obj interface{}
var err error
waitDone := make(chan struct{})
go func() {
defer close(waitDone)
obj, err = conf.WaitForState()
}()
// make sure WaitForState is blocked
select {
case <-waitDone:
t.Fatal("WaitForState returned too early")
case <-time.After(10 * time.Millisecond):
}
// unlock the refresh function
close(cancel)
// make sure WaitForState returns
select {
case <-waitDone:
case <-time.After(time.Second):
t.Fatal("WaitForState didn't return after refresh finished")
}
if err == nil {
t.Fatal("Expected timeout error. No error returned.")
}
expectedErr := "timeout while waiting for state to become 'running'"
if !strings.HasPrefix(err.Error(), expectedErr) {
t.Fatalf("Errors don't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error())
}
if obj != nil {
t.Fatalf("should not return obj")
}
}
func TestWaitForState_success(t *testing.T) {
conf := &StateChangeConf{
Pending: []string{"pending", "incomplete"},
Target: []string{"running"},
Refresh: SuccessfulStateRefreshFunc(),
Timeout: 200 * time.Second,
}
obj, err := conf.WaitForState()
if err != nil {
t.Fatalf("err: %s", err)
}
if obj == nil {
t.Fatalf("should return obj")
}
}
func TestWaitForState_successUnknownPending(t *testing.T) {
conf := &StateChangeConf{
Target: []string{"done"},
Refresh: UnknownPendingStateRefreshFunc(),
Timeout: 200 * time.Second,
}
obj, err := conf.WaitForState()
if err != nil {
t.Fatalf("err: %s", err)
}
if obj == nil {
t.Fatalf("should return obj")
}
}
func TestWaitForState_successEmpty(t *testing.T) {
conf := &StateChangeConf{
Pending: []string{"pending", "incomplete"},
Target: []string{},
Refresh: func() (interface{}, string, error) {
return nil, "", nil
},
Timeout: 200 * time.Second,
}
obj, err := conf.WaitForState()
if err != nil {
t.Fatalf("err: %s", err)
}
if obj != nil {
t.Fatalf("obj should be nil")
}
}
func TestWaitForState_failureEmpty(t *testing.T) {
conf := &StateChangeConf{
Pending: []string{"pending", "incomplete"},
Target: []string{},
NotFoundChecks: 1,
Refresh: func() (interface{}, string, error) {
return 42, "pending", nil
},
PollInterval: 10 * time.Millisecond,
Timeout: 100 * time.Millisecond,
}
_, err := conf.WaitForState()
if err == nil {
t.Fatal("Expected timeout error. Got none.")
}
expectedErr := "timeout while waiting for resource to be gone (last state: 'pending', timeout: 100ms)"
if err.Error() != expectedErr {
t.Fatalf("Errors don't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error())
}
}
func TestWaitForState_failure(t *testing.T) {
conf := &StateChangeConf{
Pending: []string{"pending", "incomplete"},
Target: []string{"running"},
Refresh: FailedStateRefreshFunc(),
Timeout: 200 * time.Second,
}
obj, err := conf.WaitForState()
if err == nil {
t.Fatal("Expected error. No error returned.")
}
expectedErr := "failed"
if err.Error() != expectedErr {
t.Fatalf("Errors don't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error())
}
if obj != nil {
t.Fatalf("should not return obj")
}
}

File diff suppressed because it is too large.

View File

@@ -1,378 +0,0 @@
package resource
import (
"bufio"
"bytes"
"errors"
"fmt"
"log"
"sort"
"strings"
"github.com/hashicorp/terraform/configs/hcl2shim"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/plans"
"github.com/hashicorp/terraform/terraform"
"github.com/hashicorp/terraform/tfdiags"
)
// testStepConfig runs a config-mode test step
func testStepConfig(
opts terraform.ContextOpts,
state *terraform.State,
step TestStep) (*terraform.State, error) {
return testStep(opts, state, step)
}
func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) {
if !step.Destroy {
if err := testStepTaint(state, step); err != nil {
return state, err
}
}
cfg, err := testConfig(opts, step)
if err != nil {
return state, err
}
var stepDiags tfdiags.Diagnostics
// Build the context
opts.Config = cfg
opts.State, err = shimLegacyState(state)
if err != nil {
return nil, err
}
opts.Destroy = step.Destroy
ctx, stepDiags := terraform.NewContext(&opts)
if stepDiags.HasErrors() {
return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err())
}
if stepDiags := ctx.Validate(); len(stepDiags) > 0 {
if stepDiags.HasErrors() {
return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err())
}
log.Printf("[WARN] Config warnings:\n%s", stepDiags)
}
// If this step is a PlanOnly step, skip over this first Plan and subsequent
// Apply, and use the follow up Plan that checks for perpetual diffs
if !step.PlanOnly {
// Plan!
p, stepDiags := ctx.Plan()
if stepDiags.HasErrors() {
return state, newOperationError("plan", stepDiags)
}
newState := p.State
log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes))
// We need to keep a copy of the state prior to destroying
// such that destroy steps can verify their behavior in the check
// function
stateBeforeApplication := state.DeepCopy()
// Apply the diff, creating real resources.
newState, stepDiags = ctx.Apply()
// shim the state first so the test can check the state on errors
state, err = shimNewState(newState, step.providers)
if err != nil {
return nil, err
}
if stepDiags.HasErrors() {
return state, newOperationError("apply", stepDiags)
}
// Run any configured checks
if step.Check != nil {
if step.Destroy {
if err := step.Check(stateBeforeApplication); err != nil {
return state, fmt.Errorf("Check failed: %s", err)
}
} else {
if err := step.Check(state); err != nil {
return state, fmt.Errorf("Check failed: %s", err)
}
}
}
}
// Now verify that the plan is empty and that we don't have a perpetual diff issue.
// We do this with TWO plans, one without a refresh.
p, stepDiags := ctx.Plan()
if stepDiags.HasErrors() {
return state, newOperationError("follow-up plan", stepDiags)
}
// we don't technically need this any longer with plan handling refreshing,
// but run it anyway to ensure the context is working as expected.
p, stepDiags = ctx.Plan()
if stepDiags.HasErrors() {
return state, newOperationError("second follow-up plan", stepDiags)
}
empty := true
newState := p.State
// the legacy tests never took outputs into account
for _, c := range p.Changes.Resources {
if c.Action != plans.NoOp {
empty = false
break
}
}
if !empty {
if step.ExpectNonEmptyPlan {
log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
} else {
return state, fmt.Errorf(
"After applying this step, the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
}
}
if !empty {
if step.ExpectNonEmptyPlan {
log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
} else {
return state, fmt.Errorf(
"After applying this step and refreshing, "+
"the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
}
}
// Made it here, but expected a non-empty plan, fail!
if step.ExpectNonEmptyPlan && empty {
return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!")
}
// Made it here? Good job test step!
return state, nil
}
// legacyPlanComparisonString produces a string representation of the changes
// from a plan and a given state together, as was formerly produced by the
// String method of terraform.Plan.
//
// This is here only for compatibility with existing tests that predate our
// new plan and state types, and should not be used in new tests. Instead, use
// a library like "cmp" to do a deep equality and diff on the two
// data structures.
func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string {
return fmt.Sprintf(
"DIFF:\n\n%s\n\nSTATE:\n\n%s",
legacyDiffComparisonString(changes),
state.String(),
)
}
// legacyDiffComparisonString produces a string representation of the changes
// from a planned changes object, as was formerly produced by the String method
// of terraform.Diff.
//
// This is here only for compatibility with existing tests that predate our
// new plan types, and should not be used in new tests. Instead, use a library
// like "cmp" to do a deep equality check and diff on the two data structures.
func legacyDiffComparisonString(changes *plans.Changes) string {
// The old string representation of a plan was grouped by module, but
// our new plan structure is not grouped in that way and so we'll need
// to preprocess it in order to produce that grouping.
type ResourceChanges struct {
Current *plans.ResourceInstanceChangeSrc
Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc
}
byModule := map[string]map[string]*ResourceChanges{}
resourceKeys := map[string][]string{}
requiresReplace := map[string][]string{}
var moduleKeys []string
for _, rc := range changes.Resources {
if rc.Action == plans.NoOp {
// We won't mention no-op changes here at all, since the old plan
// model we are emulating here didn't have such a concept.
continue
}
moduleKey := rc.Addr.Module.String()
if _, exists := byModule[moduleKey]; !exists {
moduleKeys = append(moduleKeys, moduleKey)
byModule[moduleKey] = make(map[string]*ResourceChanges)
}
resourceKey := rc.Addr.Resource.String()
if _, exists := byModule[moduleKey][resourceKey]; !exists {
resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey)
byModule[moduleKey][resourceKey] = &ResourceChanges{
Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc),
}
}
if rc.DeposedKey == states.NotDeposed {
byModule[moduleKey][resourceKey].Current = rc
} else {
byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc
}
rr := []string{}
for _, p := range rc.RequiredReplace.List() {
rr = append(rr, hcl2shim.FlatmapKeyFromPath(p))
}
requiresReplace[resourceKey] = rr
}
sort.Strings(moduleKeys)
for _, ks := range resourceKeys {
sort.Strings(ks)
}
var buf bytes.Buffer
for _, moduleKey := range moduleKeys {
rcs := byModule[moduleKey]
var mBuf bytes.Buffer
for _, resourceKey := range resourceKeys[moduleKey] {
rc := rcs[resourceKey]
forceNewAttrs := requiresReplace[resourceKey]
crud := "UPDATE"
if rc.Current != nil {
switch rc.Current.Action {
case plans.DeleteThenCreate:
crud = "DESTROY/CREATE"
case plans.CreateThenDelete:
crud = "CREATE/DESTROY"
case plans.Delete:
crud = "DESTROY"
case plans.Create:
crud = "CREATE"
}
} else {
// We must be working on a deposed object then, in which
// case destroying is the only possible action.
crud = "DESTROY"
}
extra := ""
if rc.Current == nil && len(rc.Deposed) > 0 {
extra = " (deposed only)"
}
fmt.Fprintf(
&mBuf, "%s: %s%s\n",
crud, resourceKey, extra,
)
attrNames := map[string]bool{}
var oldAttrs map[string]string
var newAttrs map[string]string
if rc.Current != nil {
if before := rc.Current.Before; before != nil {
ty, err := before.ImpliedType()
if err == nil {
val, err := before.Decode(ty)
if err == nil {
oldAttrs = hcl2shim.FlatmapValueFromHCL2(val)
for k := range oldAttrs {
attrNames[k] = true
}
}
}
}
if after := rc.Current.After; after != nil {
ty, err := after.ImpliedType()
if err == nil {
val, err := after.Decode(ty)
if err == nil {
newAttrs = hcl2shim.FlatmapValueFromHCL2(val)
for k := range newAttrs {
attrNames[k] = true
}
}
}
}
}
if oldAttrs == nil {
oldAttrs = make(map[string]string)
}
if newAttrs == nil {
newAttrs = make(map[string]string)
}
attrNamesOrder := make([]string, 0, len(attrNames))
keyLen := 0
for n := range attrNames {
attrNamesOrder = append(attrNamesOrder, n)
if len(n) > keyLen {
keyLen = len(n)
}
}
sort.Strings(attrNamesOrder)
for _, attrK := range attrNamesOrder {
v := newAttrs[attrK]
u := oldAttrs[attrK]
if v == hcl2shim.UnknownVariableValue {
v = "<computed>"
}
// NOTE: we don't support <sensitive> here because we would
// need schema to do that. Excluding sensitive values
// is now done at the UI layer, and so should not be tested
// at the core layer.
updateMsg := ""
// This may not be as precise as in the old diff, as it matches
// everything under the attribute that was originally marked as
// ForceNew, but should help make it easier to determine what
// caused replacement here.
for _, k := range forceNewAttrs {
if strings.HasPrefix(attrK, k) {
updateMsg = " (forces new resource)"
break
}
}
fmt.Fprintf(
&mBuf, " %s:%s %#v => %#v%s\n",
attrK,
strings.Repeat(" ", keyLen-len(attrK)),
u, v,
updateMsg,
)
}
}
if moduleKey == "" { // root module
buf.Write(mBuf.Bytes())
buf.WriteByte('\n')
continue
}
fmt.Fprintf(&buf, "%s:\n", moduleKey)
s := bufio.NewScanner(&mBuf)
for s.Scan() {
buf.WriteString(fmt.Sprintf(" %s\n", s.Text()))
}
}
return buf.String()
}
func testStepTaint(state *terraform.State, step TestStep) error {
for _, p := range step.Taint {
m := state.RootModule()
if m == nil {
return errors.New("no state")
}
rs, ok := m.Resources[p]
if !ok {
return fmt.Errorf("resource %q not found in state", p)
}
log.Printf("[WARN] Test: Explicitly tainting resource %q", p)
rs.Taint()
}
return nil
}
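
To make the emulated legacy format concrete, a root-module create in the comparison string above renders roughly like this (illustrative resource and attribute names; the post-apply state is printed under STATE:):

DIFF:

CREATE: test_thing.foo
  bazzle: "" => "dazzle"
  id:     "" => "<computed>"

STATE:

<rendering of the post-apply state>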

View File

@@ -1,230 +0,0 @@
package resource
import (
"fmt"
"log"
"reflect"
"strings"
"github.com/davecgh/go-spew/spew"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/terraform"
)
// testStepImportState runs an import state test step
func testStepImportState(
opts terraform.ContextOpts,
state *terraform.State,
step TestStep) (*terraform.State, error) {
// Determine the ID to import
var importId string
switch {
case step.ImportStateIdFunc != nil:
var err error
importId, err = step.ImportStateIdFunc(state)
if err != nil {
return state, err
}
case step.ImportStateId != "":
importId = step.ImportStateId
default:
resource, err := testResource(step, state)
if err != nil {
return state, err
}
importId = resource.Primary.ID
}
importPrefix := step.ImportStateIdPrefix
if importPrefix != "" {
importId = fmt.Sprintf("%s%s", importPrefix, importId)
}
// Setup the context. We initialize with an empty state. We use the
// full config for provider configurations.
cfg, err := testConfig(opts, step)
if err != nil {
return state, err
}
opts.Config = cfg
// import tests start with empty state
opts.State = states.NewState()
ctx, stepDiags := terraform.NewContext(&opts)
if stepDiags.HasErrors() {
return state, stepDiags.Err()
}
// The test step provides the resource address as a string, so we need
// to parse it to get an addrs.AbsResourceInstance to pass in to the
// import method.
traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{})
if hclDiags.HasErrors() {
return nil, hclDiags
}
importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal)
if stepDiags.HasErrors() {
return nil, stepDiags.Err()
}
// Do the import
importedState, stepDiags := ctx.Import(&terraform.ImportOpts{
Targets: []*terraform.ImportTarget{
&terraform.ImportTarget{
Addr: importAddr,
ID: importId,
},
},
})
if stepDiags.HasErrors() {
log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err())
return state, stepDiags.Err()
}
newState, err := shimNewState(importedState, step.providers)
if err != nil {
return nil, err
}
// Go through the new state and verify
if step.ImportStateCheck != nil {
var states []*terraform.InstanceState
for _, r := range newState.RootModule().Resources {
if r.Primary != nil {
is := r.Primary.DeepCopy()
is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type
states = append(states, is)
}
}
if err := step.ImportStateCheck(states); err != nil {
return state, err
}
}
// Verify that all the states match
if step.ImportStateVerify {
new := newState.RootModule().Resources
old := state.RootModule().Resources
for _, r := range new {
// Find the existing resource
var oldR *terraform.ResourceState
for _, r2 := range old {
if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type {
oldR = r2
break
}
}
if oldR == nil {
return state, fmt.Errorf(
"Failed state verification, resource with ID %s not found",
r.Primary.ID)
}
// We'll try our best to find the schema for this resource type
// so we can ignore Removed fields during validation. If we fail
// to find the schema then we won't ignore them and so the test
// will need to rely on explicit ImportStateVerifyIgnore, though
// this shouldn't happen in any reasonable case.
var rsrcSchema *schema.Resource
if providerAddr, diags := addrs.ParseAbsProviderConfigStr(r.Provider); !diags.HasErrors() {
// FIXME
providerType := providerAddr.Provider.Type
if provider, ok := step.providers[providerType]; ok {
if provider, ok := provider.(*schema.Provider); ok {
rsrcSchema = provider.ResourcesMap[r.Type]
}
}
}
// don't add empty flatmapped containers, so we can more easily
// compare the attributes
skipEmpty := func(k, v string) bool {
if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") {
if v == "0" {
return true
}
}
return false
}
// Compare their attributes
actual := make(map[string]string)
for k, v := range r.Primary.Attributes {
if skipEmpty(k, v) {
continue
}
actual[k] = v
}
expected := make(map[string]string)
for k, v := range oldR.Primary.Attributes {
if skipEmpty(k, v) {
continue
}
expected[k] = v
}
// Remove fields we're ignoring
for _, v := range step.ImportStateVerifyIgnore {
for k := range actual {
if strings.HasPrefix(k, v) {
delete(actual, k)
}
}
for k := range expected {
if strings.HasPrefix(k, v) {
delete(expected, k)
}
}
}
// Also remove any attributes that are marked as "Removed" in the
// schema, if we have a schema to check that against.
if rsrcSchema != nil {
for k := range actual {
for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) {
if schema.Removed != "" {
delete(actual, k)
break
}
}
}
for k := range expected {
for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) {
if schema.Removed != "" {
delete(expected, k)
break
}
}
}
}
if !reflect.DeepEqual(actual, expected) {
// Determine only the different attributes
for k, v := range expected {
if av, ok := actual[k]; ok && v == av {
delete(expected, k)
delete(actual, k)
}
}
spewConf := spew.NewDefaultConfig()
spewConf.SortKeys = true
return state, fmt.Errorf(
"ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
"\n\n%s\n\n%s",
spewConf.Sdump(actual), spewConf.Sdump(expected))
}
}
}
// Return the old state (non-imported) so we don't change anything.
return state, nil
}
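
As a small worked sketch (not part of the commit) of the flatmap filtering used during ImportStateVerify above; the closure mirrors skipEmpty, and the attribute keys are hypothetical:

func ExampleSkipEmptyFiltering() {
    skipEmpty := func(k, v string) bool {
        return (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) && v == "0"
    }
    fmt.Println(skipEmpty("tags.%", "0"))  // empty map counter: dropped
    fmt.Println(skipEmpty("ports.#", "2")) // non-empty list counter: kept
    fmt.Println(skipEmpty("id", "foo"))    // ordinary attribute: kept
    // Output:
    // true
    // false
    // false
}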

View File

@@ -1,517 +0,0 @@
package resource
import (
"errors"
"fmt"
"testing"
"github.com/hashicorp/terraform/terraform"
)
func TestTest_importState(t *testing.T) {
t.Skip("test requires new provider implementation")
mp := testProvider()
mp.ImportStateReturn = []*terraform.InstanceState{
&terraform.InstanceState{
ID: "foo",
Ephemeral: terraform.EphemeralState{Type: "test_instance"},
},
}
mp.RefreshFn = func(
i *terraform.InstanceInfo,
s *terraform.InstanceState) (*terraform.InstanceState, error) {
return s, nil
}
checked := false
checkFn := func(s []*terraform.InstanceState) error {
checked = true
if s[0].ID != "foo" {
return fmt.Errorf("bad: %#v", s)
}
return nil
}
mt := new(mockT)
Test(mt, TestCase{
Providers: map[string]terraform.ResourceProvider{
"test": mp,
},
Steps: []TestStep{
TestStep{
Config: testConfigStrProvider,
ResourceName: "test_instance.foo",
ImportState: true,
ImportStateId: "foo",
ImportStateCheck: checkFn,
},
},
})
if mt.failed() {
t.Fatalf("test failed: %s", mt.failMessage())
}
if !checked {
t.Fatal("didn't call check")
}
}
func TestTest_importStateFail(t *testing.T) {
t.Skip("test requires new provider implementation")
mp := testProvider()
mp.ImportStateReturn = []*terraform.InstanceState{
&terraform.InstanceState{
ID: "bar",
Ephemeral: terraform.EphemeralState{Type: "test_instance"},
},
}
mp.RefreshFn = func(
i *terraform.InstanceInfo,
s *terraform.InstanceState) (*terraform.InstanceState, error) {
return s, nil
}
checked := false
checkFn := func(s []*terraform.InstanceState) error {
checked = true
if s[0].ID != "foo" {
return fmt.Errorf("bad: %#v", s)
}
return nil
}
mt := new(mockT)
Test(mt, TestCase{
Providers: map[string]terraform.ResourceProvider{
"test": mp,
},
Steps: []TestStep{
TestStep{
Config: testConfigStrProvider,
ResourceName: "test_instance.foo",
ImportState: true,
ImportStateId: "foo",
ImportStateCheck: checkFn,
},
},
})
if !mt.failed() {
t.Fatal("should fail")
}
if !checked {
t.Fatal("didn't call check")
}
}
func TestTest_importStateDetectId(t *testing.T) {
t.Skip("test requires new provider implementation")
mp := testProvider()
mp.DiffReturn = nil
mp.ApplyFn = func(
info *terraform.InstanceInfo,
state *terraform.InstanceState,
diff *terraform.InstanceDiff) (*terraform.InstanceState, error) {
if !diff.Destroy {
return &terraform.InstanceState{
ID: "foo",
}, nil
}
return nil, nil
}
mp.RefreshFn = func(
i *terraform.InstanceInfo,
s *terraform.InstanceState) (*terraform.InstanceState, error) {
return s, nil
}
mp.ImportStateFn = func(
info *terraform.InstanceInfo, id string) ([]*terraform.InstanceState, error) {
if id != "foo" {
return nil, fmt.Errorf("bad import ID: %s", id)
}
return []*terraform.InstanceState{
&terraform.InstanceState{
ID: "bar",
Ephemeral: terraform.EphemeralState{Type: "test_instance"},
},
}, nil
}
checked := false
checkFn := func(s []*terraform.InstanceState) error {
checked = true
if s[0].ID != "bar" {
return fmt.Errorf("bad: %#v", s)
}
return nil
}
mt := new(mockT)
Test(mt, TestCase{
Providers: map[string]terraform.ResourceProvider{
"test": mp,
},
Steps: []TestStep{
TestStep{
Config: testConfigStr,
},
TestStep{
Config: testConfigStr,
ResourceName: "test_instance.foo",
ImportState: true,
ImportStateCheck: checkFn,
},
},
})
if mt.failed() {
t.Fatalf("test failed: %s", mt.failMessage())
}
if !checked {
t.Fatal("didn't call check")
}
}
func TestTest_importStateIdPrefix(t *testing.T) {
t.Skip("test requires new provider implementation")
mp := testProvider()
mp.DiffReturn = nil
mp.ApplyFn = func(
info *terraform.InstanceInfo,
state *terraform.InstanceState,
diff *terraform.InstanceDiff) (*terraform.InstanceState, error) {
if !diff.Destroy {
return &terraform.InstanceState{
ID: "foo",
}, nil
}
return nil, nil
}
mp.RefreshFn = func(
i *terraform.InstanceInfo,
s *terraform.InstanceState) (*terraform.InstanceState, error) {
return s, nil
}
mp.ImportStateFn = func(
info *terraform.InstanceInfo, id string) ([]*terraform.InstanceState, error) {
if id != "bazfoo" {
return nil, fmt.Errorf("bad import ID: %s", id)
}
return []*terraform.InstanceState{
{
ID: "bar",
Ephemeral: terraform.EphemeralState{Type: "test_instance"},
},
}, nil
}
checked := false
checkFn := func(s []*terraform.InstanceState) error {
checked = true
if s[0].ID != "bar" {
return fmt.Errorf("bad: %#v", s)
}
return nil
}
mt := new(mockT)
Test(mt, TestCase{
Providers: map[string]terraform.ResourceProvider{
"test": mp,
},
Steps: []TestStep{
{
Config: testConfigStr,
},
{
Config: testConfigStr,
ResourceName: "test_instance.foo",
ImportState: true,
ImportStateCheck: checkFn,
ImportStateIdPrefix: "baz",
},
},
})
if mt.failed() {
t.Fatalf("test failed: %s", mt.failMessage())
}
if !checked {
t.Fatal("didn't call check")
}
}
func TestTest_importStateVerify(t *testing.T) {
t.Skip("test requires new provider implementation")
mp := testProvider()
mp.DiffReturn = nil
mp.ApplyFn = func(
info *terraform.InstanceInfo,
state *terraform.InstanceState,
diff *terraform.InstanceDiff) (*terraform.InstanceState, error) {
if !diff.Destroy {
return &terraform.InstanceState{
ID: "foo",
Attributes: map[string]string{
"foo": "bar",
},
}, nil
}
return nil, nil
}
mp.RefreshFn = func(
i *terraform.InstanceInfo,
s *terraform.InstanceState) (*terraform.InstanceState, error) {
if len(s.Attributes) == 0 {
s.Attributes = map[string]string{
"id": s.ID,
"foo": "bar",
}
}
return s, nil
}
mp.ImportStateFn = func(
info *terraform.InstanceInfo, id string) ([]*terraform.InstanceState, error) {
if id != "foo" {
return nil, fmt.Errorf("bad import ID: %s", id)
}
return []*terraform.InstanceState{
&terraform.InstanceState{
ID: "foo",
Ephemeral: terraform.EphemeralState{Type: "test_instance"},
},
}, nil
}
mt := new(mockT)
Test(mt, TestCase{
Providers: map[string]terraform.ResourceProvider{
"test": mp,
},
Steps: []TestStep{
TestStep{
Config: testConfigStr,
},
TestStep{
Config: testConfigStr,
ResourceName: "test_instance.foo",
ImportState: true,
ImportStateVerify: true,
},
},
})
if mt.failed() {
t.Fatalf("test failed: %s", mt.failMessage())
}
}
func TestTest_importStateVerifyFail(t *testing.T) {
t.Skip("test requires new provider implementation")
mp := testProvider()
mp.DiffReturn = nil
mp.ApplyFn = func(
info *terraform.InstanceInfo,
state *terraform.InstanceState,
diff *terraform.InstanceDiff) (*terraform.InstanceState, error) {
if !diff.Destroy {
return &terraform.InstanceState{
ID: "foo",
Attributes: map[string]string{
"foo": "bar",
},
}, nil
}
return nil, nil
}
mp.RefreshFn = func(
i *terraform.InstanceInfo,
s *terraform.InstanceState) (*terraform.InstanceState, error) {
return s, nil
}
mp.ImportStateFn = func(
info *terraform.InstanceInfo, id string) ([]*terraform.InstanceState, error) {
if id != "foo" {
return nil, fmt.Errorf("bad import ID: %s", id)
}
return []*terraform.InstanceState{
&terraform.InstanceState{
ID: "foo",
Ephemeral: terraform.EphemeralState{Type: "test_instance"},
},
}, nil
}
mt := new(mockT)
Test(mt, TestCase{
Providers: map[string]terraform.ResourceProvider{
"test": mp,
},
Steps: []TestStep{
TestStep{
Config: testConfigStr,
},
TestStep{
Config: testConfigStr,
ResourceName: "test_instance.foo",
ImportState: true,
ImportStateVerify: true,
},
},
})
if !mt.failed() {
t.Fatalf("test should fail")
}
}
func TestTest_importStateIdFunc(t *testing.T) {
t.Skip("test requires new provider implementation")
mp := testProvider()
mp.ImportStateFn = func(
info *terraform.InstanceInfo, id string) ([]*terraform.InstanceState, error) {
if id != "foo:bar" {
return nil, fmt.Errorf("bad import ID: %s", id)
}
return []*terraform.InstanceState{
{
ID: "foo",
Ephemeral: terraform.EphemeralState{Type: "test_instance"},
},
}, nil
}
mp.RefreshFn = func(
i *terraform.InstanceInfo,
s *terraform.InstanceState) (*terraform.InstanceState, error) {
return s, nil
}
checked := false
checkFn := func(s []*terraform.InstanceState) error {
checked = true
if s[0].ID != "foo" {
return fmt.Errorf("bad: %#v", s)
}
return nil
}
mt := new(mockT)
Test(mt, TestCase{
Providers: map[string]terraform.ResourceProvider{
"test": mp,
},
Steps: []TestStep{
TestStep{
Config: testConfigStrProvider,
ResourceName: "test_instance.foo",
ImportState: true,
ImportStateIdFunc: func(*terraform.State) (string, error) { return "foo:bar", nil },
ImportStateCheck: checkFn,
},
},
})
if mt.failed() {
t.Fatalf("test failed: %s", mt.failMessage())
}
if !checked {
t.Fatal("didn't call check")
}
}
func TestTest_importStateIdFuncFail(t *testing.T) {
t.Skip("test requires new provider implementation")
mp := testProvider()
mp.ImportStateFn = func(
info *terraform.InstanceInfo, id string) ([]*terraform.InstanceState, error) {
if id != "foo:bar" {
return nil, fmt.Errorf("bad import ID: %s", id)
}
return []*terraform.InstanceState{
{
ID: "foo",
Ephemeral: terraform.EphemeralState{Type: "test_instance"},
},
}, nil
}
mp.RefreshFn = func(
i *terraform.InstanceInfo,
s *terraform.InstanceState) (*terraform.InstanceState, error) {
return s, nil
}
checkFn := func(s []*terraform.InstanceState) error {
if s[0].ID != "foo" {
return fmt.Errorf("bad: %#v", s)
}
return nil
}
mt := new(mockT)
Test(mt, TestCase{
Providers: map[string]terraform.ResourceProvider{
"test": mp,
},
Steps: []TestStep{
TestStep{
Config: testConfigStrProvider,
ResourceName: "test_instance.foo",
ImportState: true,
ImportStateIdFunc: func(*terraform.State) (string, error) { return "foo:bar", errors.New("foobar") },
ImportStateCheck: checkFn,
},
},
})
if !mt.failed() {
t.Fatalf("test should fail")
}
}

File diff suppressed because it is too large

View File

@ -1,84 +0,0 @@
package resource
import (
"sync"
"time"
)
// Retry is a basic wrapper around StateChangeConf that will just retry
// a function until it no longer returns an error.
func Retry(timeout time.Duration, f RetryFunc) error {
// These are used to pull the error out of the function; need a mutex to
// avoid a data race.
var resultErr error
var resultErrMu sync.Mutex
c := &StateChangeConf{
Pending: []string{"retryableerror"},
Target: []string{"success"},
Timeout: timeout,
MinTimeout: 500 * time.Millisecond,
Refresh: func() (interface{}, string, error) {
rerr := f()
resultErrMu.Lock()
defer resultErrMu.Unlock()
if rerr == nil {
resultErr = nil
return 42, "success", nil
}
resultErr = rerr.Err
if rerr.Retryable {
return 42, "retryableerror", nil
}
return nil, "quit", rerr.Err
},
}
_, waitErr := c.WaitForState()
// Need to acquire the lock here to avoid a data race when using resultErr as
// the return value
resultErrMu.Lock()
defer resultErrMu.Unlock()
// resultErr may be nil because the wait timed out and resultErr was never
// set; this is still an error
if resultErr == nil {
return waitErr
}
// resultErr takes precedence over waitErr if both are set because it is
// more likely to be useful
return resultErr
}
// RetryFunc is the function retried until it succeeds.
type RetryFunc func() *RetryError
// RetryError is the required return type of RetryFunc. It forces client code
// to choose whether or not a given error is retryable.
type RetryError struct {
Err error
Retryable bool
}
// RetryableError is a helper to create a RetryError that's retryable from a
// given error.
func RetryableError(err error) *RetryError {
if err == nil {
return nil
}
return &RetryError{Err: err, Retryable: true}
}
// NonRetryableError is a helper to create a RetryError that's _not_ retryable
// from a given error.
func NonRetryableError(err error) *RetryError {
if err == nil {
return nil
}
return &RetryError{Err: err, Retryable: false}
}
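
As a hedged sketch of how these helpers were typically used together (the checkSomething helper and the five-minute timeout are invented for illustration, and the import path refers to this package as it existed before this removal), a caller wraps its polling logic in a RetryFunc and classifies each failure as retryable or not:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

// checkSomething is a hypothetical stand-in for polling a remote API.
func checkSomething() (bool, error) {
	return true, nil
}

func main() {
	// Retry calls the function until it returns nil or the timeout expires;
	// RetryableError asks for another attempt, NonRetryableError aborts.
	err := resource.Retry(5*time.Minute, func() *resource.RetryError {
		done, err := checkSomething()
		if err != nil {
			return resource.NonRetryableError(err)
		}
		if !done {
			return resource.RetryableError(fmt.Errorf("operation still pending"))
		}
		return nil
	})
	if err != nil {
		log.Fatalf("wait failed: %s", err)
	}
}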

View File

@ -1,95 +0,0 @@
package resource
import (
"fmt"
"testing"
"time"
)
func TestRetry(t *testing.T) {
t.Parallel()
tries := 0
f := func() *RetryError {
tries++
if tries == 3 {
return nil
}
return RetryableError(fmt.Errorf("error"))
}
err := Retry(10*time.Second, f)
if err != nil {
t.Fatalf("err: %s", err)
}
}
// make sure a slow StateRefreshFunc is allowed to complete after timeout
func TestRetry_grace(t *testing.T) {
t.Parallel()
f := func() *RetryError {
time.Sleep(1 * time.Second)
return nil
}
err := Retry(10*time.Millisecond, f)
if err != nil {
t.Fatalf("err: %s", err)
}
}
func TestRetry_timeout(t *testing.T) {
t.Parallel()
f := func() *RetryError {
return RetryableError(fmt.Errorf("always"))
}
err := Retry(1*time.Second, f)
if err == nil {
t.Fatal("should error")
}
}
func TestRetry_hang(t *testing.T) {
old := refreshGracePeriod
refreshGracePeriod = 50 * time.Millisecond
defer func() {
refreshGracePeriod = old
}()
f := func() *RetryError {
time.Sleep(2 * time.Second)
return nil
}
err := Retry(50*time.Millisecond, f)
if err == nil {
t.Fatal("should error")
}
}
func TestRetry_error(t *testing.T) {
t.Parallel()
expected := fmt.Errorf("nope")
f := func() *RetryError {
return NonRetryableError(expected)
}
errCh := make(chan error)
go func() {
errCh <- Retry(1*time.Second, f)
}()
select {
case err := <-errCh:
if err != expected {
t.Fatalf("bad: %#v", err)
}
case <-time.After(5 * time.Second):
t.Fatal("timeout")
}
}

View File

@ -1,11 +0,0 @@
# Terraform Helper Lib: schema
The `schema` package provides a high-level interface for writing resource
providers for Terraform.
If you're writing a resource provider, we recommend you use this package.
The interface exposed by this package is much friendlier than trying to
write to the Terraform API directly. The core Terraform API is low-level
and built for maximum flexibility and control, whereas this library is built
as a framework around that to more easily write common providers.

View File

@ -1,200 +0,0 @@
package schema
import (
"context"
"fmt"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/configs/hcl2shim"
"github.com/hashicorp/terraform/terraform"
ctyconvert "github.com/zclconf/go-cty/cty/convert"
)
// Backend represents a partial backend.Backend implementation and simplifies
// the creation of configuration loading and validation.
//
// Unlike other schema structs such as Provider, this struct is meant to be
// embedded within your actual implementation. It provides implementations
// only for Input and Configure, and it gives you a method (which you're
// expected to call from the other implementation funcs) for accessing the
// configuration in the form of a ResourceData.
type Backend struct {
// Schema is the schema for the configuration of this backend. If this
// Backend has no configuration this can be omitted.
Schema map[string]*Schema
// ConfigureFunc is called to configure the backend. Use the
// FromContext* methods to extract information from the context.
// This can be nil, in which case nothing will be called but the
// config will still be stored.
ConfigureFunc func(context.Context) error
config *ResourceData
}
var (
backendConfigKey = contextKey("backend config")
)
// FromContextBackendConfig extracts a ResourceData with the configuration
// from the context. This should only be called by Backend functions.
func FromContextBackendConfig(ctx context.Context) *ResourceData {
return ctx.Value(backendConfigKey).(*ResourceData)
}
func (b *Backend) ConfigSchema() *configschema.Block {
// This is an alias of CoreConfigSchema just to implement the
// backend.Backend interface.
return b.CoreConfigSchema()
}
func (b *Backend) PrepareConfig(configVal cty.Value) (cty.Value, tfdiags.Diagnostics) {
if b == nil {
return configVal, nil
}
var diags tfdiags.Diagnostics
var err error
// In order to use Transform below, this needs to be filled out completely
// according to the schema.
configVal, err = b.CoreConfigSchema().CoerceValue(configVal)
if err != nil {
return configVal, diags.Append(err)
}
// look up any required, top-level attributes that are Null, and see if we
// have a Default value available.
configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) {
// we're only looking for top-level attributes
if len(path) != 1 {
return val, nil
}
// nothing to do if we already have a value
if !val.IsNull() {
return val, nil
}
// get the Schema definition for this attribute
getAttr, ok := path[0].(cty.GetAttrStep)
// these should all exist, but just ignore anything strange
if !ok {
return val, nil
}
attrSchema := b.Schema[getAttr.Name]
// continue to ignore anything that doesn't match
if attrSchema == nil {
return val, nil
}
// this is deprecated, so don't set it
if attrSchema.Deprecated != "" || attrSchema.Removed != "" {
return val, nil
}
// find a default value if it exists
def, err := attrSchema.DefaultValue()
if err != nil {
diags = diags.Append(fmt.Errorf("error getting default for %q: %s", getAttr.Name, err))
return val, err
}
// no default
if def == nil {
return val, nil
}
// create a cty.Value and make sure it's the correct type
tmpVal := hcl2shim.HCL2ValueFromConfigValue(def)
// helper/schema used to allow setting "" to a bool
if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) {
// return a warning about the conversion
diags = diags.Append("provider set empty string as default value for bool " + getAttr.Name)
tmpVal = cty.False
}
val, err = ctyconvert.Convert(tmpVal, val.Type())
if err != nil {
diags = diags.Append(fmt.Errorf("error setting default for %q: %s", getAttr.Name, err))
}
return val, err
})
if err != nil {
// any error here was already added to the diagnostics
return configVal, diags
}
shimRC := b.shimConfig(configVal)
warns, errs := schemaMap(b.Schema).Validate(shimRC)
for _, warn := range warns {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range errs {
diags = diags.Append(err)
}
return configVal, diags
}
func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics {
if b == nil {
return nil
}
var diags tfdiags.Diagnostics
sm := schemaMap(b.Schema)
shimRC := b.shimConfig(obj)
// Get a ResourceData for this configuration. To do this, we actually
// generate an intermediary "diff" although that is never exposed.
diff, err := sm.Diff(nil, shimRC, nil, nil, true)
if err != nil {
diags = diags.Append(err)
return diags
}
data, err := sm.Data(nil, diff)
if err != nil {
diags = diags.Append(err)
return diags
}
b.config = data
if b.ConfigureFunc != nil {
err = b.ConfigureFunc(context.WithValue(
context.Background(), backendConfigKey, data))
if err != nil {
diags = diags.Append(err)
return diags
}
}
return diags
}
// shimConfig turns a new-style cty.Value configuration (which must be of
// an object type) into a minimal old-style *terraform.ResourceConfig object
// that should be populated enough to appease the not-yet-updated functionality
// in this package. This should be removed once everything is updated.
func (b *Backend) shimConfig(obj cty.Value) *terraform.ResourceConfig {
shimMap, ok := hcl2shim.ConfigValueFromHCL2(obj).(map[string]interface{})
if !ok {
// If the configVal was nil, we still want a non-nil map here.
shimMap = map[string]interface{}{}
}
return &terraform.ResourceConfig{
Config: shimMap,
Raw: shimMap,
}
}
// Config returns the configuration. This is available after Configure is
// called.
func (b *Backend) Config() *ResourceData {
return b.config
}
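
As a hedged sketch of the embedding pattern described above (the backend type and its single "path" attribute are hypothetical), an implementation embeds *Backend, declares its Schema, and reads the decoded configuration inside ConfigureFunc via FromContextBackendConfig:

package examplebackend

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

// Local is a hypothetical backend built on top of schema.Backend.
type Local struct {
	*schema.Backend

	path string
}

func New() *Local {
	b := &Local{}
	b.Backend = &schema.Backend{
		Schema: map[string]*schema.Schema{
			"path": {
				Type:     schema.TypeString,
				Optional: true,
				Default:  "terraform.tfstate",
			},
		},
		ConfigureFunc: func(ctx context.Context) error {
			// The decoded configuration is handed back through the context.
			data := schema.FromContextBackendConfig(ctx)
			b.path = data.Get("path").(string)
			if b.path == "" {
				return fmt.Errorf("path must not be empty")
			}
			return nil
		},
	}
	return b
}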

View File

@ -1,193 +0,0 @@
package schema
import (
"context"
"fmt"
"testing"
"github.com/zclconf/go-cty/cty"
)
func TestBackendPrepare(t *testing.T) {
cases := []struct {
Name string
B *Backend
Config map[string]cty.Value
Expect map[string]cty.Value
Err bool
}{
{
"Basic required field",
&Backend{
Schema: map[string]*Schema{
"foo": &Schema{
Required: true,
Type: TypeString,
},
},
},
map[string]cty.Value{},
map[string]cty.Value{},
true,
},
{
"Null config",
&Backend{
Schema: map[string]*Schema{
"foo": &Schema{
Required: true,
Type: TypeString,
},
},
},
nil,
map[string]cty.Value{},
true,
},
{
"Basic required field set",
&Backend{
Schema: map[string]*Schema{
"foo": &Schema{
Required: true,
Type: TypeString,
},
},
},
map[string]cty.Value{
"foo": cty.StringVal("bar"),
},
map[string]cty.Value{
"foo": cty.StringVal("bar"),
},
false,
},
{
"unused default",
&Backend{
Schema: map[string]*Schema{
"foo": &Schema{
Optional: true,
Type: TypeString,
Default: "baz",
},
},
},
map[string]cty.Value{
"foo": cty.StringVal("bar"),
},
map[string]cty.Value{
"foo": cty.StringVal("bar"),
},
false,
},
{
"default",
&Backend{
Schema: map[string]*Schema{
"foo": &Schema{
Type: TypeString,
Optional: true,
Default: "baz",
},
},
},
map[string]cty.Value{},
map[string]cty.Value{
"foo": cty.StringVal("baz"),
},
false,
},
{
"default func",
&Backend{
Schema: map[string]*Schema{
"foo": &Schema{
Type: TypeString,
Optional: true,
DefaultFunc: func() (interface{}, error) {
return "baz", nil
},
},
},
},
map[string]cty.Value{},
map[string]cty.Value{
"foo": cty.StringVal("baz"),
},
false,
},
}
for i, tc := range cases {
t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) {
cfgVal := cty.NullVal(cty.Object(map[string]cty.Type{}))
if tc.Config != nil {
cfgVal = cty.ObjectVal(tc.Config)
}
configVal, diags := tc.B.PrepareConfig(cfgVal)
if diags.HasErrors() != tc.Err {
for _, d := range diags {
t.Error(d.Description())
}
}
if tc.Err {
return
}
expect := cty.ObjectVal(tc.Expect)
if !expect.RawEquals(configVal) {
t.Fatalf("\nexpected: %#v\ngot: %#v\n", expect, configVal)
}
})
}
}
func TestBackendConfigure(t *testing.T) {
cases := []struct {
Name string
B *Backend
Config map[string]cty.Value
Err bool
}{
{
"Basic config",
&Backend{
Schema: map[string]*Schema{
"foo": &Schema{
Type: TypeInt,
Optional: true,
},
},
ConfigureFunc: func(ctx context.Context) error {
d := FromContextBackendConfig(ctx)
if d.Get("foo").(int) != 42 {
return fmt.Errorf("bad config data")
}
return nil
},
},
map[string]cty.Value{
"foo": cty.NumberIntVal(42),
},
false,
},
}
for i, tc := range cases {
t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) {
diags := tc.B.Configure(cty.ObjectVal(tc.Config))
if diags.HasErrors() != tc.Err {
t.Errorf("wrong number of diagnostics")
}
})
}
}

View File

@ -1,309 +0,0 @@
package schema
import (
"fmt"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/zclconf/go-cty/cty"
)
// The functions and methods in this file are concerned with the conversion
// of this package's schema model into the slightly-lower-level schema model
// used by Terraform core for configuration parsing.
// CoreConfigSchema lowers the receiver to the schema model expected by
// Terraform core.
//
// This lower-level model has fewer features than the schema in this package,
// describing only the basic structure of configuration and state values we
// expect. The full schemaMap from this package is still required for full
// validation, handling of default values, etc.
//
// This method presumes a schema that passes InternalValidate, and so may
// panic or produce an invalid result if given an invalid schemaMap.
func (m schemaMap) CoreConfigSchema() *configschema.Block {
if len(m) == 0 {
// We return an actual (empty) object here, rather than a nil,
// because a nil result would mean that we don't have a schema at
// all, rather than that we have an empty one.
return &configschema.Block{}
}
ret := &configschema.Block{
Attributes: map[string]*configschema.Attribute{},
BlockTypes: map[string]*configschema.NestedBlock{},
}
for name, schema := range m {
if schema.Elem == nil {
ret.Attributes[name] = schema.coreConfigSchemaAttribute()
continue
}
if schema.Type == TypeMap {
// For TypeMap in particular, it isn't valid for Elem to be a
// *Resource (since that would be ambiguous in flatmap), so Elem is
// treated as a TypeString schema in that case. This matches
// how the field readers treat this situation, for compatibility
// with configurations targeting Terraform 0.11 and earlier.
if _, isResource := schema.Elem.(*Resource); isResource {
sch := *schema // shallow copy
sch.Elem = &Schema{
Type: TypeString,
}
ret.Attributes[name] = sch.coreConfigSchemaAttribute()
continue
}
}
switch schema.ConfigMode {
case SchemaConfigModeAttr:
ret.Attributes[name] = schema.coreConfigSchemaAttribute()
case SchemaConfigModeBlock:
ret.BlockTypes[name] = schema.coreConfigSchemaBlock()
default: // SchemaConfigModeAuto, or any other invalid value
if schema.Computed && !schema.Optional {
// Computed-only schemas are always handled as attributes,
// because they never appear in configuration.
ret.Attributes[name] = schema.coreConfigSchemaAttribute()
continue
}
switch schema.Elem.(type) {
case *Schema, ValueType:
ret.Attributes[name] = schema.coreConfigSchemaAttribute()
case *Resource:
ret.BlockTypes[name] = schema.coreConfigSchemaBlock()
default:
// Should never happen for a valid schema
panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem))
}
}
}
return ret
}
// coreConfigSchemaAttribute prepares a configschema.Attribute representation
// of a schema. This is appropriate only for primitives or collections whose
// Elem is an instance of Schema. Use coreConfigSchemaBlock for collections
// whose elem is a whole resource.
func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute {
// The Schema.DefaultFunc capability adds some extra weirdness here since
// it can be combined with "Required: true" to create a situation where
// required-ness is conditional. Terraform Core doesn't share this concept,
// so we must sniff for this possibility here and conditionally turn
// off the "Required" flag if it looks like the DefaultFunc is going
// to provide a value.
// This is not 100% true to the original interface of DefaultFunc but
// works well enough for the EnvDefaultFunc and MultiEnvDefaultFunc
// situations, which are the main cases we care about.
//
// Note that this also has a consequence for commands that return schema
// information for documentation purposes: running those for certain
// providers will produce different results depending on which environment
// variables are set. We accept that weirdness in order to keep this
// interface to core otherwise simple.
reqd := s.Required
opt := s.Optional
if reqd && s.DefaultFunc != nil {
v, err := s.DefaultFunc()
// We can't report errors from here, so we'll instead just force
// "Required" to false and let the provider try calling its
// DefaultFunc again during the validate step, where it can then
// return the error.
if err != nil || (err == nil && v != nil) {
reqd = false
opt = true
}
}
return &configschema.Attribute{
Type: s.coreConfigSchemaType(),
Optional: opt,
Required: reqd,
Computed: s.Computed,
Sensitive: s.Sensitive,
Description: s.Description,
}
}
// coreConfigSchemaBlock prepares a configschema.NestedBlock representation of
// a schema. This is appropriate only for collections whose Elem is an instance
// of Resource, and will panic otherwise.
func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock {
ret := &configschema.NestedBlock{}
if nested := s.Elem.(*Resource).coreConfigSchema(); nested != nil {
ret.Block = *nested
}
switch s.Type {
case TypeList:
ret.Nesting = configschema.NestingList
case TypeSet:
ret.Nesting = configschema.NestingSet
case TypeMap:
ret.Nesting = configschema.NestingMap
default:
// Should never happen for a valid schema
panic(fmt.Errorf("invalid s.Type %s for s.Elem being resource", s.Type))
}
ret.MinItems = s.MinItems
ret.MaxItems = s.MaxItems
if s.Required && s.MinItems == 0 {
// configschema doesn't have a "required" representation for nested
// blocks, but we can fake it by requiring at least one item.
ret.MinItems = 1
}
if s.Optional && s.MinItems > 0 {
// Historically helper/schema would ignore MinItems if Optional were
// set, so we must mimic this behavior here to ensure that providers
// relying on that undocumented behavior can continue to operate as
// they did before.
ret.MinItems = 0
}
if s.Computed && !s.Optional {
// MinItems/MaxItems are meaningless for computed nested blocks, since
// they are never set by the user anyway. This ensures that we'll never
// generate weird errors about them.
ret.MinItems = 0
ret.MaxItems = 0
}
return ret
}
// coreConfigSchemaType determines the core config schema type that corresponds
// to a particular schema's type.
func (s *Schema) coreConfigSchemaType() cty.Type {
switch s.Type {
case TypeString:
return cty.String
case TypeBool:
return cty.Bool
case TypeInt, TypeFloat:
// configschema doesn't distinguish int and float, so helper/schema
// will deal with this as an additional validation step after
// configuration has been parsed and decoded.
return cty.Number
case TypeList, TypeSet, TypeMap:
var elemType cty.Type
switch set := s.Elem.(type) {
case *Schema:
elemType = set.coreConfigSchemaType()
case ValueType:
// This represents a mistake in the provider code, but it's a
// common one so we'll just shim it.
elemType = (&Schema{Type: set}).coreConfigSchemaType()
case *Resource:
// By default we construct a NestedBlock in this case, but this
// behavior is selected either for computed-only schemas or
// when ConfigMode is explicitly SchemaConfigModeBlock.
// See schemaMap.CoreConfigSchema for the exact rules.
elemType = set.coreConfigSchema().ImpliedType()
default:
if set != nil {
// Should never happen for a valid schema
panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", s.Elem))
}
// Some pre-existing schemas assume string as default, so we need
// to be compatible with them.
elemType = cty.String
}
switch s.Type {
case TypeList:
return cty.List(elemType)
case TypeSet:
return cty.Set(elemType)
case TypeMap:
return cty.Map(elemType)
default:
// can never get here in practice, due to the case we're inside
panic("invalid collection type")
}
default:
// should never happen for a valid schema
panic(fmt.Errorf("invalid Schema.Type %s", s.Type))
}
}
// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema on
// the resource's schema. CoreConfigSchema adds the implicitly required "id"
// attribute for top level resources if it doesn't exist.
func (r *Resource) CoreConfigSchema() *configschema.Block {
block := r.coreConfigSchema()
if block.Attributes == nil {
block.Attributes = map[string]*configschema.Attribute{}
}
// Add the implicitly required "id" field if it doesn't exist
if block.Attributes["id"] == nil {
block.Attributes["id"] = &configschema.Attribute{
Type: cty.String,
Optional: true,
Computed: true,
}
}
_, timeoutsAttr := block.Attributes[TimeoutsConfigKey]
_, timeoutsBlock := block.BlockTypes[TimeoutsConfigKey]
// Insert configured timeout values into the schema, as long as the schema
// didn't define anything else by that name.
if r.Timeouts != nil && !timeoutsAttr && !timeoutsBlock {
timeouts := configschema.Block{
Attributes: map[string]*configschema.Attribute{},
}
if r.Timeouts.Create != nil {
timeouts.Attributes[TimeoutCreate] = &configschema.Attribute{
Type: cty.String,
Optional: true,
}
}
if r.Timeouts.Read != nil {
timeouts.Attributes[TimeoutRead] = &configschema.Attribute{
Type: cty.String,
Optional: true,
}
}
if r.Timeouts.Update != nil {
timeouts.Attributes[TimeoutUpdate] = &configschema.Attribute{
Type: cty.String,
Optional: true,
}
}
if r.Timeouts.Delete != nil {
timeouts.Attributes[TimeoutDelete] = &configschema.Attribute{
Type: cty.String,
Optional: true,
}
}
if r.Timeouts.Default != nil {
timeouts.Attributes[TimeoutDefault] = &configschema.Attribute{
Type: cty.String,
Optional: true,
}
}
block.BlockTypes[TimeoutsConfigKey] = &configschema.NestedBlock{
Nesting: configschema.NestingSingle,
Block: timeouts,
}
}
return block
}
func (r *Resource) coreConfigSchema() *configschema.Block {
return schemaMap(r.Schema).CoreConfigSchema()
}
// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema
// on the backend's schema.
func (r *Backend) CoreConfigSchema() *configschema.Block {
return schemaMap(r.Schema).CoreConfigSchema()
}

View File

@ -1,458 +0,0 @@
package schema
import (
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/configs/configschema"
)
// add the implicit "id" attribute for test resources
func testResource(block *configschema.Block) *configschema.Block {
if block.Attributes == nil {
block.Attributes = make(map[string]*configschema.Attribute)
}
if block.BlockTypes == nil {
block.BlockTypes = make(map[string]*configschema.NestedBlock)
}
if block.Attributes["id"] == nil {
block.Attributes["id"] = &configschema.Attribute{
Type: cty.String,
Optional: true,
Computed: true,
}
}
return block
}
func TestSchemaMapCoreConfigSchema(t *testing.T) {
tests := map[string]struct {
Schema map[string]*Schema
Want *configschema.Block
}{
"empty": {
map[string]*Schema{},
testResource(&configschema.Block{}),
},
"primitives": {
map[string]*Schema{
"int": {
Type: TypeInt,
Required: true,
Description: "foo bar baz",
},
"float": {
Type: TypeFloat,
Optional: true,
},
"bool": {
Type: TypeBool,
Computed: true,
},
"string": {
Type: TypeString,
Optional: true,
Computed: true,
},
},
testResource(&configschema.Block{
Attributes: map[string]*configschema.Attribute{
"int": {
Type: cty.Number,
Required: true,
Description: "foo bar baz",
},
"float": {
Type: cty.Number,
Optional: true,
},
"bool": {
Type: cty.Bool,
Computed: true,
},
"string": {
Type: cty.String,
Optional: true,
Computed: true,
},
},
BlockTypes: map[string]*configschema.NestedBlock{},
}),
},
"simple collections": {
map[string]*Schema{
"list": {
Type: TypeList,
Required: true,
Elem: &Schema{
Type: TypeInt,
},
},
"set": {
Type: TypeSet,
Optional: true,
Elem: &Schema{
Type: TypeString,
},
},
"map": {
Type: TypeMap,
Optional: true,
Elem: &Schema{
Type: TypeBool,
},
},
"map_default_type": {
Type: TypeMap,
Optional: true,
// Maps historically don't have elements because we
// assumed they would be strings, so this needs to work
// for pre-existing schemas.
},
},
testResource(&configschema.Block{
Attributes: map[string]*configschema.Attribute{
"list": {
Type: cty.List(cty.Number),
Required: true,
},
"set": {
Type: cty.Set(cty.String),
Optional: true,
},
"map": {
Type: cty.Map(cty.Bool),
Optional: true,
},
"map_default_type": {
Type: cty.Map(cty.String),
Optional: true,
},
},
BlockTypes: map[string]*configschema.NestedBlock{},
}),
},
"incorrectly-specified collections": {
// Historically we tolerated setting a type directly as the Elem
// attribute, rather than a Schema object. This is common enough
// in existing provider code that we must support it as an alias
// for a schema object with the given type.
map[string]*Schema{
"list": {
Type: TypeList,
Required: true,
Elem: TypeInt,
},
"set": {
Type: TypeSet,
Optional: true,
Elem: TypeString,
},
"map": {
Type: TypeMap,
Optional: true,
Elem: TypeBool,
},
},
testResource(&configschema.Block{
Attributes: map[string]*configschema.Attribute{
"list": {
Type: cty.List(cty.Number),
Required: true,
},
"set": {
Type: cty.Set(cty.String),
Optional: true,
},
"map": {
Type: cty.Map(cty.Bool),
Optional: true,
},
},
BlockTypes: map[string]*configschema.NestedBlock{},
}),
},
"sub-resource collections": {
map[string]*Schema{
"list": {
Type: TypeList,
Required: true,
Elem: &Resource{
Schema: map[string]*Schema{},
},
MinItems: 1,
MaxItems: 2,
},
"set": {
Type: TypeSet,
Required: true,
Elem: &Resource{
Schema: map[string]*Schema{},
},
},
"map": {
Type: TypeMap,
Optional: true,
Elem: &Resource{
Schema: map[string]*Schema{},
},
},
},
testResource(&configschema.Block{
Attributes: map[string]*configschema.Attribute{
// This one becomes a string attribute because helper/schema
// doesn't actually support maps of resource. The given
// "Elem" is just ignored entirely here, which is important
// because that is also true of the helper/schema logic and
// existing providers rely on this being ignored for
// correct operation.
"map": {
Type: cty.Map(cty.String),
Optional: true,
},
},
BlockTypes: map[string]*configschema.NestedBlock{
"list": {
Nesting: configschema.NestingList,
Block: configschema.Block{},
MinItems: 1,
MaxItems: 2,
},
"set": {
Nesting: configschema.NestingSet,
Block: configschema.Block{},
MinItems: 1, // because schema is Required
},
},
}),
},
"sub-resource collections minitems+optional": {
// This particular case is an odd one where the provider gives
// conflicting information about whether a sub-resource is required,
// by marking it as optional but also requiring one item.
// Historically the optional-ness "won" here, and so we must
// honor that for compatibility with providers that relied on this
// undocumented interaction.
map[string]*Schema{
"list": {
Type: TypeList,
Optional: true,
Elem: &Resource{
Schema: map[string]*Schema{},
},
MinItems: 1,
MaxItems: 1,
},
"set": {
Type: TypeSet,
Optional: true,
Elem: &Resource{
Schema: map[string]*Schema{},
},
MinItems: 1,
MaxItems: 1,
},
},
testResource(&configschema.Block{
Attributes: map[string]*configschema.Attribute{},
BlockTypes: map[string]*configschema.NestedBlock{
"list": {
Nesting: configschema.NestingList,
Block: configschema.Block{},
MinItems: 0,
MaxItems: 1,
},
"set": {
Nesting: configschema.NestingSet,
Block: configschema.Block{},
MinItems: 0,
MaxItems: 1,
},
},
}),
},
"sub-resource collections minitems+computed": {
map[string]*Schema{
"list": {
Type: TypeList,
Computed: true,
Elem: &Resource{
Schema: map[string]*Schema{},
},
MinItems: 1,
MaxItems: 1,
},
"set": {
Type: TypeSet,
Computed: true,
Elem: &Resource{
Schema: map[string]*Schema{},
},
MinItems: 1,
MaxItems: 1,
},
},
testResource(&configschema.Block{
Attributes: map[string]*configschema.Attribute{
"list": {
Type: cty.List(cty.EmptyObject),
Computed: true,
},
"set": {
Type: cty.Set(cty.EmptyObject),
Computed: true,
},
},
}),
},
"nested attributes and blocks": {
map[string]*Schema{
"foo": {
Type: TypeList,
Required: true,
Elem: &Resource{
Schema: map[string]*Schema{
"bar": {
Type: TypeList,
Required: true,
Elem: &Schema{
Type: TypeList,
Elem: &Schema{
Type: TypeString,
},
},
},
"baz": {
Type: TypeSet,
Optional: true,
Elem: &Resource{
Schema: map[string]*Schema{},
},
},
},
},
},
},
testResource(&configschema.Block{
Attributes: map[string]*configschema.Attribute{},
BlockTypes: map[string]*configschema.NestedBlock{
"foo": &configschema.NestedBlock{
Nesting: configschema.NestingList,
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"bar": {
Type: cty.List(cty.List(cty.String)),
Required: true,
},
},
BlockTypes: map[string]*configschema.NestedBlock{
"baz": {
Nesting: configschema.NestingSet,
Block: configschema.Block{},
},
},
},
MinItems: 1, // because schema is Required
},
},
}),
},
"sensitive": {
map[string]*Schema{
"string": {
Type: TypeString,
Optional: true,
Sensitive: true,
},
},
testResource(&configschema.Block{
Attributes: map[string]*configschema.Attribute{
"string": {
Type: cty.String,
Optional: true,
Sensitive: true,
},
},
BlockTypes: map[string]*configschema.NestedBlock{},
}),
},
"conditionally required on": {
map[string]*Schema{
"string": {
Type: TypeString,
Required: true,
DefaultFunc: func() (interface{}, error) {
return nil, nil
},
},
},
testResource(&configschema.Block{
Attributes: map[string]*configschema.Attribute{
"string": {
Type: cty.String,
Required: true,
},
},
BlockTypes: map[string]*configschema.NestedBlock{},
}),
},
"conditionally required off": {
map[string]*Schema{
"string": {
Type: TypeString,
Required: true,
DefaultFunc: func() (interface{}, error) {
// If we return a non-nil default then this overrides
// the "Required: true" for the purpose of building
// the core schema, so that core will ignore it not
// being set and let the provider handle it.
return "boop", nil
},
},
},
testResource(&configschema.Block{
Attributes: map[string]*configschema.Attribute{
"string": {
Type: cty.String,
Optional: true,
},
},
BlockTypes: map[string]*configschema.NestedBlock{},
}),
},
"conditionally required error": {
map[string]*Schema{
"string": {
Type: TypeString,
Required: true,
DefaultFunc: func() (interface{}, error) {
return nil, fmt.Errorf("placeholder error")
},
},
},
testResource(&configschema.Block{
Attributes: map[string]*configschema.Attribute{
"string": {
Type: cty.String,
Optional: true, // Just so we can progress to provider-driven validation and return the error there
},
},
BlockTypes: map[string]*configschema.NestedBlock{},
}),
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
got := (&Resource{Schema: test.Schema}).CoreConfigSchema()
if !cmp.Equal(got, test.Want, equateEmpty, typeComparer) {
t.Error(cmp.Diff(got, test.Want, equateEmpty, typeComparer))
}
})
}
}

View File

@ -1,59 +0,0 @@
package schema
import (
"fmt"
)
// DataSourceResourceShim takes a Resource instance describing a data source
// (with a Read implementation and a Schema, at least) and returns a new
// Resource instance with additional Create and Delete implementations that
// allow the data source to be used as a resource.
//
// This is a backward-compatibility layer for data sources that were formerly
// read-only resources before the data source concept was added. It should not
// be used for any *new* data sources.
//
// The Read function for the data source *must* call d.SetId with a non-empty
// id in order for this shim to function as expected.
//
// The provided Resource instance, and its schema, will be modified in-place
// to make it suitable for use as a full resource.
func DataSourceResourceShim(name string, dataSource *Resource) *Resource {
// Recursively, in-place adjust the schema so that it has ForceNew
// on any user-settable resource.
dataSourceResourceShimAdjustSchema(dataSource.Schema)
dataSource.Create = CreateFunc(dataSource.Read)
dataSource.Delete = func(d *ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
dataSource.Update = nil // should already be nil, but let's make sure
// FIXME: Link to some further docs either on the website or in the
// changelog, once such a thing exists.
dataSource.DeprecationMessage = fmt.Sprintf(
"using %s as a resource is deprecated; consider using the data source instead",
name,
)
return dataSource
}
func dataSourceResourceShimAdjustSchema(schema map[string]*Schema) {
for _, s := range schema {
// If the attribute is configurable then it must be ForceNew,
// since we have no Update implementation.
if s.Required || s.Optional {
s.ForceNew = true
}
// If the attribute is a nested resource, we need to recursively
// apply these same adjustments to it.
if s.Elem != nil {
if r, ok := s.Elem.(*Resource); ok {
dataSourceResourceShimAdjustSchema(r.Schema)
}
}
}
}
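
A hedged usage sketch of this shim (the data source, the resource name "example_thing", and the attribute are hypothetical): the Read implementation sets a non-empty ID, and the shimmed result is what would be registered under the legacy resource name:

package exampleprovider

import (
	"github.com/hashicorp/terraform/helper/schema"
)

// dataSourceThing is a hypothetical read-only data source.
func dataSourceThing() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Optional: true,
			},
		},
		Read: func(d *schema.ResourceData, meta interface{}) error {
			// SetId must be called with a non-empty value for the shim
			// to behave correctly when used as a resource.
			d.SetId("static-id")
			return nil
		},
	}
}

// resourceThingShim re-exposes the data source under its legacy resource
// name; the shim marks all configurable attributes ForceNew.
func resourceThingShim() *schema.Resource {
	return schema.DataSourceResourceShim("example_thing", dataSourceThing())
}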

View File

@ -1,6 +0,0 @@
package schema
// Equal is an interface that checks for deep equality between two objects.
type Equal interface {
Equal(interface{}) bool
}

View File

@ -1,343 +0,0 @@
package schema
import (
"fmt"
"strconv"
"strings"
)
// FieldReaders are responsible for decoding fields out of data into
// the proper typed representation. ResourceData uses this to query data
// out of multiple sources: config, state, diffs, etc.
type FieldReader interface {
ReadField([]string) (FieldReadResult, error)
}
// FieldReadResult encapsulates all the resulting data from reading
// a field.
type FieldReadResult struct {
// Value is the actual read value. NegValue is the _negative_ value
// or the items that should be removed (if they existed). NegValue
// doesn't make sense for primitives but is important for any
// container types such as maps, sets, lists.
Value interface{}
ValueProcessed interface{}
// Exists is true if the field was found in the data. False means
// it wasn't found if there was no error.
Exists bool
// Computed is true if the field was found but the value
// is computed.
Computed bool
}
// ValueOrZero returns the value of this result or the zero value of the
// schema type, ensuring a consistent non-nil return value.
func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} {
if r.Value != nil {
return r.Value
}
return s.ZeroValue()
}
// SchemasForFlatmapPath tries its best to find a sequence of schemas that
// the given dot-delimited attribute path traverses through.
func SchemasForFlatmapPath(path string, schemaMap map[string]*Schema) []*Schema {
parts := strings.Split(path, ".")
return addrToSchema(parts, schemaMap)
}
// addrToSchema finds the final element schema for the given address
// and the given schema. It returns all the schemas that led to the final
// schema. These are in order of the address (out to in).
func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema {
current := &Schema{
Type: typeObject,
Elem: schemaMap,
}
// If we aren't given an address, then the user is requesting the
// full object, so we return the special value which is the full object.
if len(addr) == 0 {
return []*Schema{current}
}
result := make([]*Schema, 0, len(addr))
for len(addr) > 0 {
k := addr[0]
addr = addr[1:]
REPEAT:
// We want to trim off the first "typeObject" since it's not a
// real lookup that people do. i.e. []string{"foo"} in a structure
// isn't {typeObject, typeString}, it's just a {typeString}.
if len(result) > 0 || current.Type != typeObject {
result = append(result, current)
}
switch t := current.Type; t {
case TypeBool, TypeInt, TypeFloat, TypeString:
if len(addr) > 0 {
return nil
}
case TypeList, TypeSet:
isIndex := len(addr) > 0 && addr[0] == "#"
switch v := current.Elem.(type) {
case *Resource:
current = &Schema{
Type: typeObject,
Elem: v.Schema,
}
case *Schema:
current = v
case ValueType:
current = &Schema{Type: v}
default:
// we may not know the Elem type and are just looking for the
// index
if isIndex {
break
}
if len(addr) == 0 {
// we've processed the address, so return what we've
// collected
return result
}
if len(addr) == 1 {
if _, err := strconv.Atoi(addr[0]); err == nil {
// we're indexing a value without a schema. This can
// happen if the list is nested in another schema type.
// Default to a TypeString like we do with a map
current = &Schema{Type: TypeString}
break
}
}
return nil
}
// If we only have one more thing and the next thing
// is a #, then we're accessing the index which is always
// an int.
if isIndex {
current = &Schema{Type: TypeInt}
break
}
case TypeMap:
if len(addr) > 0 {
switch v := current.Elem.(type) {
case ValueType:
current = &Schema{Type: v}
case *Schema:
current, _ = current.Elem.(*Schema)
default:
// maps default to string values. This is all we can have
// if this is nested in another list or map.
current = &Schema{Type: TypeString}
}
}
case typeObject:
// If we're already in the object, then we want to handle Sets
// and Lists specially. Basically, their next key is the lookup
// key (the set value or the list element). For these scenarios,
// we just want to skip it and move to the next element if there
// is one.
if len(result) > 0 {
lastType := result[len(result)-2].Type
if lastType == TypeSet || lastType == TypeList {
if len(addr) == 0 {
break
}
k = addr[0]
addr = addr[1:]
}
}
m := current.Elem.(map[string]*Schema)
val, ok := m[k]
if !ok {
return nil
}
current = val
goto REPEAT
}
}
return result
}
// readListField is a generic method for reading a list field out of a
// FieldReader. It does this based on the assumption that there is a key
// "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc.
// after that point.
func readListField(
r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) {
addrPadded := make([]string, len(addr)+1)
copy(addrPadded, addr)
addrPadded[len(addrPadded)-1] = "#"
// Get the number of elements in the list
countResult, err := r.ReadField(addrPadded)
if err != nil {
return FieldReadResult{}, err
}
if !countResult.Exists {
// No count, means we have no list
countResult.Value = 0
}
// If we have an empty list, then return an empty list
if countResult.Computed || countResult.Value.(int) == 0 {
return FieldReadResult{
Value: []interface{}{},
Exists: countResult.Exists,
Computed: countResult.Computed,
}, nil
}
// Go through each count, and get the item value out of it
result := make([]interface{}, countResult.Value.(int))
for i := range result {
is := strconv.FormatInt(int64(i), 10)
addrPadded[len(addrPadded)-1] = is
rawResult, err := r.ReadField(addrPadded)
if err != nil {
return FieldReadResult{}, err
}
if !rawResult.Exists {
// This should never happen, because by the time the data
// gets to the FieldReaders, all the defaults should be set by
// Schema.
rawResult.Value = nil
}
result[i] = rawResult.Value
}
return FieldReadResult{
Value: result,
Exists: true,
}, nil
}
// readObjectField is a generic method for reading objects out of FieldReaders
// based on the assumption that building an address of []string{k, FIELD}
// will result in the proper field data.
func readObjectField(
r FieldReader,
addr []string,
schema map[string]*Schema) (FieldReadResult, error) {
result := make(map[string]interface{})
exists := false
for field, s := range schema {
addrRead := make([]string, len(addr), len(addr)+1)
copy(addrRead, addr)
addrRead = append(addrRead, field)
rawResult, err := r.ReadField(addrRead)
if err != nil {
return FieldReadResult{}, err
}
if rawResult.Exists {
exists = true
}
result[field] = rawResult.ValueOrZero(s)
}
return FieldReadResult{
Value: result,
Exists: exists,
}, nil
}
// convert map values to the proper primitive type based on schema.Elem
func mapValuesToPrimitive(k string, m map[string]interface{}, schema *Schema) error {
elemType, err := getValueType(k, schema)
if err != nil {
return err
}
switch elemType {
case TypeInt, TypeFloat, TypeBool:
for k, v := range m {
vs, ok := v.(string)
if !ok {
continue
}
v, err := stringToPrimitive(vs, false, &Schema{Type: elemType})
if err != nil {
return err
}
m[k] = v
}
}
return nil
}
func stringToPrimitive(
value string, computed bool, schema *Schema) (interface{}, error) {
var returnVal interface{}
switch schema.Type {
case TypeBool:
if value == "" {
returnVal = false
break
}
if computed {
break
}
v, err := strconv.ParseBool(value)
if err != nil {
return nil, err
}
returnVal = v
case TypeFloat:
if value == "" {
returnVal = 0.0
break
}
if computed {
break
}
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return nil, err
}
returnVal = v
case TypeInt:
if value == "" {
returnVal = 0
break
}
if computed {
break
}
v, err := strconv.ParseInt(value, 0, 0)
if err != nil {
return nil, err
}
returnVal = int(v)
case TypeString:
returnVal = value
default:
panic(fmt.Sprintf("Unknown type: %s", schema.Type))
}
return returnVal, nil
}
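
As a hedged illustration of the flatmap layout these readers assume (the "tags" attribute is invented), a list is stored under a count key plus one key per index, which is exactly what readListField walks through:

package schema

// exampleFlatmap is a hypothetical flatmap encoding for a TypeList
// attribute named "tags" with two elements: readListField reads "tags.#"
// first to learn the length, then "tags.0", "tags.1", and so on.
var exampleFlatmap = map[string]string{
	"tags.#": "2",
	"tags.0": "web",
	"tags.1": "prod",
}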

View File

@ -1,353 +0,0 @@
package schema
import (
"fmt"
"log"
"strconv"
"strings"
"sync"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/mapstructure"
)
// ConfigFieldReader reads fields out of an untyped map[string]string to the
// best of its ability. It also applies defaults from the Schema. (The other
// field readers do not need default handling because they source fully
// populated data structures.)
type ConfigFieldReader struct {
Config *terraform.ResourceConfig
Schema map[string]*Schema
indexMaps map[string]map[string]int
once sync.Once
}
func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) {
r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) })
return r.readField(address, false)
}
func (r *ConfigFieldReader) readField(
address []string, nested bool) (FieldReadResult, error) {
schemaList := addrToSchema(address, r.Schema)
if len(schemaList) == 0 {
return FieldReadResult{}, nil
}
if !nested {
// If we have a set anywhere in the address, then we need to
// read that set out in order and actually replace that part of
// the address with the real list index. i.e. set.50 might actually
// map to set.12 in the config, since it is in list order in the
// config, not indexed by set value.
for i, v := range schemaList {
// Sets are the only thing that cause this issue.
if v.Type != TypeSet {
continue
}
// If we're at the end of the list, then we don't have to worry
// about this because we're just requesting the whole set.
if i == len(schemaList)-1 {
continue
}
// If we're looking for the count, then ignore...
if address[i+1] == "#" {
continue
}
indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")]
if !ok {
// Get the set so we can get the index map that tells us the
// mapping of the hash code to the list index
_, err := r.readSet(address[:i+1], v)
if err != nil {
return FieldReadResult{}, err
}
indexMap = r.indexMaps[strings.Join(address[:i+1], ".")]
}
index, ok := indexMap[address[i+1]]
if !ok {
return FieldReadResult{}, nil
}
address[i+1] = strconv.FormatInt(int64(index), 10)
}
}
k := strings.Join(address, ".")
schema := schemaList[len(schemaList)-1]
// If we're getting the single element of a promoted list, then
// check to see if we have a single element we need to promote.
if address[len(address)-1] == "0" && len(schemaList) > 1 {
lastSchema := schemaList[len(schemaList)-2]
if lastSchema.Type == TypeList && lastSchema.PromoteSingle {
k := strings.Join(address[:len(address)-1], ".")
result, err := r.readPrimitive(k, schema)
if err == nil {
return result, nil
}
}
}
if protoVersion5 {
switch schema.Type {
case TypeList, TypeSet, TypeMap, typeObject:
// Check if the value itself is unknown.
// The new protocol shims will add unknown values to this list of
// ComputedKeys. This is the only way we have to indicate that a
// collection is unknown in the config
for _, unknown := range r.Config.ComputedKeys {
if k == unknown {
log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k)
return FieldReadResult{Computed: true, Exists: true}, nil
}
}
}
}
switch schema.Type {
case TypeBool, TypeFloat, TypeInt, TypeString:
return r.readPrimitive(k, schema)
case TypeList:
// If we support promotion then we first check if we have a lone
// value that we must promote.
if schema.PromoteSingle {
result, err := r.readPrimitive(k, schema.Elem.(*Schema))
if err == nil && result.Exists {
result.Value = []interface{}{result.Value}
return result, nil
}
}
return readListField(&nestedConfigFieldReader{r}, address, schema)
case TypeMap:
return r.readMap(k, schema)
case TypeSet:
return r.readSet(address, schema)
case typeObject:
return readObjectField(
&nestedConfigFieldReader{r},
address, schema.Elem.(map[string]*Schema))
default:
panic(fmt.Sprintf("Unknown type: %s", schema.Type))
}
}
func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
// We want both the raw value and the interpolated. We use the interpolated
// to store actual values and we use the raw one to check for
// computed keys. Actual values are obtained in the switch, depending on
// the type of the raw value.
mraw, ok := r.Config.GetRaw(k)
if !ok {
// check if this is from an interpolated field by seeing if it exists
// in the config
_, ok := r.Config.Get(k)
if !ok {
// this really doesn't exist
return FieldReadResult{}, nil
}
// We couldn't fetch the value from a nested data structure, so treat the
// raw value as an interpolation string. The mraw value is only used
// for the type switch below.
mraw = "${INTERPOLATED}"
}
result := make(map[string]interface{})
computed := false
switch m := mraw.(type) {
case string:
// This is a map which has come out of an interpolated variable, so we
// can just get the value directly from config. Values cannot be computed
// currently.
v, _ := r.Config.Get(k)
// If this isn't a map[string]interface, it must be computed.
mapV, ok := v.(map[string]interface{})
if !ok {
return FieldReadResult{
Exists: true,
Computed: true,
}, nil
}
// Otherwise we can proceed as usual.
for i, iv := range mapV {
result[i] = iv
}
case []interface{}:
for i, innerRaw := range m {
for ik := range innerRaw.(map[string]interface{}) {
key := fmt.Sprintf("%s.%d.%s", k, i, ik)
if r.Config.IsComputed(key) {
computed = true
break
}
v, _ := r.Config.Get(key)
result[ik] = v
}
}
case []map[string]interface{}:
for i, innerRaw := range m {
for ik := range innerRaw {
key := fmt.Sprintf("%s.%d.%s", k, i, ik)
if r.Config.IsComputed(key) {
computed = true
break
}
v, _ := r.Config.Get(key)
result[ik] = v
}
}
case map[string]interface{}:
for ik := range m {
key := fmt.Sprintf("%s.%s", k, ik)
if r.Config.IsComputed(key) {
computed = true
break
}
v, _ := r.Config.Get(key)
result[ik] = v
}
case nil:
// the map may have been empty on the configuration, so we leave the
// empty result
default:
panic(fmt.Sprintf("unknown type: %#v", mraw))
}
err := mapValuesToPrimitive(k, result, schema)
if err != nil {
return FieldReadResult{}, nil
}
var value interface{}
if !computed {
value = result
}
return FieldReadResult{
Value: value,
Exists: true,
Computed: computed,
}, nil
}
func (r *ConfigFieldReader) readPrimitive(
k string, schema *Schema) (FieldReadResult, error) {
raw, ok := r.Config.Get(k)
if !ok {
// Nothing in config, but we might still have a default from the schema
var err error
raw, err = schema.DefaultValue()
if err != nil {
return FieldReadResult{}, fmt.Errorf("%s, error loading default: %s", k, err)
}
if raw == nil {
return FieldReadResult{}, nil
}
}
var result string
if err := mapstructure.WeakDecode(raw, &result); err != nil {
return FieldReadResult{}, err
}
computed := r.Config.IsComputed(k)
returnVal, err := stringToPrimitive(result, computed, schema)
if err != nil {
return FieldReadResult{}, err
}
return FieldReadResult{
Value: returnVal,
Exists: true,
Computed: computed,
}, nil
}
func (r *ConfigFieldReader) readSet(
address []string, schema *Schema) (FieldReadResult, error) {
indexMap := make(map[string]int)
// Create the set that will be our result
set := schema.ZeroValue().(*Set)
raw, err := readListField(&nestedConfigFieldReader{r}, address, schema)
if err != nil {
return FieldReadResult{}, err
}
if !raw.Exists {
return FieldReadResult{Value: set}, nil
}
// If the list is computed, the set is necessarily computed
if raw.Computed {
return FieldReadResult{
Value: set,
Exists: true,
Computed: raw.Computed,
}, nil
}
// Build up the set from the list elements
for i, v := range raw.Value.([]interface{}) {
// Check if any of the keys in this item are computed
computed := r.hasComputedSubKeys(
fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema)
code := set.add(v, computed)
indexMap[code] = i
}
r.indexMaps[strings.Join(address, ".")] = indexMap
return FieldReadResult{
Value: set,
Exists: true,
}, nil
}
// hasComputedSubKeys walks through a schema and returns whether or not the
// given key contains any subkeys that are computed.
func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool {
prefix := key + "."
switch t := schema.Elem.(type) {
case *Resource:
for k, schema := range t.Schema {
if r.Config.IsComputed(prefix + k) {
return true
}
if r.hasComputedSubKeys(prefix+k, schema) {
return true
}
}
}
return false
}
// nestedConfigFieldReader is a funny little thing that just wraps a
// ConfigFieldReader to call readField when ReadField is called so that
// we don't recalculate the set rewrites in the address, which would lead
// to an infinite loop.
type nestedConfigFieldReader struct {
Reader *ConfigFieldReader
}
func (r *nestedConfigFieldReader) ReadField(
address []string) (FieldReadResult, error) {
return r.Reader.readField(address, true)
}

View File

@ -1,540 +0,0 @@
package schema
import (
"bytes"
"fmt"
"reflect"
"testing"
"github.com/hashicorp/terraform/configs/hcl2shim"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/terraform"
)
func TestConfigFieldReader_impl(t *testing.T) {
var _ FieldReader = new(ConfigFieldReader)
}
func TestConfigFieldReader(t *testing.T) {
testFieldReader(t, func(s map[string]*Schema) FieldReader {
return &ConfigFieldReader{
Schema: s,
Config: testConfig(t, map[string]interface{}{
"bool": true,
"float": 3.1415,
"int": 42,
"string": "string",
"list": []interface{}{"foo", "bar"},
"listInt": []interface{}{21, 42},
"map": map[string]interface{}{
"foo": "bar",
"bar": "baz",
},
"mapInt": map[string]interface{}{
"one": "1",
"two": "2",
},
"mapIntNestedSchema": map[string]interface{}{
"one": "1",
"two": "2",
},
"mapFloat": map[string]interface{}{
"oneDotTwo": "1.2",
},
"mapBool": map[string]interface{}{
"True": "true",
"False": "false",
},
"set": []interface{}{10, 50},
"setDeep": []interface{}{
map[string]interface{}{
"index": 10,
"value": "foo",
},
map[string]interface{}{
"index": 50,
"value": "bar",
},
},
}),
}
})
}
// This contains custom table tests for our ConfigFieldReader
func TestConfigFieldReader_custom(t *testing.T) {
schema := map[string]*Schema{
"bool": &Schema{
Type: TypeBool,
},
}
cases := map[string]struct {
Addr []string
Result FieldReadResult
Config *terraform.ResourceConfig
Err bool
}{
"basic": {
[]string{"bool"},
FieldReadResult{
Value: true,
Exists: true,
},
testConfig(t, map[string]interface{}{
"bool": true,
}),
false,
},
"computed": {
[]string{"bool"},
FieldReadResult{
Exists: true,
Computed: true,
},
testConfig(t, map[string]interface{}{
"bool": hcl2shim.UnknownVariableValue,
}),
false,
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
r := &ConfigFieldReader{
Schema: schema,
Config: tc.Config,
}
out, err := r.ReadField(tc.Addr)
if err != nil != tc.Err {
t.Fatalf("%s: err: %s", name, err)
}
if s, ok := out.Value.(*Set); ok {
// If it is a set, convert to a list so it's more easily checked.
out.Value = s.List()
}
if !reflect.DeepEqual(tc.Result, out) {
t.Fatalf("%s: bad: %#v", name, out)
}
})
}
}
func TestConfigFieldReader_DefaultHandling(t *testing.T) {
schema := map[string]*Schema{
"strWithDefault": &Schema{
Type: TypeString,
Default: "ImADefault",
},
"strWithDefaultFunc": &Schema{
Type: TypeString,
DefaultFunc: func() (interface{}, error) {
return "FuncDefault", nil
},
},
}
cases := map[string]struct {
Addr []string
Result FieldReadResult
Config *terraform.ResourceConfig
Err bool
}{
"gets default value when no config set": {
[]string{"strWithDefault"},
FieldReadResult{
Value: "ImADefault",
Exists: true,
Computed: false,
},
testConfig(t, map[string]interface{}{}),
false,
},
"config overrides default value": {
[]string{"strWithDefault"},
FieldReadResult{
Value: "fromConfig",
Exists: true,
Computed: false,
},
testConfig(t, map[string]interface{}{
"strWithDefault": "fromConfig",
}),
false,
},
"gets default from function when no config set": {
[]string{"strWithDefaultFunc"},
FieldReadResult{
Value: "FuncDefault",
Exists: true,
Computed: false,
},
testConfig(t, map[string]interface{}{}),
false,
},
"config overrides default function": {
[]string{"strWithDefaultFunc"},
FieldReadResult{
Value: "fromConfig",
Exists: true,
Computed: false,
},
testConfig(t, map[string]interface{}{
"strWithDefaultFunc": "fromConfig",
}),
false,
},
}
for name, tc := range cases {
r := &ConfigFieldReader{
Schema: schema,
Config: tc.Config,
}
out, err := r.ReadField(tc.Addr)
if err != nil != tc.Err {
t.Fatalf("%s: err: %s", name, err)
}
if s, ok := out.Value.(*Set); ok {
// If it is a set, convert to a list so it's more easily checked.
out.Value = s.List()
}
if !reflect.DeepEqual(tc.Result, out) {
t.Fatalf("%s: bad: %#v", name, out)
}
}
}
func TestConfigFieldReader_ComputedMap(t *testing.T) {
schema := map[string]*Schema{
"map": &Schema{
Type: TypeMap,
Computed: true,
},
"listmap": &Schema{
Type: TypeMap,
Computed: true,
Elem: TypeList,
},
"maplist": &Schema{
Type: TypeList,
Computed: true,
Elem: TypeMap,
},
}
cases := []struct {
Name string
Addr []string
Result FieldReadResult
Config *terraform.ResourceConfig
Err bool
}{
{
"set, normal",
[]string{"map"},
FieldReadResult{
Value: map[string]interface{}{
"foo": "bar",
},
Exists: true,
Computed: false,
},
testConfig(t, map[string]interface{}{
"map": map[string]interface{}{
"foo": "bar",
},
}),
false,
},
{
"computed element",
[]string{"map"},
FieldReadResult{
Exists: true,
Computed: true,
},
testConfig(t, map[string]interface{}{
"map": map[string]interface{}{
"foo": hcl2shim.UnknownVariableValue,
},
}),
false,
},
{
"native map",
[]string{"map"},
FieldReadResult{
Value: map[string]interface{}{
"bar": "baz",
"baz": "bar",
},
Exists: true,
Computed: false,
},
testConfig(t, map[string]interface{}{
"map": map[string]interface{}{
"bar": "baz",
"baz": "bar",
},
}),
false,
},
{
"map-from-list-of-maps",
[]string{"maplist", "0"},
FieldReadResult{
Value: map[string]interface{}{
"key": "bar",
},
Exists: true,
Computed: false,
},
testConfig(t, map[string]interface{}{
"maplist": []interface{}{
map[string]interface{}{
"key": "bar",
},
},
}),
false,
},
{
"value-from-list-of-maps",
[]string{"maplist", "0", "key"},
FieldReadResult{
Value: "bar",
Exists: true,
Computed: false,
},
testConfig(t, map[string]interface{}{
"maplist": []interface{}{
map[string]interface{}{
"key": "bar",
},
},
}),
false,
},
{
"list-from-map-of-lists",
[]string{"listmap", "key"},
FieldReadResult{
Value: []interface{}{"bar"},
Exists: true,
Computed: false,
},
testConfig(t, map[string]interface{}{
"listmap": map[string]interface{}{
"key": []interface{}{
"bar",
},
},
}),
false,
},
{
"value-from-map-of-lists",
[]string{"listmap", "key", "0"},
FieldReadResult{
Value: "bar",
Exists: true,
Computed: false,
},
testConfig(t, map[string]interface{}{
"listmap": map[string]interface{}{
"key": []interface{}{
"bar",
},
},
}),
false,
},
}
for i, tc := range cases {
t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) {
r := &ConfigFieldReader{
Schema: schema,
Config: tc.Config,
}
out, err := r.ReadField(tc.Addr)
if err != nil != tc.Err {
t.Fatal(err)
}
if s, ok := out.Value.(*Set); ok {
// If it is a set, convert to the raw map
out.Value = s.m
if len(s.m) == 0 {
out.Value = nil
}
}
if !reflect.DeepEqual(tc.Result, out) {
t.Fatalf("\nexpected: %#v\ngot: %#v", tc.Result, out)
}
})
}
}
func TestConfigFieldReader_ComputedSet(t *testing.T) {
schema := map[string]*Schema{
"strSet": &Schema{
Type: TypeSet,
Elem: &Schema{Type: TypeString},
Set: HashString,
},
}
cases := map[string]struct {
Addr []string
Result FieldReadResult
Config *terraform.ResourceConfig
Err bool
}{
"set, normal": {
[]string{"strSet"},
FieldReadResult{
Value: map[string]interface{}{
"2356372769": "foo",
},
Exists: true,
Computed: false,
},
testConfig(t, map[string]interface{}{
"strSet": []interface{}{"foo"},
}),
false,
},
"set, computed element": {
[]string{"strSet"},
FieldReadResult{
Value: nil,
Exists: true,
Computed: true,
},
testConfig(t, map[string]interface{}{
"strSet": []interface{}{hcl2shim.UnknownVariableValue},
}),
false,
},
}
for name, tc := range cases {
r := &ConfigFieldReader{
Schema: schema,
Config: tc.Config,
}
out, err := r.ReadField(tc.Addr)
if err != nil != tc.Err {
t.Fatalf("%s: err: %s", name, err)
}
if s, ok := out.Value.(*Set); ok {
// If it is a set, convert to the raw map
out.Value = s.m
if len(s.m) == 0 {
out.Value = nil
}
}
if !reflect.DeepEqual(tc.Result, out) {
t.Fatalf("%s: bad: %#v", name, out)
}
}
}
func TestConfigFieldReader_computedComplexSet(t *testing.T) {
hashfunc := func(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["vhd_uri"].(string)))
return hashcode.String(buf.String())
}
schema := map[string]*Schema{
"set": &Schema{
Type: TypeSet,
Elem: &Resource{
Schema: map[string]*Schema{
"name": {
Type: TypeString,
Required: true,
},
"vhd_uri": {
Type: TypeString,
Required: true,
},
},
},
Set: hashfunc,
},
}
cases := map[string]struct {
Addr []string
Result FieldReadResult
Config *terraform.ResourceConfig
Err bool
}{
"set, normal": {
[]string{"set"},
FieldReadResult{
Value: map[string]interface{}{
"532860136": map[string]interface{}{
"name": "myosdisk1",
"vhd_uri": "bar",
},
},
Exists: true,
Computed: false,
},
testConfig(t, map[string]interface{}{
"set": []interface{}{
map[string]interface{}{
"name": "myosdisk1",
"vhd_uri": "bar",
},
},
}),
false,
},
}
for name, tc := range cases {
r := &ConfigFieldReader{
Schema: schema,
Config: tc.Config,
}
out, err := r.ReadField(tc.Addr)
if err != nil != tc.Err {
t.Fatalf("%s: err: %s", name, err)
}
if s, ok := out.Value.(*Set); ok {
// If it is a set, convert to the raw map
out.Value = s.m
if len(s.m) == 0 {
out.Value = nil
}
}
if !reflect.DeepEqual(tc.Result, out) {
t.Fatalf("%s: bad: %#v", name, out)
}
}
}
func testConfig(t *testing.T, raw map[string]interface{}) *terraform.ResourceConfig {
return terraform.NewResourceConfigRaw(raw)
}

View File

@ -1,244 +0,0 @@
package schema
import (
"fmt"
"strings"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/mapstructure"
)
// DiffFieldReader reads fields out of a diff structure.
//
// It also requires access to a Reader that reads fields from the structure
// that the diff was derived from. This is usually the state. This is required
// because a diff on its own doesn't have complete data about full objects
// such as maps.
//
// The Source MUST be the data that the diff was derived from. If it isn't,
// the behavior of this struct is undefined.
//
// Reading fields from a DiffFieldReader is identical to reading from
// Source except the diff will be applied to the end result.
//
// The "Exists" field on the result will be set to true if the complete
// field exists whether its from the source, diff, or a combination of both.
// It cannot be determined whether a retrieved value is composed of
// diff elements.
type DiffFieldReader struct {
Diff *terraform.InstanceDiff
Source FieldReader
Schema map[string]*Schema
// cache for memoizing ReadField calls.
cache map[string]cachedFieldReadResult
}
type cachedFieldReadResult struct {
val FieldReadResult
err error
}
func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) {
if r.cache == nil {
r.cache = make(map[string]cachedFieldReadResult)
}
// Create the cache key by joining around a value that isn't a valid part
// of an address. This assumes that the Source and Schema are not changed
// for the life of this DiffFieldReader.
cacheKey := strings.Join(address, "|")
if cached, ok := r.cache[cacheKey]; ok {
return cached.val, cached.err
}
schemaList := addrToSchema(address, r.Schema)
if len(schemaList) == 0 {
r.cache[cacheKey] = cachedFieldReadResult{}
return FieldReadResult{}, nil
}
var res FieldReadResult
var err error
schema := schemaList[len(schemaList)-1]
switch schema.Type {
case TypeBool, TypeInt, TypeFloat, TypeString:
res, err = r.readPrimitive(address, schema)
case TypeList:
res, err = readListField(r, address, schema)
case TypeMap:
res, err = r.readMap(address, schema)
case TypeSet:
res, err = r.readSet(address, schema)
case typeObject:
res, err = readObjectField(r, address, schema.Elem.(map[string]*Schema))
default:
panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
}
r.cache[cacheKey] = cachedFieldReadResult{
val: res,
err: err,
}
return res, err
}
func (r *DiffFieldReader) readMap(
address []string, schema *Schema) (FieldReadResult, error) {
result := make(map[string]interface{})
resultSet := false
// First read the map from the underlying source
source, err := r.Source.ReadField(address)
if err != nil {
return FieldReadResult{}, err
}
if source.Exists {
// readMap may return a nil value, or an unknown value placeholder in
// some cases, causing the type assertion to panic if we don't assign the ok value
result, _ = source.Value.(map[string]interface{})
resultSet = true
}
// Next, read all the elements we have in our diff, and apply
// the diff to our result.
prefix := strings.Join(address, ".") + "."
for k, v := range r.Diff.Attributes {
if !strings.HasPrefix(k, prefix) {
continue
}
if strings.HasPrefix(k, prefix+"%") {
// Ignore the count field
continue
}
resultSet = true
k = k[len(prefix):]
if v.NewRemoved {
delete(result, k)
continue
}
result[k] = v.New
}
key := address[len(address)-1]
err = mapValuesToPrimitive(key, result, schema)
if err != nil {
return FieldReadResult{}, err
}
var resultVal interface{}
if resultSet {
resultVal = result
}
return FieldReadResult{
Value: resultVal,
Exists: resultSet,
}, nil
}
func (r *DiffFieldReader) readPrimitive(
address []string, schema *Schema) (FieldReadResult, error) {
result, err := r.Source.ReadField(address)
if err != nil {
return FieldReadResult{}, err
}
attrD, ok := r.Diff.Attributes[strings.Join(address, ".")]
if !ok {
return result, nil
}
var resultVal string
if !attrD.NewComputed {
resultVal = attrD.New
if attrD.NewExtra != nil {
result.ValueProcessed = resultVal
if err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil {
return FieldReadResult{}, err
}
}
}
result.Computed = attrD.NewComputed
result.Exists = true
result.Value, err = stringToPrimitive(resultVal, false, schema)
if err != nil {
return FieldReadResult{}, err
}
return result, nil
}
func (r *DiffFieldReader) readSet(
address []string, schema *Schema) (FieldReadResult, error) {
// copy address to ensure we don't modify the argument
address = append([]string(nil), address...)
prefix := strings.Join(address, ".") + "."
// Create the set that will be our result
set := schema.ZeroValue().(*Set)
// Go through the map and find all the set items
for k, d := range r.Diff.Attributes {
if d.NewRemoved {
// If the field is removed, we always ignore it
continue
}
if !strings.HasPrefix(k, prefix) {
continue
}
if strings.HasSuffix(k, "#") {
// Ignore any count field
continue
}
// Split the key, since it might be a sub-object like "idx.field"
parts := strings.Split(k[len(prefix):], ".")
idx := parts[0]
raw, err := r.ReadField(append(address, idx))
if err != nil {
return FieldReadResult{}, err
}
if !raw.Exists {
// This shouldn't happen because we just verified it does exist
panic("missing field in set: " + k + "." + idx)
}
set.Add(raw.Value)
}
// Determine if the set "exists". It exists if there are items or if
// the diff explicitly wanted it empty.
exists := set.Len() > 0
if !exists {
// We could check if the diff value is "0" here but I think the
// existence of "#" on its own is enough to show it existed. This
// protects us in the future from the zero value changing from
// "0" to "" breaking us (if that were to happen).
if _, ok := r.Diff.Attributes[prefix+"#"]; ok {
exists = true
}
}
if !exists {
result, err := r.Source.ReadField(address)
if err != nil {
return FieldReadResult{}, err
}
if result.Exists {
return result, nil
}
}
return FieldReadResult{
Value: set,
Exists: exists,
}, nil
}
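For orientation, here is a minimal sketch of how this reader is wired up (illustrative only, written as if it lived in package schema; the attribute name and values are made up): the Source reads the data the diff was derived from, and the diff is layered on top when a field is read.
func exampleDiffFieldReader() {
	s := map[string]*Schema{
		"ami": {Type: TypeString},
	}
	r := &DiffFieldReader{
		Schema: s,
		// Source MUST be the data the diff was derived from, usually prior state.
		Source: &MapFieldReader{
			Schema: s,
			Map:    BasicMapReader(map[string]string{"ami": "ami-123"}),
		},
		Diff: &terraform.InstanceDiff{
			Attributes: map[string]*terraform.ResourceAttrDiff{
				"ami": {Old: "ami-123", New: "ami-456"},
			},
		},
	}
	res, err := r.ReadField([]string{"ami"})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Value, res.Exists) // "ami-456" true: the diff overrides the source
}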

View File

@ -1,524 +0,0 @@
package schema
import (
"reflect"
"testing"
"github.com/hashicorp/terraform/terraform"
)
func TestDiffFieldReader_impl(t *testing.T) {
var _ FieldReader = new(DiffFieldReader)
}
func TestDiffFieldReader_NestedSetUpdate(t *testing.T) {
hashFn := func(a interface{}) int {
m := a.(map[string]interface{})
return m["val"].(int)
}
schema := map[string]*Schema{
"list_of_sets_1": &Schema{
Type: TypeList,
Elem: &Resource{
Schema: map[string]*Schema{
"nested_set": &Schema{
Type: TypeSet,
Elem: &Resource{
Schema: map[string]*Schema{
"val": &Schema{
Type: TypeInt,
},
},
},
Set: hashFn,
},
},
},
},
"list_of_sets_2": &Schema{
Type: TypeList,
Elem: &Resource{
Schema: map[string]*Schema{
"nested_set": &Schema{
Type: TypeSet,
Elem: &Resource{
Schema: map[string]*Schema{
"val": &Schema{
Type: TypeInt,
},
},
},
Set: hashFn,
},
},
},
},
}
r := &DiffFieldReader{
Schema: schema,
Diff: &terraform.InstanceDiff{
Attributes: map[string]*terraform.ResourceAttrDiff{
"list_of_sets_1.0.nested_set.1.val": &terraform.ResourceAttrDiff{
Old: "1",
New: "0",
NewRemoved: true,
},
"list_of_sets_1.0.nested_set.2.val": &terraform.ResourceAttrDiff{
New: "2",
},
},
},
}
r.Source = &MultiLevelFieldReader{
Readers: map[string]FieldReader{
"diff": r,
"set": &MapFieldReader{Schema: schema},
"state": &MapFieldReader{
Map: &BasicMapReader{
"list_of_sets_1.#": "1",
"list_of_sets_1.0.nested_set.#": "1",
"list_of_sets_1.0.nested_set.1.val": "1",
"list_of_sets_2.#": "1",
"list_of_sets_2.0.nested_set.#": "1",
"list_of_sets_2.0.nested_set.1.val": "1",
},
Schema: schema,
},
},
Levels: []string{"state", "config"},
}
out, err := r.ReadField([]string{"list_of_sets_2"})
if err != nil {
t.Fatalf("err: %v", err)
}
s := &Set{F: hashFn}
s.Add(map[string]interface{}{"val": 1})
expected := s.List()
l := out.Value.([]interface{})
i := l[0].(map[string]interface{})
actual := i["nested_set"].(*Set).List()
if !reflect.DeepEqual(expected, actual) {
t.Fatalf("bad: NestedSetUpdate\n\nexpected: %#v\n\ngot: %#v\n\n", expected, actual)
}
}
// https://github.com/hashicorp/terraform/issues/914
func TestDiffFieldReader_MapHandling(t *testing.T) {
schema := map[string]*Schema{
"tags": &Schema{
Type: TypeMap,
},
}
r := &DiffFieldReader{
Schema: schema,
Diff: &terraform.InstanceDiff{
Attributes: map[string]*terraform.ResourceAttrDiff{
"tags.%": &terraform.ResourceAttrDiff{
Old: "1",
New: "2",
},
"tags.baz": &terraform.ResourceAttrDiff{
Old: "",
New: "qux",
},
},
},
Source: &MapFieldReader{
Schema: schema,
Map: BasicMapReader(map[string]string{
"tags.%": "1",
"tags.foo": "bar",
}),
},
}
result, err := r.ReadField([]string{"tags"})
if err != nil {
t.Fatalf("ReadField failed: %#v", err)
}
expected := map[string]interface{}{
"foo": "bar",
"baz": "qux",
}
if !reflect.DeepEqual(expected, result.Value) {
t.Fatalf("bad: DiffHandling\n\nexpected: %#v\n\ngot: %#v\n\n", expected, result.Value)
}
}
func TestDiffFieldReader_extra(t *testing.T) {
schema := map[string]*Schema{
"stringComputed": &Schema{Type: TypeString},
"listMap": &Schema{
Type: TypeList,
Elem: &Schema{
Type: TypeMap,
},
},
"mapRemove": &Schema{Type: TypeMap},
"setChange": &Schema{
Type: TypeSet,
Optional: true,
Elem: &Resource{
Schema: map[string]*Schema{
"index": &Schema{
Type: TypeInt,
Required: true,
},
"value": &Schema{
Type: TypeString,
Required: true,
},
},
},
Set: func(a interface{}) int {
m := a.(map[string]interface{})
return m["index"].(int)
},
},
"setEmpty": &Schema{
Type: TypeSet,
Optional: true,
Elem: &Resource{
Schema: map[string]*Schema{
"index": &Schema{
Type: TypeInt,
Required: true,
},
"value": &Schema{
Type: TypeString,
Required: true,
},
},
},
Set: func(a interface{}) int {
m := a.(map[string]interface{})
return m["index"].(int)
},
},
}
r := &DiffFieldReader{
Schema: schema,
Diff: &terraform.InstanceDiff{
Attributes: map[string]*terraform.ResourceAttrDiff{
"stringComputed": &terraform.ResourceAttrDiff{
Old: "foo",
New: "bar",
NewComputed: true,
},
"listMap.0.bar": &terraform.ResourceAttrDiff{
NewRemoved: true,
},
"mapRemove.bar": &terraform.ResourceAttrDiff{
NewRemoved: true,
},
"setChange.10.value": &terraform.ResourceAttrDiff{
Old: "50",
New: "80",
},
"setEmpty.#": &terraform.ResourceAttrDiff{
Old: "2",
New: "0",
},
},
},
Source: &MapFieldReader{
Schema: schema,
Map: BasicMapReader(map[string]string{
"listMap.#": "2",
"listMap.0.foo": "bar",
"listMap.0.bar": "baz",
"listMap.1.baz": "baz",
"mapRemove.foo": "bar",
"mapRemove.bar": "bar",
"setChange.#": "1",
"setChange.10.index": "10",
"setChange.10.value": "50",
"setEmpty.#": "2",
"setEmpty.10.index": "10",
"setEmpty.10.value": "50",
"setEmpty.20.index": "20",
"setEmpty.20.value": "50",
}),
},
}
cases := map[string]struct {
Addr []string
Result FieldReadResult
Err bool
}{
"stringComputed": {
[]string{"stringComputed"},
FieldReadResult{
Value: "",
Exists: true,
Computed: true,
},
false,
},
"listMapRemoval": {
[]string{"listMap"},
FieldReadResult{
Value: []interface{}{
map[string]interface{}{
"foo": "bar",
},
map[string]interface{}{
"baz": "baz",
},
},
Exists: true,
},
false,
},
"mapRemove": {
[]string{"mapRemove"},
FieldReadResult{
Value: map[string]interface{}{
"foo": "bar",
},
Exists: true,
Computed: false,
},
false,
},
"setChange": {
[]string{"setChange"},
FieldReadResult{
Value: []interface{}{
map[string]interface{}{
"index": 10,
"value": "80",
},
},
Exists: true,
},
false,
},
"setEmpty": {
[]string{"setEmpty"},
FieldReadResult{
Value: []interface{}{},
Exists: true,
},
false,
},
}
for name, tc := range cases {
out, err := r.ReadField(tc.Addr)
if err != nil != tc.Err {
t.Fatalf("%s: err: %s", name, err)
}
if s, ok := out.Value.(*Set); ok {
// If it is a set, convert to a list so it's more easily checked.
out.Value = s.List()
}
if !reflect.DeepEqual(tc.Result, out) {
t.Fatalf("%s: bad: %#v", name, out)
}
}
}
func TestDiffFieldReader(t *testing.T) {
testFieldReader(t, func(s map[string]*Schema) FieldReader {
return &DiffFieldReader{
Schema: s,
Diff: &terraform.InstanceDiff{
Attributes: map[string]*terraform.ResourceAttrDiff{
"bool": &terraform.ResourceAttrDiff{
Old: "",
New: "true",
},
"int": &terraform.ResourceAttrDiff{
Old: "",
New: "42",
},
"float": &terraform.ResourceAttrDiff{
Old: "",
New: "3.1415",
},
"string": &terraform.ResourceAttrDiff{
Old: "",
New: "string",
},
"stringComputed": &terraform.ResourceAttrDiff{
Old: "foo",
New: "bar",
NewComputed: true,
},
"list.#": &terraform.ResourceAttrDiff{
Old: "0",
New: "2",
},
"list.0": &terraform.ResourceAttrDiff{
Old: "",
New: "foo",
},
"list.1": &terraform.ResourceAttrDiff{
Old: "",
New: "bar",
},
"listInt.#": &terraform.ResourceAttrDiff{
Old: "0",
New: "2",
},
"listInt.0": &terraform.ResourceAttrDiff{
Old: "",
New: "21",
},
"listInt.1": &terraform.ResourceAttrDiff{
Old: "",
New: "42",
},
"map.foo": &terraform.ResourceAttrDiff{
Old: "",
New: "bar",
},
"map.bar": &terraform.ResourceAttrDiff{
Old: "",
New: "baz",
},
"mapInt.%": &terraform.ResourceAttrDiff{
Old: "",
New: "2",
},
"mapInt.one": &terraform.ResourceAttrDiff{
Old: "",
New: "1",
},
"mapInt.two": &terraform.ResourceAttrDiff{
Old: "",
New: "2",
},
"mapIntNestedSchema.%": &terraform.ResourceAttrDiff{
Old: "",
New: "2",
},
"mapIntNestedSchema.one": &terraform.ResourceAttrDiff{
Old: "",
New: "1",
},
"mapIntNestedSchema.two": &terraform.ResourceAttrDiff{
Old: "",
New: "2",
},
"mapFloat.%": &terraform.ResourceAttrDiff{
Old: "",
New: "1",
},
"mapFloat.oneDotTwo": &terraform.ResourceAttrDiff{
Old: "",
New: "1.2",
},
"mapBool.%": &terraform.ResourceAttrDiff{
Old: "",
New: "2",
},
"mapBool.True": &terraform.ResourceAttrDiff{
Old: "",
New: "true",
},
"mapBool.False": &terraform.ResourceAttrDiff{
Old: "",
New: "false",
},
"set.#": &terraform.ResourceAttrDiff{
Old: "0",
New: "2",
},
"set.10": &terraform.ResourceAttrDiff{
Old: "",
New: "10",
},
"set.50": &terraform.ResourceAttrDiff{
Old: "",
New: "50",
},
"setDeep.#": &terraform.ResourceAttrDiff{
Old: "0",
New: "2",
},
"setDeep.10.index": &terraform.ResourceAttrDiff{
Old: "",
New: "10",
},
"setDeep.10.value": &terraform.ResourceAttrDiff{
Old: "",
New: "foo",
},
"setDeep.50.index": &terraform.ResourceAttrDiff{
Old: "",
New: "50",
},
"setDeep.50.value": &terraform.ResourceAttrDiff{
Old: "",
New: "bar",
},
},
},
Source: &MapFieldReader{
Schema: s,
Map: BasicMapReader(map[string]string{
"listMap.#": "2",
"listMap.0.foo": "bar",
"listMap.0.bar": "baz",
"listMap.1.baz": "baz",
}),
},
}
})
}

View File

@ -1,235 +0,0 @@
package schema
import (
"fmt"
"strings"
)
// MapFieldReader reads fields out of an untyped map[string]string to
// the best of its ability.
type MapFieldReader struct {
Map MapReader
Schema map[string]*Schema
}
func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) {
k := strings.Join(address, ".")
schemaList := addrToSchema(address, r.Schema)
if len(schemaList) == 0 {
return FieldReadResult{}, nil
}
schema := schemaList[len(schemaList)-1]
switch schema.Type {
case TypeBool, TypeInt, TypeFloat, TypeString:
return r.readPrimitive(address, schema)
case TypeList:
return readListField(r, address, schema)
case TypeMap:
return r.readMap(k, schema)
case TypeSet:
return r.readSet(address, schema)
case typeObject:
return readObjectField(r, address, schema.Elem.(map[string]*Schema))
default:
panic(fmt.Sprintf("Unknown type: %s", schema.Type))
}
}
func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
result := make(map[string]interface{})
resultSet := false
// If the name of the map field is directly in the map with an
// empty string, it means that the map is being deleted, so mark
// that it is set.
if v, ok := r.Map.Access(k); ok && v == "" {
resultSet = true
}
prefix := k + "."
r.Map.Range(func(k, v string) bool {
if strings.HasPrefix(k, prefix) {
resultSet = true
key := k[len(prefix):]
if key != "%" && key != "#" {
result[key] = v
}
}
return true
})
err := mapValuesToPrimitive(k, result, schema)
if err != nil {
return FieldReadResult{}, err
}
var resultVal interface{}
if resultSet {
resultVal = result
}
return FieldReadResult{
Value: resultVal,
Exists: resultSet,
}, nil
}
func (r *MapFieldReader) readPrimitive(
address []string, schema *Schema) (FieldReadResult, error) {
k := strings.Join(address, ".")
result, ok := r.Map.Access(k)
if !ok {
return FieldReadResult{}, nil
}
returnVal, err := stringToPrimitive(result, false, schema)
if err != nil {
return FieldReadResult{}, err
}
return FieldReadResult{
Value: returnVal,
Exists: true,
}, nil
}
func (r *MapFieldReader) readSet(
address []string, schema *Schema) (FieldReadResult, error) {
// copy address to ensure we don't modify the argument
address = append([]string(nil), address...)
// Get the number of elements in the list
countRaw, err := r.readPrimitive(
append(address, "#"), &Schema{Type: TypeInt})
if err != nil {
return FieldReadResult{}, err
}
if !countRaw.Exists {
// No count means we have no set
countRaw.Value = 0
}
// Create the set that will be our result
set := schema.ZeroValue().(*Set)
// If we have an empty set, then return an empty set
if countRaw.Computed || countRaw.Value.(int) == 0 {
return FieldReadResult{
Value: set,
Exists: countRaw.Exists,
Computed: countRaw.Computed,
}, nil
}
// Go through the map and find all the set items
prefix := strings.Join(address, ".") + "."
countExpected := countRaw.Value.(int)
countActual := make(map[string]struct{})
completed := r.Map.Range(func(k, _ string) bool {
if !strings.HasPrefix(k, prefix) {
return true
}
if strings.HasPrefix(k, prefix+"#") {
// Ignore the count field
return true
}
// Split the key, since it might be a sub-object like "idx.field"
parts := strings.Split(k[len(prefix):], ".")
idx := parts[0]
var raw FieldReadResult
raw, err = r.ReadField(append(address, idx))
if err != nil {
return false
}
if !raw.Exists {
// This shouldn't happen because we just verified it does exist
panic("missing field in set: " + k + "." + idx)
}
set.Add(raw.Value)
// Due to the way multimap readers work, if we've seen the number
// of fields we expect, then exit so that we don't read later values.
// For example: the "set" map might have "ports.#", "ports.0", and
// "ports.1", but the "state" map might have those plus "ports.2".
// We don't want "ports.2"
countActual[idx] = struct{}{}
if len(countActual) >= countExpected {
return false
}
return true
})
if !completed && err != nil {
return FieldReadResult{}, err
}
return FieldReadResult{
Value: set,
Exists: true,
}, nil
}
// MapReader is an interface that is given to MapFieldReader for accessing
// a "map". This can be used to have alternate implementations. For a basic
// map[string]string, use BasicMapReader.
type MapReader interface {
Access(string) (string, bool)
Range(func(string, string) bool) bool
}
// BasicMapReader implements MapReader for a single map.
type BasicMapReader map[string]string
func (r BasicMapReader) Access(k string) (string, bool) {
v, ok := r[k]
return v, ok
}
func (r BasicMapReader) Range(f func(string, string) bool) bool {
for k, v := range r {
if cont := f(k, v); !cont {
return false
}
}
return true
}
// MultiMapReader reads over multiple maps, preferring keys that are
// found earlier (lower numbered index) vs. later (higher numbered index)
type MultiMapReader []map[string]string
func (r MultiMapReader) Access(k string) (string, bool) {
for _, m := range r {
if v, ok := m[k]; ok {
return v, ok
}
}
return "", false
}
func (r MultiMapReader) Range(f func(string, string) bool) bool {
done := make(map[string]struct{})
for _, m := range r {
for k, v := range m {
if _, ok := done[k]; ok {
continue
}
if cont := f(k, v); !cont {
return false
}
done[k] = struct{}{}
}
}
return true
}
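A short composition sketch (illustrative only, assuming package schema): MultiMapReader answers Access from the first map containing the key, and MapFieldReader then interprets the flat keys against the schema.
func exampleMapReaders() {
	// Two layers; the earlier map wins for keys present in both.
	multi := MultiMapReader{
		{"ports.#": "1", "ports.0": "80"},
		{"ports.#": "2", "ports.0": "22", "ports.1": "443"},
	}
	r := &MapFieldReader{
		Map: multi,
		Schema: map[string]*Schema{
			"ports": {Type: TypeList, Elem: &Schema{Type: TypeInt}},
		},
	}
	res, err := r.ReadField([]string{"ports"})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Value) // [80]: count and element both come from the first map
}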

View File

@ -1,123 +0,0 @@
package schema
import (
"reflect"
"testing"
)
func TestMapFieldReader_impl(t *testing.T) {
var _ FieldReader = new(MapFieldReader)
}
func TestMapFieldReader(t *testing.T) {
testFieldReader(t, func(s map[string]*Schema) FieldReader {
return &MapFieldReader{
Schema: s,
Map: BasicMapReader(map[string]string{
"bool": "true",
"int": "42",
"float": "3.1415",
"string": "string",
"list.#": "2",
"list.0": "foo",
"list.1": "bar",
"listInt.#": "2",
"listInt.0": "21",
"listInt.1": "42",
"map.%": "2",
"map.foo": "bar",
"map.bar": "baz",
"set.#": "2",
"set.10": "10",
"set.50": "50",
"setDeep.#": "2",
"setDeep.10.index": "10",
"setDeep.10.value": "foo",
"setDeep.50.index": "50",
"setDeep.50.value": "bar",
"mapInt.%": "2",
"mapInt.one": "1",
"mapInt.two": "2",
"mapIntNestedSchema.%": "2",
"mapIntNestedSchema.one": "1",
"mapIntNestedSchema.two": "2",
"mapFloat.%": "1",
"mapFloat.oneDotTwo": "1.2",
"mapBool.%": "2",
"mapBool.True": "true",
"mapBool.False": "false",
}),
}
})
}
func TestMapFieldReader_extra(t *testing.T) {
r := &MapFieldReader{
Schema: map[string]*Schema{
"mapDel": &Schema{Type: TypeMap},
"mapEmpty": &Schema{Type: TypeMap},
},
Map: BasicMapReader(map[string]string{
"mapDel": "",
"mapEmpty.%": "0",
}),
}
cases := map[string]struct {
Addr []string
Out interface{}
OutOk bool
OutComputed bool
OutErr bool
}{
"mapDel": {
[]string{"mapDel"},
map[string]interface{}{},
true,
false,
false,
},
"mapEmpty": {
[]string{"mapEmpty"},
map[string]interface{}{},
true,
false,
false,
},
}
for name, tc := range cases {
out, err := r.ReadField(tc.Addr)
if err != nil != tc.OutErr {
t.Fatalf("%s: err: %s", name, err)
}
if out.Computed != tc.OutComputed {
t.Fatalf("%s: err: %#v", name, out.Computed)
}
if s, ok := out.Value.(*Set); ok {
// If it is a set, convert to a list so it's more easily checked.
out.Value = s.List()
}
if !reflect.DeepEqual(out.Value, tc.Out) {
t.Fatalf("%s: out: %#v", name, out.Value)
}
if out.Exists != tc.OutOk {
t.Fatalf("%s: outOk: %#v", name, out.Exists)
}
}
}

View File

@ -1,63 +0,0 @@
package schema
import (
"fmt"
)
// MultiLevelFieldReader reads from other field readers,
// merging their results along the way in a specific order. You can specify
// "levels" and name them in order to read only an exact level or up to
// a specific level.
//
// This is useful for saying things such as "read the field from the state
// and config and merge them" or "read the latest value of the field".
type MultiLevelFieldReader struct {
Readers map[string]FieldReader
Levels []string
}
func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) {
return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1])
}
func (r *MultiLevelFieldReader) ReadFieldExact(
address []string, level string) (FieldReadResult, error) {
reader, ok := r.Readers[level]
if !ok {
return FieldReadResult{}, fmt.Errorf(
"Unknown reader level: %s", level)
}
result, err := reader.ReadField(address)
if err != nil {
return FieldReadResult{}, fmt.Errorf(
"Error reading level %s: %s", level, err)
}
return result, nil
}
func (r *MultiLevelFieldReader) ReadFieldMerge(
address []string, level string) (FieldReadResult, error) {
var result FieldReadResult
for _, l := range r.Levels {
if r, ok := r.Readers[l]; ok {
out, err := r.ReadField(address)
if err != nil {
return FieldReadResult{}, fmt.Errorf(
"Error reading level %s: %s", l, err)
}
// TODO: computed
if out.Exists {
result = out
}
}
if l == level {
break
}
}
return result, nil
}
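A rough usage sketch (illustrative only, assuming package schema and the terraform import): levels are ordered from oldest to newest, ReadFieldMerge reads up to a named level and keeps the last value that exists, and ReadFieldExact consults a single level.
func exampleMultiLevelFieldReader() {
	s := map[string]*Schema{"name": {Type: TypeString}}
	state := &MapFieldReader{
		Schema: s,
		Map:    BasicMapReader(map[string]string{"name": "old"}),
	}
	diff := &DiffFieldReader{
		Schema: s,
		Source: state,
		Diff: &terraform.InstanceDiff{
			Attributes: map[string]*terraform.ResourceAttrDiff{
				"name": {Old: "old", New: "new"},
			},
		},
	}
	r := &MultiLevelFieldReader{
		Levels:  []string{"state", "diff"},
		Readers: map[string]FieldReader{"state": state, "diff": diff},
	}
	merged, _ := r.ReadFieldMerge([]string{"name"}, "diff")
	fmt.Println(merged.Value) // "new": the diff level overrides state
	exact, _ := r.ReadFieldExact([]string{"name"}, "state")
	fmt.Println(exact.Value) // "old": only the state level is consulted
}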

View File

@ -1,270 +0,0 @@
package schema
import (
"reflect"
"strconv"
"testing"
"github.com/hashicorp/terraform/terraform"
)
func TestMultiLevelFieldReaderReadFieldExact(t *testing.T) {
cases := map[string]struct {
Addr []string
Readers []FieldReader
Level string
Result FieldReadResult
}{
"specific": {
Addr: []string{"foo"},
Readers: []FieldReader{
&MapFieldReader{
Schema: map[string]*Schema{
"foo": &Schema{Type: TypeString},
},
Map: BasicMapReader(map[string]string{
"foo": "bar",
}),
},
&MapFieldReader{
Schema: map[string]*Schema{
"foo": &Schema{Type: TypeString},
},
Map: BasicMapReader(map[string]string{
"foo": "baz",
}),
},
&MapFieldReader{
Schema: map[string]*Schema{
"foo": &Schema{Type: TypeString},
},
Map: BasicMapReader(map[string]string{}),
},
},
Level: "1",
Result: FieldReadResult{
Value: "baz",
Exists: true,
},
},
}
for name, tc := range cases {
readers := make(map[string]FieldReader)
levels := make([]string, len(tc.Readers))
for i, r := range tc.Readers {
is := strconv.FormatInt(int64(i), 10)
readers[is] = r
levels[i] = is
}
r := &MultiLevelFieldReader{
Readers: readers,
Levels: levels,
}
out, err := r.ReadFieldExact(tc.Addr, tc.Level)
if err != nil {
t.Fatalf("%s: err: %s", name, err)
}
if !reflect.DeepEqual(tc.Result, out) {
t.Fatalf("%s: bad: %#v", name, out)
}
}
}
func TestMultiLevelFieldReaderReadFieldMerge(t *testing.T) {
cases := map[string]struct {
Addr []string
Readers []FieldReader
Result FieldReadResult
}{
"stringInDiff": {
Addr: []string{"availability_zone"},
Readers: []FieldReader{
&DiffFieldReader{
Schema: map[string]*Schema{
"availability_zone": &Schema{Type: TypeString},
},
Source: &MapFieldReader{
Schema: map[string]*Schema{
"availability_zone": &Schema{Type: TypeString},
},
Map: BasicMapReader(map[string]string{
"availability_zone": "foo",
}),
},
Diff: &terraform.InstanceDiff{
Attributes: map[string]*terraform.ResourceAttrDiff{
"availability_zone": &terraform.ResourceAttrDiff{
Old: "foo",
New: "bar",
RequiresNew: true,
},
},
},
},
},
Result: FieldReadResult{
Value: "bar",
Exists: true,
},
},
"lastLevelComputed": {
Addr: []string{"availability_zone"},
Readers: []FieldReader{
&MapFieldReader{
Schema: map[string]*Schema{
"availability_zone": &Schema{Type: TypeString},
},
Map: BasicMapReader(map[string]string{
"availability_zone": "foo",
}),
},
&DiffFieldReader{
Schema: map[string]*Schema{
"availability_zone": &Schema{Type: TypeString},
},
Source: &MapFieldReader{
Schema: map[string]*Schema{
"availability_zone": &Schema{Type: TypeString},
},
Map: BasicMapReader(map[string]string{
"availability_zone": "foo",
}),
},
Diff: &terraform.InstanceDiff{
Attributes: map[string]*terraform.ResourceAttrDiff{
"availability_zone": &terraform.ResourceAttrDiff{
Old: "foo",
New: "bar",
NewComputed: true,
},
},
},
},
},
Result: FieldReadResult{
Value: "",
Exists: true,
Computed: true,
},
},
"list of maps with removal in diff": {
Addr: []string{"config_vars"},
Readers: []FieldReader{
&DiffFieldReader{
Schema: map[string]*Schema{
"config_vars": &Schema{
Type: TypeList,
Elem: &Schema{Type: TypeMap},
},
},
Source: &MapFieldReader{
Schema: map[string]*Schema{
"config_vars": &Schema{
Type: TypeList,
Elem: &Schema{Type: TypeMap},
},
},
Map: BasicMapReader(map[string]string{
"config_vars.#": "2",
"config_vars.0.foo": "bar",
"config_vars.0.bar": "bar",
"config_vars.1.bar": "baz",
}),
},
Diff: &terraform.InstanceDiff{
Attributes: map[string]*terraform.ResourceAttrDiff{
"config_vars.0.bar": &terraform.ResourceAttrDiff{
NewRemoved: true,
},
},
},
},
},
Result: FieldReadResult{
Value: []interface{}{
map[string]interface{}{
"foo": "bar",
},
map[string]interface{}{
"bar": "baz",
},
},
Exists: true,
},
},
"first level only": {
Addr: []string{"foo"},
Readers: []FieldReader{
&MapFieldReader{
Schema: map[string]*Schema{
"foo": &Schema{Type: TypeString},
},
Map: BasicMapReader(map[string]string{
"foo": "bar",
}),
},
&MapFieldReader{
Schema: map[string]*Schema{
"foo": &Schema{Type: TypeString},
},
Map: BasicMapReader(map[string]string{}),
},
},
Result: FieldReadResult{
Value: "bar",
Exists: true,
},
},
}
for name, tc := range cases {
readers := make(map[string]FieldReader)
levels := make([]string, len(tc.Readers))
for i, r := range tc.Readers {
is := strconv.FormatInt(int64(i), 10)
readers[is] = r
levels[i] = is
}
r := &MultiLevelFieldReader{
Readers: readers,
Levels: levels,
}
out, err := r.ReadFieldMerge(tc.Addr, levels[len(levels)-1])
if err != nil {
t.Fatalf("%s: err: %s", name, err)
}
if !reflect.DeepEqual(tc.Result, out) {
t.Fatalf("%s: bad: %#v", name, out)
}
}
}

View File

@ -1,471 +0,0 @@
package schema
import (
"reflect"
"testing"
)
func TestAddrToSchema(t *testing.T) {
cases := map[string]struct {
Addr []string
Schema map[string]*Schema
Result []ValueType
}{
"full object": {
[]string{},
map[string]*Schema{
"list": &Schema{
Type: TypeList,
Elem: &Schema{Type: TypeInt},
},
},
[]ValueType{typeObject},
},
"list": {
[]string{"list"},
map[string]*Schema{
"list": &Schema{
Type: TypeList,
Elem: &Schema{Type: TypeInt},
},
},
[]ValueType{TypeList},
},
"list.#": {
[]string{"list", "#"},
map[string]*Schema{
"list": &Schema{
Type: TypeList,
Elem: &Schema{Type: TypeInt},
},
},
[]ValueType{TypeList, TypeInt},
},
"list.0": {
[]string{"list", "0"},
map[string]*Schema{
"list": &Schema{
Type: TypeList,
Elem: &Schema{Type: TypeInt},
},
},
[]ValueType{TypeList, TypeInt},
},
"list.0 with resource": {
[]string{"list", "0"},
map[string]*Schema{
"list": &Schema{
Type: TypeList,
Elem: &Resource{
Schema: map[string]*Schema{
"field": &Schema{Type: TypeString},
},
},
},
},
[]ValueType{TypeList, typeObject},
},
"list.0.field": {
[]string{"list", "0", "field"},
map[string]*Schema{
"list": &Schema{
Type: TypeList,
Elem: &Resource{
Schema: map[string]*Schema{
"field": &Schema{Type: TypeString},
},
},
},
},
[]ValueType{TypeList, typeObject, TypeString},
},
"set": {
[]string{"set"},
map[string]*Schema{
"set": &Schema{
Type: TypeSet,
Elem: &Schema{Type: TypeInt},
Set: func(a interface{}) int {
return a.(int)
},
},
},
[]ValueType{TypeSet},
},
"set.#": {
[]string{"set", "#"},
map[string]*Schema{
"set": &Schema{
Type: TypeSet,
Elem: &Schema{Type: TypeInt},
Set: func(a interface{}) int {
return a.(int)
},
},
},
[]ValueType{TypeSet, TypeInt},
},
"set.0": {
[]string{"set", "0"},
map[string]*Schema{
"set": &Schema{
Type: TypeSet,
Elem: &Schema{Type: TypeInt},
Set: func(a interface{}) int {
return a.(int)
},
},
},
[]ValueType{TypeSet, TypeInt},
},
"set.0 with resource": {
[]string{"set", "0"},
map[string]*Schema{
"set": &Schema{
Type: TypeSet,
Elem: &Resource{
Schema: map[string]*Schema{
"field": &Schema{Type: TypeString},
},
},
},
},
[]ValueType{TypeSet, typeObject},
},
"mapElem": {
[]string{"map", "foo"},
map[string]*Schema{
"map": &Schema{Type: TypeMap},
},
[]ValueType{TypeMap, TypeString},
},
"setDeep": {
[]string{"set", "50", "index"},
map[string]*Schema{
"set": &Schema{
Type: TypeSet,
Elem: &Resource{
Schema: map[string]*Schema{
"index": &Schema{Type: TypeInt},
"value": &Schema{Type: TypeString},
},
},
Set: func(a interface{}) int {
return a.(map[string]interface{})["index"].(int)
},
},
},
[]ValueType{TypeSet, typeObject, TypeInt},
},
}
for name, tc := range cases {
result := addrToSchema(tc.Addr, tc.Schema)
types := make([]ValueType, len(result))
for i, v := range result {
types[i] = v.Type
}
if !reflect.DeepEqual(types, tc.Result) {
t.Fatalf("%s: %#v", name, types)
}
}
}
// testFieldReader is a helper that should be used to verify that
// a FieldReader behaves properly in all the common cases.
func testFieldReader(t *testing.T, f func(map[string]*Schema) FieldReader) {
schema := map[string]*Schema{
// Primitives
"bool": &Schema{Type: TypeBool},
"float": &Schema{Type: TypeFloat},
"int": &Schema{Type: TypeInt},
"string": &Schema{Type: TypeString},
// Lists
"list": &Schema{
Type: TypeList,
Elem: &Schema{Type: TypeString},
},
"listInt": &Schema{
Type: TypeList,
Elem: &Schema{Type: TypeInt},
},
"listMap": &Schema{
Type: TypeList,
Elem: &Schema{
Type: TypeMap,
},
},
// Maps
"map": &Schema{Type: TypeMap},
"mapInt": &Schema{
Type: TypeMap,
Elem: TypeInt,
},
// This is used to verify that the type of a Map can be specified using the
// same syntax as for lists (as a nested *Schema passed to Elem)
"mapIntNestedSchema": &Schema{
Type: TypeMap,
Elem: &Schema{Type: TypeInt},
},
"mapFloat": &Schema{
Type: TypeMap,
Elem: TypeFloat,
},
"mapBool": &Schema{
Type: TypeMap,
Elem: TypeBool,
},
// Sets
"set": &Schema{
Type: TypeSet,
Elem: &Schema{Type: TypeInt},
Set: func(a interface{}) int {
return a.(int)
},
},
"setDeep": &Schema{
Type: TypeSet,
Elem: &Resource{
Schema: map[string]*Schema{
"index": &Schema{Type: TypeInt},
"value": &Schema{Type: TypeString},
},
},
Set: func(a interface{}) int {
return a.(map[string]interface{})["index"].(int)
},
},
"setEmpty": &Schema{
Type: TypeSet,
Elem: &Schema{Type: TypeInt},
Set: func(a interface{}) int {
return a.(int)
},
},
}
cases := map[string]struct {
Addr []string
Result FieldReadResult
Err bool
}{
"noexist": {
[]string{"boolNOPE"},
FieldReadResult{
Value: nil,
Exists: false,
Computed: false,
},
false,
},
"bool": {
[]string{"bool"},
FieldReadResult{
Value: true,
Exists: true,
Computed: false,
},
false,
},
"float": {
[]string{"float"},
FieldReadResult{
Value: 3.1415,
Exists: true,
Computed: false,
},
false,
},
"int": {
[]string{"int"},
FieldReadResult{
Value: 42,
Exists: true,
Computed: false,
},
false,
},
"string": {
[]string{"string"},
FieldReadResult{
Value: "string",
Exists: true,
Computed: false,
},
false,
},
"list": {
[]string{"list"},
FieldReadResult{
Value: []interface{}{
"foo",
"bar",
},
Exists: true,
Computed: false,
},
false,
},
"listInt": {
[]string{"listInt"},
FieldReadResult{
Value: []interface{}{
21,
42,
},
Exists: true,
Computed: false,
},
false,
},
"map": {
[]string{"map"},
FieldReadResult{
Value: map[string]interface{}{
"foo": "bar",
"bar": "baz",
},
Exists: true,
Computed: false,
},
false,
},
"mapInt": {
[]string{"mapInt"},
FieldReadResult{
Value: map[string]interface{}{
"one": 1,
"two": 2,
},
Exists: true,
Computed: false,
},
false,
},
"mapIntNestedSchema": {
[]string{"mapIntNestedSchema"},
FieldReadResult{
Value: map[string]interface{}{
"one": 1,
"two": 2,
},
Exists: true,
Computed: false,
},
false,
},
"mapFloat": {
[]string{"mapFloat"},
FieldReadResult{
Value: map[string]interface{}{
"oneDotTwo": 1.2,
},
Exists: true,
Computed: false,
},
false,
},
"mapBool": {
[]string{"mapBool"},
FieldReadResult{
Value: map[string]interface{}{
"True": true,
"False": false,
},
Exists: true,
Computed: false,
},
false,
},
"mapelem": {
[]string{"map", "foo"},
FieldReadResult{
Value: "bar",
Exists: true,
Computed: false,
},
false,
},
"set": {
[]string{"set"},
FieldReadResult{
Value: []interface{}{10, 50},
Exists: true,
Computed: false,
},
false,
},
"setDeep": {
[]string{"setDeep"},
FieldReadResult{
Value: []interface{}{
map[string]interface{}{
"index": 10,
"value": "foo",
},
map[string]interface{}{
"index": 50,
"value": "bar",
},
},
Exists: true,
Computed: false,
},
false,
},
"setEmpty": {
[]string{"setEmpty"},
FieldReadResult{
Value: []interface{}{},
Exists: false,
},
false,
},
}
for name, tc := range cases {
r := f(schema)
out, err := r.ReadField(tc.Addr)
if err != nil != tc.Err {
t.Fatalf("%s: err: %s", name, err)
}
if s, ok := out.Value.(*Set); ok {
// If it is a set, convert to a list so it's more easily checked.
out.Value = s.List()
}
if !reflect.DeepEqual(tc.Result, out) {
t.Fatalf("%s: bad: %#v", name, out)
}
}
}

View File

@ -1,8 +0,0 @@
package schema
// FieldWriters are responsible for writing fields by address into
// a proper typed representation. ResourceData uses this to write new data
// into existing sources.
type FieldWriter interface {
WriteField([]string, interface{}) error
}

View File

@ -1,357 +0,0 @@
package schema
import (
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"github.com/mitchellh/mapstructure"
)
// MapFieldWriter writes data into a single map[string]string structure.
type MapFieldWriter struct {
Schema map[string]*Schema
lock sync.Mutex
result map[string]string
}
// Map returns the underlying map that is being written to.
func (w *MapFieldWriter) Map() map[string]string {
w.lock.Lock()
defer w.lock.Unlock()
if w.result == nil {
w.result = make(map[string]string)
}
return w.result
}
func (w *MapFieldWriter) unsafeWriteField(addr string, value string) {
w.lock.Lock()
defer w.lock.Unlock()
if w.result == nil {
w.result = make(map[string]string)
}
w.result[addr] = value
}
// clearTree clears a field and any sub-fields of the given address out of the
// map. This should be used to reset some kind of complex structures (namely
// sets) before writing to make sure that any conflicting data is removed (for
// example, if the set was previously written to the writer's layer).
func (w *MapFieldWriter) clearTree(addr []string) {
prefix := strings.Join(addr, ".") + "."
for k := range w.result {
if strings.HasPrefix(k, prefix) {
delete(w.result, k)
}
}
}
func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error {
w.lock.Lock()
defer w.lock.Unlock()
if w.result == nil {
w.result = make(map[string]string)
}
schemaList := addrToSchema(addr, w.Schema)
if len(schemaList) == 0 {
return fmt.Errorf("Invalid address to set: %#v", addr)
}
// If we're setting anything other than a list root or set root,
// then disallow it.
for _, schema := range schemaList[:len(schemaList)-1] {
if schema.Type == TypeList {
return fmt.Errorf(
"%s: can only set full list",
strings.Join(addr, "."))
}
if schema.Type == TypeMap {
return fmt.Errorf(
"%s: can only set full map",
strings.Join(addr, "."))
}
if schema.Type == TypeSet {
return fmt.Errorf(
"%s: can only set full set",
strings.Join(addr, "."))
}
}
return w.set(addr, value)
}
func (w *MapFieldWriter) set(addr []string, value interface{}) error {
schemaList := addrToSchema(addr, w.Schema)
if len(schemaList) == 0 {
return fmt.Errorf("Invalid address to set: %#v", addr)
}
schema := schemaList[len(schemaList)-1]
switch schema.Type {
case TypeBool, TypeInt, TypeFloat, TypeString:
return w.setPrimitive(addr, value, schema)
case TypeList:
return w.setList(addr, value, schema)
case TypeMap:
return w.setMap(addr, value, schema)
case TypeSet:
return w.setSet(addr, value, schema)
case typeObject:
return w.setObject(addr, value, schema)
default:
panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
}
}
func (w *MapFieldWriter) setList(
addr []string,
v interface{},
schema *Schema) error {
k := strings.Join(addr, ".")
setElement := func(idx string, value interface{}) error {
addrCopy := make([]string, len(addr), len(addr)+1)
copy(addrCopy, addr)
return w.set(append(addrCopy, idx), value)
}
var vs []interface{}
if err := mapstructure.Decode(v, &vs); err != nil {
return fmt.Errorf("%s: %s", k, err)
}
// Wipe the list from the current writer prior to writing if it exists.
// Multiple writes to the same layer are a lot safer for lists than for sets,
// since indexes are always deterministic and the length will always be
// updated with the current length on the last write, but making sure we have
// a clean namespace removes any chance for edge cases to pop up and ensures
// that the last write to the list is the correct value.
w.clearTree(addr)
// Set the entire list.
var err error
for i, elem := range vs {
is := strconv.FormatInt(int64(i), 10)
err = setElement(is, elem)
if err != nil {
break
}
}
if err != nil {
for i := range vs {
is := strconv.FormatInt(int64(i), 10)
setElement(is, nil)
}
return err
}
w.result[k+".#"] = strconv.FormatInt(int64(len(vs)), 10)
return nil
}
func (w *MapFieldWriter) setMap(
addr []string,
value interface{},
schema *Schema) error {
k := strings.Join(addr, ".")
v := reflect.ValueOf(value)
vs := make(map[string]interface{})
if value == nil {
// The empty string here means the map is removed.
w.result[k] = ""
return nil
}
if v.Kind() != reflect.Map {
return fmt.Errorf("%s: must be a map", k)
}
if v.Type().Key().Kind() != reflect.String {
return fmt.Errorf("%s: keys must strings", k)
}
for _, mk := range v.MapKeys() {
mv := v.MapIndex(mk)
vs[mk.String()] = mv.Interface()
}
// Wipe this address tree. The contents of the map should always reflect the
// last write made to it.
w.clearTree(addr)
// Remove the pure key since we're setting the full map value
delete(w.result, k)
// Set each subkey
addrCopy := make([]string, len(addr), len(addr)+1)
copy(addrCopy, addr)
for subKey, v := range vs {
if err := w.set(append(addrCopy, subKey), v); err != nil {
return err
}
}
// Set the count
w.result[k+".%"] = strconv.Itoa(len(vs))
return nil
}
func (w *MapFieldWriter) setObject(
addr []string,
value interface{},
schema *Schema) error {
// Set the entire object. First decode into a proper structure
var v map[string]interface{}
if err := mapstructure.Decode(value, &v); err != nil {
return fmt.Errorf("%s: %s", strings.Join(addr, "."), err)
}
// Make space for additional elements in the address
addrCopy := make([]string, len(addr), len(addr)+1)
copy(addrCopy, addr)
// Set each element in turn
var err error
for k1, v1 := range v {
if err = w.set(append(addrCopy, k1), v1); err != nil {
break
}
}
if err != nil {
for k1 := range v {
w.set(append(addrCopy, k1), nil)
}
}
return err
}
func (w *MapFieldWriter) setPrimitive(
addr []string,
v interface{},
schema *Schema) error {
k := strings.Join(addr, ".")
if v == nil {
// The empty string here means the value is removed.
w.result[k] = ""
return nil
}
var set string
switch schema.Type {
case TypeBool:
var b bool
if err := mapstructure.Decode(v, &b); err != nil {
return fmt.Errorf("%s: %s", k, err)
}
set = strconv.FormatBool(b)
case TypeString:
if err := mapstructure.Decode(v, &set); err != nil {
return fmt.Errorf("%s: %s", k, err)
}
case TypeInt:
var n int
if err := mapstructure.Decode(v, &n); err != nil {
return fmt.Errorf("%s: %s", k, err)
}
set = strconv.FormatInt(int64(n), 10)
case TypeFloat:
var n float64
if err := mapstructure.Decode(v, &n); err != nil {
return fmt.Errorf("%s: %s", k, err)
}
set = strconv.FormatFloat(float64(n), 'G', -1, 64)
default:
return fmt.Errorf("Unknown type: %#v", schema.Type)
}
w.result[k] = set
return nil
}
func (w *MapFieldWriter) setSet(
addr []string,
value interface{},
schema *Schema) error {
addrCopy := make([]string, len(addr), len(addr)+1)
copy(addrCopy, addr)
k := strings.Join(addr, ".")
if value == nil {
w.result[k+".#"] = "0"
return nil
}
// If it is a slice, then we have to turn it into a *Set so that
// we get the proper order back based on the hash code.
if v := reflect.ValueOf(value); v.Kind() == reflect.Slice {
// Build a temp *ResourceData to use for the conversion
tempAddr := addr[len(addr)-1:]
tempSchema := *schema
tempSchema.Type = TypeList
tempSchemaMap := map[string]*Schema{tempAddr[0]: &tempSchema}
tempW := &MapFieldWriter{Schema: tempSchemaMap}
// Set the entire list, this lets us get values out of it
if err := tempW.WriteField(tempAddr, value); err != nil {
return err
}
// Build the set by going over the list items in order and
// hashing them into the set. The reason we go over the list and
// not the `value` directly is because this forces all types
// to become []interface{} (generic) instead of []string, which
// most hash functions are expecting.
s := schema.ZeroValue().(*Set)
tempR := &MapFieldReader{
Map: BasicMapReader(tempW.Map()),
Schema: tempSchemaMap,
}
for i := 0; i < v.Len(); i++ {
is := strconv.FormatInt(int64(i), 10)
result, err := tempR.ReadField(append(tempAddr, is))
if err != nil {
return err
}
if !result.Exists {
panic("set item just set doesn't exist")
}
s.Add(result.Value)
}
value = s
}
// Clear any keys that match the set address first. This is necessary because
// it's always possible and sometimes may be necessary to write to a certain
// writer layer more than once with different set data each time, which will
// lead to different keys being inserted, which can lead to determinism
// problems when the old data isn't wiped first.
w.clearTree(addr)
if value.(*Set) == nil {
w.result[k+".#"] = "0"
return nil
}
for code, elem := range value.(*Set).m {
if err := w.set(append(addrCopy, code), elem); err != nil {
return err
}
}
w.result[k+".#"] = strconv.Itoa(value.(*Set).Len())
return nil
}
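For reference, an illustrative sketch (assuming package schema) of what this writer produces: complex values are flattened into address-keyed strings, with "#" recording list/set lengths and "%" recording map sizes.
func exampleMapFieldWriter() {
	w := &MapFieldWriter{
		Schema: map[string]*Schema{
			"ports": {Type: TypeList, Elem: &Schema{Type: TypeInt}},
			"tags":  {Type: TypeMap},
		},
	}
	if err := w.WriteField([]string{"ports"}, []interface{}{80, 443}); err != nil {
		panic(err)
	}
	if err := w.WriteField([]string{"tags"}, map[string]interface{}{"env": "dev"}); err != nil {
		panic(err)
	}
	// Resulting flat map:
	//   ports.# = "2", ports.0 = "80", ports.1 = "443"
	//   tags.%  = "1", tags.env = "dev"
	fmt.Println(w.Map())
}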

View File

@ -1,547 +0,0 @@
package schema
import (
"reflect"
"testing"
)
func TestMapFieldWriter_impl(t *testing.T) {
var _ FieldWriter = new(MapFieldWriter)
}
func TestMapFieldWriter(t *testing.T) {
schema := map[string]*Schema{
"bool": &Schema{Type: TypeBool},
"int": &Schema{Type: TypeInt},
"string": &Schema{Type: TypeString},
"list": &Schema{
Type: TypeList,
Elem: &Schema{Type: TypeString},
},
"listInt": &Schema{
Type: TypeList,
Elem: &Schema{Type: TypeInt},
},
"listResource": &Schema{
Type: TypeList,
Optional: true,
Computed: true,
Elem: &Resource{
Schema: map[string]*Schema{
"value": &Schema{
Type: TypeInt,
Optional: true,
},
},
},
},
"map": &Schema{Type: TypeMap},
"set": &Schema{
Type: TypeSet,
Elem: &Schema{Type: TypeInt},
Set: func(a interface{}) int {
return a.(int)
},
},
"setDeep": &Schema{
Type: TypeSet,
Elem: &Resource{
Schema: map[string]*Schema{
"index": &Schema{Type: TypeInt},
"value": &Schema{Type: TypeString},
},
},
Set: func(a interface{}) int {
return a.(map[string]interface{})["index"].(int)
},
},
}
cases := map[string]struct {
Addr []string
Value interface{}
Err bool
Out map[string]string
}{
"noexist": {
[]string{"noexist"},
42,
true,
map[string]string{},
},
"bool": {
[]string{"bool"},
false,
false,
map[string]string{
"bool": "false",
},
},
"int": {
[]string{"int"},
42,
false,
map[string]string{
"int": "42",
},
},
"string": {
[]string{"string"},
"42",
false,
map[string]string{
"string": "42",
},
},
"string nil": {
[]string{"string"},
nil,
false,
map[string]string{
"string": "",
},
},
"list of resources": {
[]string{"listResource"},
[]interface{}{
map[string]interface{}{
"value": 80,
},
},
false,
map[string]string{
"listResource.#": "1",
"listResource.0.value": "80",
},
},
"list of resources empty": {
[]string{"listResource"},
[]interface{}{},
false,
map[string]string{
"listResource.#": "0",
},
},
"list of resources nil": {
[]string{"listResource"},
nil,
false,
map[string]string{
"listResource.#": "0",
},
},
"list of strings": {
[]string{"list"},
[]interface{}{"foo", "bar"},
false,
map[string]string{
"list.#": "2",
"list.0": "foo",
"list.1": "bar",
},
},
"list element": {
[]string{"list", "0"},
"string",
true,
map[string]string{},
},
"map": {
[]string{"map"},
map[string]interface{}{"foo": "bar"},
false,
map[string]string{
"map.%": "1",
"map.foo": "bar",
},
},
"map delete": {
[]string{"map"},
nil,
false,
map[string]string{
"map": "",
},
},
"map element": {
[]string{"map", "foo"},
"bar",
true,
map[string]string{},
},
"set": {
[]string{"set"},
[]interface{}{1, 2, 5},
false,
map[string]string{
"set.#": "3",
"set.1": "1",
"set.2": "2",
"set.5": "5",
},
},
"set nil": {
[]string{"set"},
nil,
false,
map[string]string{
"set.#": "0",
},
},
"set typed nil": {
[]string{"set"},
func() *Set { return nil }(),
false,
map[string]string{
"set.#": "0",
},
},
"set resource": {
[]string{"setDeep"},
[]interface{}{
map[string]interface{}{
"index": 10,
"value": "foo",
},
map[string]interface{}{
"index": 50,
"value": "bar",
},
},
false,
map[string]string{
"setDeep.#": "2",
"setDeep.10.index": "10",
"setDeep.10.value": "foo",
"setDeep.50.index": "50",
"setDeep.50.value": "bar",
},
},
"set element": {
[]string{"set", "5"},
5,
true,
map[string]string{},
},
"full object": {
nil,
map[string]interface{}{
"string": "foo",
"list": []interface{}{"foo", "bar"},
},
false,
map[string]string{
"string": "foo",
"list.#": "2",
"list.0": "foo",
"list.1": "bar",
},
},
}
for name, tc := range cases {
w := &MapFieldWriter{Schema: schema}
err := w.WriteField(tc.Addr, tc.Value)
if err != nil != tc.Err {
t.Fatalf("%s: err: %s", name, err)
}
actual := w.Map()
if !reflect.DeepEqual(actual, tc.Out) {
t.Fatalf("%s: bad: %#v", name, actual)
}
}
}
func TestMapFieldWriterCleanSet(t *testing.T) {
schema := map[string]*Schema{
"setDeep": &Schema{
Type: TypeSet,
Elem: &Resource{
Schema: map[string]*Schema{
"index": &Schema{Type: TypeInt},
"value": &Schema{Type: TypeString},
},
},
Set: func(a interface{}) int {
return a.(map[string]interface{})["index"].(int)
},
},
}
values := []struct {
Addr []string
Value interface{}
Out map[string]string
}{
{
[]string{"setDeep"},
[]interface{}{
map[string]interface{}{
"index": 10,
"value": "foo",
},
map[string]interface{}{
"index": 50,
"value": "bar",
},
},
map[string]string{
"setDeep.#": "2",
"setDeep.10.index": "10",
"setDeep.10.value": "foo",
"setDeep.50.index": "50",
"setDeep.50.value": "bar",
},
},
{
[]string{"setDeep"},
[]interface{}{
map[string]interface{}{
"index": 20,
"value": "baz",
},
map[string]interface{}{
"index": 60,
"value": "qux",
},
},
map[string]string{
"setDeep.#": "2",
"setDeep.20.index": "20",
"setDeep.20.value": "baz",
"setDeep.60.index": "60",
"setDeep.60.value": "qux",
},
},
{
[]string{"setDeep"},
[]interface{}{
map[string]interface{}{
"index": 30,
"value": "one",
},
map[string]interface{}{
"index": 70,
"value": "two",
},
},
map[string]string{
"setDeep.#": "2",
"setDeep.30.index": "30",
"setDeep.30.value": "one",
"setDeep.70.index": "70",
"setDeep.70.value": "two",
},
},
}
w := &MapFieldWriter{Schema: schema}
for n, tc := range values {
err := w.WriteField(tc.Addr, tc.Value)
if err != nil {
t.Fatalf("%d: err: %s", n, err)
}
actual := w.Map()
if !reflect.DeepEqual(actual, tc.Out) {
t.Fatalf("%d: bad: %#v", n, actual)
}
}
}
func TestMapFieldWriterCleanList(t *testing.T) {
schema := map[string]*Schema{
"listDeep": &Schema{
Type: TypeList,
Elem: &Resource{
Schema: map[string]*Schema{
"thing1": &Schema{Type: TypeString},
"thing2": &Schema{Type: TypeString},
},
},
},
}
values := []struct {
Addr []string
Value interface{}
Out map[string]string
}{
{
// Base list
[]string{"listDeep"},
[]interface{}{
map[string]interface{}{
"thing1": "a",
"thing2": "b",
},
map[string]interface{}{
"thing1": "c",
"thing2": "d",
},
map[string]interface{}{
"thing1": "e",
"thing2": "f",
},
map[string]interface{}{
"thing1": "g",
"thing2": "h",
},
},
map[string]string{
"listDeep.#": "4",
"listDeep.0.thing1": "a",
"listDeep.0.thing2": "b",
"listDeep.1.thing1": "c",
"listDeep.1.thing2": "d",
"listDeep.2.thing1": "e",
"listDeep.2.thing2": "f",
"listDeep.3.thing1": "g",
"listDeep.3.thing2": "h",
},
},
{
// Remove an element
[]string{"listDeep"},
[]interface{}{
map[string]interface{}{
"thing1": "a",
"thing2": "b",
},
map[string]interface{}{
"thing1": "c",
"thing2": "d",
},
map[string]interface{}{
"thing1": "e",
"thing2": "f",
},
},
map[string]string{
"listDeep.#": "3",
"listDeep.0.thing1": "a",
"listDeep.0.thing2": "b",
"listDeep.1.thing1": "c",
"listDeep.1.thing2": "d",
"listDeep.2.thing1": "e",
"listDeep.2.thing2": "f",
},
},
{
// Rewrite with missing keys. This should normally not be necessary, as
// hopefully the writers are writing zero values as necessary, but for
// brevity we want to make sure that what exists in the writer is exactly
// what the last write looked like coming from the provider.
[]string{"listDeep"},
[]interface{}{
map[string]interface{}{
"thing1": "a",
},
map[string]interface{}{
"thing1": "c",
},
map[string]interface{}{
"thing1": "e",
},
},
map[string]string{
"listDeep.#": "3",
"listDeep.0.thing1": "a",
"listDeep.1.thing1": "c",
"listDeep.2.thing1": "e",
},
},
}
w := &MapFieldWriter{Schema: schema}
for n, tc := range values {
err := w.WriteField(tc.Addr, tc.Value)
if err != nil {
t.Fatalf("%d: err: %s", n, err)
}
actual := w.Map()
if !reflect.DeepEqual(actual, tc.Out) {
t.Fatalf("%d: bad: %#v", n, actual)
}
}
}
func TestMapFieldWriterCleanMap(t *testing.T) {
schema := map[string]*Schema{
"map": &Schema{
Type: TypeMap,
},
}
values := []struct {
Value interface{}
Out map[string]string
}{
{
// Base map
map[string]interface{}{
"thing1": "a",
"thing2": "b",
"thing3": "c",
"thing4": "d",
},
map[string]string{
"map.%": "4",
"map.thing1": "a",
"map.thing2": "b",
"map.thing3": "c",
"map.thing4": "d",
},
},
{
// Base map
map[string]interface{}{
"thing1": "a",
"thing2": "b",
"thing4": "d",
},
map[string]string{
"map.%": "3",
"map.thing1": "a",
"map.thing2": "b",
"map.thing4": "d",
},
},
}
w := &MapFieldWriter{Schema: schema}
for n, tc := range values {
err := w.WriteField([]string{"map"}, tc.Value)
if err != nil {
t.Fatalf("%d: err: %s", n, err)
}
actual := w.Map()
if !reflect.DeepEqual(actual, tc.Out) {
t.Fatalf("%d: bad: %#v", n, actual)
}
}
}

View File

@ -1,46 +0,0 @@
// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT.
package schema
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[getSourceState-1]
_ = x[getSourceConfig-2]
_ = x[getSourceDiff-4]
_ = x[getSourceSet-8]
_ = x[getSourceExact-16]
_ = x[getSourceLevelMask-15]
}
const (
_getSource_name_0 = "getSourceStategetSourceConfig"
_getSource_name_1 = "getSourceDiff"
_getSource_name_2 = "getSourceSet"
_getSource_name_3 = "getSourceLevelMaskgetSourceExact"
)
var (
_getSource_index_0 = [...]uint8{0, 14, 29}
_getSource_index_3 = [...]uint8{0, 18, 32}
)
func (i getSource) String() string {
switch {
case 1 <= i && i <= 2:
i -= 1
return _getSource_name_0[_getSource_index_0[i]:_getSource_index_0[i+1]]
case i == 4:
return _getSource_name_1
case i == 8:
return _getSource_name_2
case 15 <= i && i <= 16:
i -= 15
return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]]
default:
return "getSource(" + strconv.FormatInt(int64(i), 10) + ")"
}
}

View File

@ -1,477 +0,0 @@
package schema
import (
"context"
"errors"
"fmt"
"sort"
"sync"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/terraform"
)
var ReservedProviderFields = []string{
"alias",
"version",
}
// Provider represents a resource provider in Terraform, and properly
// implements all of the ResourceProvider API.
//
// By defining a schema for the configuration of the provider, the
// map of supporting resources, and a configuration function, the schema
// framework takes over and handles all the provider operations for you.
//
// After defining the provider structure, it is unlikely that you'll require any
// of the methods on Provider itself.
type Provider struct {
// Schema is the schema for the configuration of this provider. If this
// provider has no configuration, this can be omitted.
//
// The keys of this map are the configuration keys, and the value is
// the schema describing the value of the configuration.
Schema map[string]*Schema
// ResourcesMap is the list of available resources that this provider
// can manage, along with their Resource structure defining their
// own schemas and CRUD operations.
//
// Provider automatically handles routing operations such as Apply,
// Diff, etc. to the proper resource.
ResourcesMap map[string]*Resource
// DataSourcesMap is the collection of available data sources that
// this provider implements, with a Resource instance defining
// the schema and Read operation of each.
//
// Resource instances for data sources must have a Read function
// and must *not* implement Create, Update or Delete.
DataSourcesMap map[string]*Resource
// ProviderMetaSchema is the schema for the configuration of the meta
// information for this provider. If this provider has no meta info,
// this can be omitted. This functionality is currently experimental
// and subject to change or break without warning; it should only be
// used by providers that are collaborating on its use with the
// Terraform team.
ProviderMetaSchema map[string]*Schema
// ConfigureFunc is a function for configuring the provider. If the
// provider doesn't need to be configured, this can be omitted.
//
// See the ConfigureFunc documentation for more information.
ConfigureFunc ConfigureFunc
// MetaReset is called by TestReset to reset any state stored in the meta
// interface. This is especially important if the StopContext is stored by
// the provider.
MetaReset func() error
meta interface{}
// a mutex is required because TestReset can directly replace the stopCtx
stopMu sync.Mutex
stopCtx context.Context
stopCtxCancel context.CancelFunc
stopOnce sync.Once
TerraformVersion string
}
// ConfigureFunc is the function used to configure a Provider.
//
// The interface{} value returned by this function is stored and passed into
// the subsequent resources as the meta parameter. This return value is
// usually used to pass along a configured API client, a configuration
// structure, etc.
type ConfigureFunc func(*ResourceData) (interface{}, error)
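// Illustrative sketch, not part of this file: a minimal Provider that uses
// ConfigureFunc to build an API client once and hand it to every resource.
// exampleClient and the "endpoint" argument exist only for this example.
type exampleClient struct{ Endpoint string }

func exampleProvider() *Provider {
	return &Provider{
		Schema: map[string]*Schema{
			"endpoint": {Type: TypeString, Required: true},
		},
		ConfigureFunc: func(d *ResourceData) (interface{}, error) {
			// The value returned here becomes the meta argument passed to
			// every resource CRUD function of this provider.
			return &exampleClient{Endpoint: d.Get("endpoint").(string)}, nil
		},
	}
}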
// InternalValidate should be called to validate the structure
// of the provider.
//
// This should be called in a unit test for any provider to verify
// before release that a provider is properly configured for use with
// this library.
func (p *Provider) InternalValidate() error {
if p == nil {
return errors.New("provider is nil")
}
var validationErrors error
sm := schemaMap(p.Schema)
if err := sm.InternalValidate(sm); err != nil {
validationErrors = multierror.Append(validationErrors, err)
}
// Provider-specific checks
for k := range sm {
if isReservedProviderFieldName(k) {
return fmt.Errorf("%s is a reserved field name for a provider", k)
}
}
for k, r := range p.ResourcesMap {
if err := r.InternalValidate(nil, true); err != nil {
validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err))
}
}
for k, r := range p.DataSourcesMap {
if err := r.InternalValidate(nil, false); err != nil {
validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err))
}
}
return validationErrors
}
func isReservedProviderFieldName(name string) bool {
for _, reservedName := range ReservedProviderFields {
if name == reservedName {
return true
}
}
return false
}
// Meta returns the metadata associated with this provider that was
// returned by the Configure call. It will be nil until Configure is called.
func (p *Provider) Meta() interface{} {
return p.meta
}
// SetMeta can be used to forcefully set the Meta object of the provider.
// Note that if Configure is called the return value will override anything
// set here.
func (p *Provider) SetMeta(v interface{}) {
p.meta = v
}
// Stopped reports whether the provider has been stopped or not.
func (p *Provider) Stopped() bool {
ctx := p.StopContext()
select {
case <-ctx.Done():
return true
default:
return false
}
}
// StopContext returns a context that is canceled once the provider is stopped.
func (p *Provider) StopContext() context.Context {
p.stopOnce.Do(p.stopInit)
p.stopMu.Lock()
defer p.stopMu.Unlock()
return p.stopCtx
}
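// Illustrative sketch, not part of this file: a long-running operation can
// watch the stop context (passed to it, for example, via the meta value
// built in ConfigureFunc) so that an interrupted run aborts promptly.
// doWorkUntilDone and step are hypothetical names.
func doWorkUntilDone(ctx context.Context, step func() (done bool, err error)) error {
	for {
		select {
		case <-ctx.Done():
			// The provider was stopped, e.g. because the user interrupted
			// Terraform; give up instead of looping forever.
			return ctx.Err()
		default:
		}
		done, err := step()
		if done || err != nil {
			return err
		}
	}
}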
func (p *Provider) stopInit() {
p.stopMu.Lock()
defer p.stopMu.Unlock()
p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
}
// Stop implementation of terraform.ResourceProvider interface.
func (p *Provider) Stop() error {
p.stopOnce.Do(p.stopInit)
p.stopMu.Lock()
defer p.stopMu.Unlock()
p.stopCtxCancel()
return nil
}
// TestReset resets any state stored in the Provider, and will call TestReset
// on Meta if it implements the TestProvider interface.
// This may be used to reset the schema.Provider at the start of a test, and is
// automatically called by resource.Test.
func (p *Provider) TestReset() error {
p.stopInit()
if p.MetaReset != nil {
return p.MetaReset()
}
return nil
}
// GetSchema implementation of terraform.ResourceProvider interface
func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) {
resourceTypes := map[string]*configschema.Block{}
dataSources := map[string]*configschema.Block{}
for _, name := range req.ResourceTypes {
if r, exists := p.ResourcesMap[name]; exists {
resourceTypes[name] = r.CoreConfigSchema()
}
}
for _, name := range req.DataSources {
if r, exists := p.DataSourcesMap[name]; exists {
dataSources[name] = r.CoreConfigSchema()
}
}
return &terraform.ProviderSchema{
Provider: schemaMap(p.Schema).CoreConfigSchema(),
ResourceTypes: resourceTypes,
DataSources: dataSources,
}, nil
}
// Input implementation of terraform.ResourceProvider interface.
func (p *Provider) Input(
input terraform.UIInput,
c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
return schemaMap(p.Schema).Input(input, c)
}
// Validate implementation of terraform.ResourceProvider interface.
func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
if err := p.InternalValidate(); err != nil {
return nil, []error{fmt.Errorf(
"Internal validation of the provider failed! This is always a bug\n"+
"with the provider itself, and not a user issue. Please report\n"+
"this bug:\n\n%s", err)}
}
return schemaMap(p.Schema).Validate(c)
}
// ValidateResource implementation of terraform.ResourceProvider interface.
func (p *Provider) ValidateResource(
t string, c *terraform.ResourceConfig) ([]string, []error) {
r, ok := p.ResourcesMap[t]
if !ok {
return nil, []error{fmt.Errorf(
"Provider doesn't support resource: %s", t)}
}
return r.Validate(c)
}
// Configure implementation of terraform.ResourceProvider interface.
func (p *Provider) Configure(c *terraform.ResourceConfig) error {
// No configuration
if p.ConfigureFunc == nil {
return nil
}
sm := schemaMap(p.Schema)
// Get a ResourceData for this configuration. To do this, we actually
// generate an intermediary "diff" although that is never exposed.
diff, err := sm.Diff(nil, c, nil, p.meta, true)
if err != nil {
return err
}
data, err := sm.Data(nil, diff)
if err != nil {
return err
}
meta, err := p.ConfigureFunc(data)
if err != nil {
return err
}
p.meta = meta
return nil
}
// Apply implementation of terraform.ResourceProvider interface.
func (p *Provider) Apply(
info *terraform.InstanceInfo,
s *terraform.InstanceState,
d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
r, ok := p.ResourcesMap[info.Type]
if !ok {
return nil, fmt.Errorf("unknown resource type: %s", info.Type)
}
return r.Apply(s, d, p.meta)
}
// Diff implementation of terraform.ResourceProvider interface.
func (p *Provider) Diff(
info *terraform.InstanceInfo,
s *terraform.InstanceState,
c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
r, ok := p.ResourcesMap[info.Type]
if !ok {
return nil, fmt.Errorf("unknown resource type: %s", info.Type)
}
return r.Diff(s, c, p.meta)
}
// SimpleDiff is used by the new protocol wrappers to get a diff that doesn't
// attempt to calculate ignore_changes.
func (p *Provider) SimpleDiff(
info *terraform.InstanceInfo,
s *terraform.InstanceState,
c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
r, ok := p.ResourcesMap[info.Type]
if !ok {
return nil, fmt.Errorf("unknown resource type: %s", info.Type)
}
return r.simpleDiff(s, c, p.meta)
}
// Refresh implementation of terraform.ResourceProvider interface.
func (p *Provider) Refresh(
info *terraform.InstanceInfo,
s *terraform.InstanceState) (*terraform.InstanceState, error) {
r, ok := p.ResourcesMap[info.Type]
if !ok {
return nil, fmt.Errorf("unknown resource type: %s", info.Type)
}
return r.Refresh(s, p.meta)
}
// Resources implementation of terraform.ResourceProvider interface.
func (p *Provider) Resources() []terraform.ResourceType {
keys := make([]string, 0, len(p.ResourcesMap))
for k := range p.ResourcesMap {
keys = append(keys, k)
}
sort.Strings(keys)
result := make([]terraform.ResourceType, 0, len(keys))
for _, k := range keys {
resource := p.ResourcesMap[k]
// This isn't really possible (it'd fail InternalValidate), but
// we do it anyway to avoid a panic.
if resource == nil {
resource = &Resource{}
}
result = append(result, terraform.ResourceType{
Name: k,
Importable: resource.Importer != nil,
// Indicates that a provider is compiled against a new enough
// version of core to support the GetSchema method.
SchemaAvailable: true,
})
}
return result
}
func (p *Provider) ImportState(
info *terraform.InstanceInfo,
id string) ([]*terraform.InstanceState, error) {
// Find the resource
r, ok := p.ResourcesMap[info.Type]
if !ok {
return nil, fmt.Errorf("unknown resource type: %s", info.Type)
}
// If it doesn't support import, error
if r.Importer == nil {
return nil, fmt.Errorf("resource %s doesn't support import", info.Type)
}
// Create the data
data := r.Data(nil)
data.SetId(id)
data.SetType(info.Type)
// Call the import function
results := []*ResourceData{data}
if r.Importer.State != nil {
var err error
results, err = r.Importer.State(data, p.meta)
if err != nil {
return nil, err
}
}
// Convert the results to InstanceState values and return it
states := make([]*terraform.InstanceState, len(results))
for i, r := range results {
states[i] = r.State()
}
// Verify that all are non-nil. If there are any nil the error
// isn't obvious so we circumvent that with a friendlier error.
for _, s := range states {
if s == nil {
return nil, fmt.Errorf(
"nil entry in ImportState results. This is always a bug with\n" +
"the resource that is being imported. Please report this as\n" +
"a bug to Terraform.")
}
}
return states, nil
}
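// Illustrative sketch, not part of this file: a resource opts into the
// import flow above simply by carrying a non-nil Importer (Schema and CRUD
// functions are omitted here for brevity). ImportStatePassthrough, defined
// elsewhere in this package, accepts the user-supplied ID unchanged.
var exampleImportableResource = &Resource{
	Importer: &ResourceImporter{
		State: ImportStatePassthrough,
	},
}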
// ValidateDataSource implementation of terraform.ResourceProvider interface.
func (p *Provider) ValidateDataSource(
t string, c *terraform.ResourceConfig) ([]string, []error) {
r, ok := p.DataSourcesMap[t]
if !ok {
return nil, []error{fmt.Errorf(
"Provider doesn't support data source: %s", t)}
}
return r.Validate(c)
}
// ReadDataDiff implementation of terraform.ResourceProvider interface.
func (p *Provider) ReadDataDiff(
info *terraform.InstanceInfo,
c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
r, ok := p.DataSourcesMap[info.Type]
if !ok {
return nil, fmt.Errorf("unknown data source: %s", info.Type)
}
return r.Diff(nil, c, p.meta)
}
// ReadDataApply implementation of terraform.ResourceProvider interface.
func (p *Provider) ReadDataApply(
info *terraform.InstanceInfo,
d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
r, ok := p.DataSourcesMap[info.Type]
if !ok {
return nil, fmt.Errorf("unknown data source: %s", info.Type)
}
return r.ReadDataApply(d, p.meta)
}
// DataSources implementation of terraform.ResourceProvider interface.
func (p *Provider) DataSources() []terraform.DataSource {
keys := make([]string, 0, len(p.DataSourcesMap))
for k := range p.DataSourcesMap {
keys = append(keys, k)
}
sort.Strings(keys)
result := make([]terraform.DataSource, 0, len(keys))
for _, k := range keys {
result = append(result, terraform.DataSource{
Name: k,
// Indicates that a provider is compiled against a new enough
// version of core to support the GetSchema method.
SchemaAvailable: true,
})
}
return result
}

View File

@ -1,620 +0,0 @@
package schema
import (
"fmt"
"reflect"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/terraform"
)
func TestProvider_impl(t *testing.T) {
var _ terraform.ResourceProvider = new(Provider)
}
func TestProviderGetSchema(t *testing.T) {
// This functionality is already broadly tested in core_schema_test.go,
// so this is just to ensure that the call passes through correctly.
p := &Provider{
Schema: map[string]*Schema{
"bar": {
Type: TypeString,
Required: true,
},
},
ResourcesMap: map[string]*Resource{
"foo": &Resource{
Schema: map[string]*Schema{
"bar": {
Type: TypeString,
Required: true,
},
},
},
},
DataSourcesMap: map[string]*Resource{
"baz": &Resource{
Schema: map[string]*Schema{
"bur": {
Type: TypeString,
Required: true,
},
},
},
},
}
want := &terraform.ProviderSchema{
Provider: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"bar": &configschema.Attribute{
Type: cty.String,
Required: true,
},
},
BlockTypes: map[string]*configschema.NestedBlock{},
},
ResourceTypes: map[string]*configschema.Block{
"foo": testResource(&configschema.Block{
Attributes: map[string]*configschema.Attribute{
"bar": &configschema.Attribute{
Type: cty.String,
Required: true,
},
},
BlockTypes: map[string]*configschema.NestedBlock{},
}),
},
DataSources: map[string]*configschema.Block{
"baz": testResource(&configschema.Block{
Attributes: map[string]*configschema.Attribute{
"bur": &configschema.Attribute{
Type: cty.String,
Required: true,
},
},
BlockTypes: map[string]*configschema.NestedBlock{},
}),
},
}
got, err := p.GetSchema(&terraform.ProviderSchemaRequest{
ResourceTypes: []string{"foo", "bar"},
DataSources: []string{"baz", "bar"},
})
if err != nil {
t.Fatalf("unexpected error %s", err)
}
if !cmp.Equal(got, want, equateEmpty, typeComparer) {
t.Error("wrong result:\n", cmp.Diff(got, want, equateEmpty, typeComparer))
}
}
func TestProviderConfigure(t *testing.T) {
cases := []struct {
P *Provider
Config map[string]interface{}
Err bool
}{
{
P: &Provider{},
Config: nil,
Err: false,
},
{
P: &Provider{
Schema: map[string]*Schema{
"foo": &Schema{
Type: TypeInt,
Optional: true,
},
},
ConfigureFunc: func(d *ResourceData) (interface{}, error) {
if d.Get("foo").(int) == 42 {
return nil, nil
}
return nil, fmt.Errorf("nope")
},
},
Config: map[string]interface{}{
"foo": 42,
},
Err: false,
},
{
P: &Provider{
Schema: map[string]*Schema{
"foo": &Schema{
Type: TypeInt,
Optional: true,
},
},
ConfigureFunc: func(d *ResourceData) (interface{}, error) {
if d.Get("foo").(int) == 42 {
return nil, nil
}
return nil, fmt.Errorf("nope")
},
},
Config: map[string]interface{}{
"foo": 52,
},
Err: true,
},
}
for i, tc := range cases {
c := terraform.NewResourceConfigRaw(tc.Config)
err := tc.P.Configure(c)
if err != nil != tc.Err {
t.Fatalf("%d: %s", i, err)
}
}
}
func TestProviderResources(t *testing.T) {
cases := []struct {
P *Provider
Result []terraform.ResourceType
}{
{
P: &Provider{},
Result: []terraform.ResourceType{},
},
{
P: &Provider{
ResourcesMap: map[string]*Resource{
"foo": nil,
"bar": nil,
},
},
Result: []terraform.ResourceType{
terraform.ResourceType{Name: "bar", SchemaAvailable: true},
terraform.ResourceType{Name: "foo", SchemaAvailable: true},
},
},
{
P: &Provider{
ResourcesMap: map[string]*Resource{
"foo": nil,
"bar": &Resource{Importer: &ResourceImporter{}},
"baz": nil,
},
},
Result: []terraform.ResourceType{
terraform.ResourceType{Name: "bar", Importable: true, SchemaAvailable: true},
terraform.ResourceType{Name: "baz", SchemaAvailable: true},
terraform.ResourceType{Name: "foo", SchemaAvailable: true},
},
},
}
for i, tc := range cases {
actual := tc.P.Resources()
if !reflect.DeepEqual(actual, tc.Result) {
t.Fatalf("%d: %#v", i, actual)
}
}
}
func TestProviderDataSources(t *testing.T) {
cases := []struct {
P *Provider
Result []terraform.DataSource
}{
{
P: &Provider{},
Result: []terraform.DataSource{},
},
{
P: &Provider{
DataSourcesMap: map[string]*Resource{
"foo": nil,
"bar": nil,
},
},
Result: []terraform.DataSource{
terraform.DataSource{Name: "bar", SchemaAvailable: true},
terraform.DataSource{Name: "foo", SchemaAvailable: true},
},
},
}
for i, tc := range cases {
actual := tc.P.DataSources()
if !reflect.DeepEqual(actual, tc.Result) {
t.Fatalf("%d: got %#v; want %#v", i, actual, tc.Result)
}
}
}
func TestProviderValidate(t *testing.T) {
cases := []struct {
P *Provider
Config map[string]interface{}
Err bool
}{
{
P: &Provider{
Schema: map[string]*Schema{
"foo": &Schema{},
},
},
Config: nil,
Err: true,
},
}
for i, tc := range cases {
c := terraform.NewResourceConfigRaw(tc.Config)
_, es := tc.P.Validate(c)
if len(es) > 0 != tc.Err {
t.Fatalf("%d: %#v", i, es)
}
}
}
func TestProviderDiff_legacyTimeoutType(t *testing.T) {
p := &Provider{
ResourcesMap: map[string]*Resource{
"blah": &Resource{
Schema: map[string]*Schema{
"foo": {
Type: TypeInt,
Optional: true,
},
},
Timeouts: &ResourceTimeout{
Create: DefaultTimeout(10 * time.Minute),
},
},
},
}
invalidCfg := map[string]interface{}{
"foo": 42,
"timeouts": []interface{}{
map[string]interface{}{
"create": "40m",
},
},
}
ic := terraform.NewResourceConfigRaw(invalidCfg)
_, err := p.Diff(
&terraform.InstanceInfo{
Type: "blah",
},
nil,
ic,
)
if err != nil {
t.Fatal(err)
}
}
func TestProviderDiff_timeoutInvalidValue(t *testing.T) {
p := &Provider{
ResourcesMap: map[string]*Resource{
"blah": &Resource{
Schema: map[string]*Schema{
"foo": {
Type: TypeInt,
Optional: true,
},
},
Timeouts: &ResourceTimeout{
Create: DefaultTimeout(10 * time.Minute),
},
},
},
}
invalidCfg := map[string]interface{}{
"foo": 42,
"timeouts": map[string]interface{}{
"create": "invalid",
},
}
ic := terraform.NewResourceConfigRaw(invalidCfg)
_, err := p.Diff(
&terraform.InstanceInfo{
Type: "blah",
},
nil,
ic,
)
if err == nil {
t.Fatal("Expected provider.Diff to fail with invalid timeout value")
}
expectedErrMsg := `time: invalid duration "invalid"`
if !strings.Contains(err.Error(), expectedErrMsg) {
t.Fatalf("Unexpected error message: %q\nExpected message to contain %q",
err.Error(),
expectedErrMsg)
}
}
func TestProviderValidateResource(t *testing.T) {
cases := []struct {
P *Provider
Type string
Config map[string]interface{}
Err bool
}{
{
P: &Provider{},
Type: "foo",
Config: nil,
Err: true,
},
{
P: &Provider{
ResourcesMap: map[string]*Resource{
"foo": &Resource{},
},
},
Type: "foo",
Config: nil,
Err: false,
},
}
for i, tc := range cases {
c := terraform.NewResourceConfigRaw(tc.Config)
_, es := tc.P.ValidateResource(tc.Type, c)
if len(es) > 0 != tc.Err {
t.Fatalf("%d: %#v", i, es)
}
}
}
func TestProviderImportState_default(t *testing.T) {
p := &Provider{
ResourcesMap: map[string]*Resource{
"foo": &Resource{
Importer: &ResourceImporter{},
},
},
}
states, err := p.ImportState(&terraform.InstanceInfo{
Type: "foo",
}, "bar")
if err != nil {
t.Fatalf("err: %s", err)
}
if len(states) != 1 {
t.Fatalf("bad: %#v", states)
}
if states[0].ID != "bar" {
t.Fatalf("bad: %#v", states)
}
}
func TestProviderImportState_setsId(t *testing.T) {
var val string
stateFunc := func(d *ResourceData, meta interface{}) ([]*ResourceData, error) {
val = d.Id()
return []*ResourceData{d}, nil
}
p := &Provider{
ResourcesMap: map[string]*Resource{
"foo": &Resource{
Importer: &ResourceImporter{
State: stateFunc,
},
},
},
}
_, err := p.ImportState(&terraform.InstanceInfo{
Type: "foo",
}, "bar")
if err != nil {
t.Fatalf("err: %s", err)
}
if val != "bar" {
t.Fatal("should set id")
}
}
func TestProviderImportState_setsType(t *testing.T) {
var tVal string
stateFunc := func(d *ResourceData, meta interface{}) ([]*ResourceData, error) {
d.SetId("foo")
tVal = d.State().Ephemeral.Type
return []*ResourceData{d}, nil
}
p := &Provider{
ResourcesMap: map[string]*Resource{
"foo": &Resource{
Importer: &ResourceImporter{
State: stateFunc,
},
},
},
}
_, err := p.ImportState(&terraform.InstanceInfo{
Type: "foo",
}, "bar")
if err != nil {
t.Fatalf("err: %s", err)
}
if tVal != "foo" {
t.Fatal("should set type")
}
}
func TestProviderMeta(t *testing.T) {
p := new(Provider)
if v := p.Meta(); v != nil {
t.Fatalf("bad: %#v", v)
}
expected := 42
p.SetMeta(42)
if v := p.Meta(); !reflect.DeepEqual(v, expected) {
t.Fatalf("bad: %#v", v)
}
}
func TestProviderStop(t *testing.T) {
var p Provider
if p.Stopped() {
t.Fatal("should not be stopped")
}
// Verify stopch blocks
ch := p.StopContext().Done()
select {
case <-ch:
t.Fatal("should not be stopped")
case <-time.After(10 * time.Millisecond):
}
// Stop it
if err := p.Stop(); err != nil {
t.Fatalf("err: %s", err)
}
// Verify
if !p.Stopped() {
t.Fatal("should be stopped")
}
select {
case <-ch:
case <-time.After(10 * time.Millisecond):
t.Fatal("should be stopped")
}
}
func TestProviderStop_stopFirst(t *testing.T) {
var p Provider
// Stop it
if err := p.Stop(); err != nil {
t.Fatalf("err: %s", err)
}
// Verify
if !p.Stopped() {
t.Fatal("should be stopped")
}
select {
case <-p.StopContext().Done():
case <-time.After(10 * time.Millisecond):
t.Fatal("should be stopped")
}
}
func TestProviderReset(t *testing.T) {
var p Provider
stopCtx := p.StopContext()
p.MetaReset = func() error {
stopCtx = p.StopContext()
return nil
}
// cancel the current context
p.Stop()
if err := p.TestReset(); err != nil {
t.Fatal(err)
}
// the first context should have been replaced
if err := stopCtx.Err(); err != nil {
t.Fatal(err)
}
// we should not get a canceled context here either
if err := p.StopContext().Err(); err != nil {
t.Fatal(err)
}
}
func TestProvider_InternalValidate(t *testing.T) {
cases := []struct {
P *Provider
ExpectedErr error
}{
{
P: &Provider{
Schema: map[string]*Schema{
"foo": {
Type: TypeBool,
Optional: true,
},
},
},
ExpectedErr: nil,
},
{ // Reserved resource fields should be allowed in provider block
P: &Provider{
Schema: map[string]*Schema{
"provisioner": {
Type: TypeString,
Optional: true,
},
"count": {
Type: TypeInt,
Optional: true,
},
},
},
ExpectedErr: nil,
},
{ // Reserved provider fields should not be allowed
P: &Provider{
Schema: map[string]*Schema{
"alias": {
Type: TypeString,
Optional: true,
},
},
},
ExpectedErr: fmt.Errorf("%s is a reserved field name for a provider", "alias"),
},
}
for i, tc := range cases {
err := tc.P.InternalValidate()
if tc.ExpectedErr == nil {
if err != nil {
t.Fatalf("%d: Error returned (expected no error): %s", i, err)
}
continue
}
if tc.ExpectedErr != nil && err == nil {
t.Fatalf("%d: Expected error (%s), but no error returned", i, tc.ExpectedErr)
}
if err.Error() != tc.ExpectedErr.Error() {
t.Fatalf("%d: Errors don't match. Expected: %#v Given: %#v", i, tc.ExpectedErr, err)
}
}
}

View File

@ -1,205 +0,0 @@
package schema
import (
"context"
"errors"
"fmt"
"sync"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/terraform"
)
// Provisioner represents a resource provisioner in Terraform and properly
// implements all of the ResourceProvisioner API.
//
// This higher level structure makes it much easier to implement a new or
// custom provisioner for Terraform.
//
// The function callbacks for this structure are all passed a context object.
// This context object has a number of pre-defined values that can be accessed
// via the global functions defined in context.go.
type Provisioner struct {
// ConnSchema is the schema for the connection settings for this
// provisioner.
//
// The keys of this map are the configuration keys, and the value is
// the schema describing the value of the configuration.
//
// NOTE: The value of connection keys can only be strings for now.
ConnSchema map[string]*Schema
// Schema is the schema for the usage of this provisioner.
//
// The keys of this map are the configuration keys, and the value is
// the schema describing the value of the configuration.
Schema map[string]*Schema
// ApplyFunc is the function for executing the provisioner. This is required.
// It is given a context. See the Provisioner struct docs for more
// information.
ApplyFunc func(ctx context.Context) error
// ValidateFunc is a function for extended validation. This is optional
// and should be used when individual field validation is not enough.
ValidateFunc func(*terraform.ResourceConfig) ([]string, []error)
stopCtx context.Context
stopCtxCancel context.CancelFunc
stopOnce sync.Once
}
// Keys that can be used to access data in the context parameters for
// Provisioners.
var (
connDataInvalid = contextKey("data invalid")
// This returns a *ResourceData for the connection information.
// Guaranteed to never be nil.
ProvConnDataKey = contextKey("provider conn data")
// This returns a *ResourceData for the config information.
// Guaranteed to never be nil.
ProvConfigDataKey = contextKey("provider config data")
// This returns a terraform.UIOutput. Guaranteed to never be nil.
ProvOutputKey = contextKey("provider output")
// This returns the raw InstanceState passed to Apply. Guaranteed to
// be set, but may be nil.
ProvRawStateKey = contextKey("provider raw state")
)
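// Illustrative sketch, not part of this file: an ApplyFunc pulls its inputs
// back out of the context using the keys above. The "host" connection
// argument and "command" config argument are hypothetical.
func exampleProvisioner() *Provisioner {
	return &Provisioner{
		ConnSchema: map[string]*Schema{
			"host": {Type: TypeString, Optional: true},
		},
		Schema: map[string]*Schema{
			"command": {Type: TypeString, Required: true},
		},
		ApplyFunc: func(ctx context.Context) error {
			connData := ctx.Value(ProvConnDataKey).(*ResourceData)
			configData := ctx.Value(ProvConfigDataKey).(*ResourceData)
			output := ctx.Value(ProvOutputKey).(terraform.UIOutput)
			output.Output(fmt.Sprintf("running %q on %s",
				configData.Get("command").(string),
				connData.Get("host").(string)))
			return nil
		},
	}
}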
// InternalValidate should be called to validate the structure
// of the provisioner.
//
// This should be called in a unit test to verify before release that this
// structure is properly configured for use.
func (p *Provisioner) InternalValidate() error {
if p == nil {
return errors.New("provisioner is nil")
}
var validationErrors error
{
sm := schemaMap(p.ConnSchema)
if err := sm.InternalValidate(sm); err != nil {
validationErrors = multierror.Append(validationErrors, err)
}
}
{
sm := schemaMap(p.Schema)
if err := sm.InternalValidate(sm); err != nil {
validationErrors = multierror.Append(validationErrors, err)
}
}
if p.ApplyFunc == nil {
validationErrors = multierror.Append(validationErrors, fmt.Errorf(
"ApplyFunc must not be nil"))
}
return validationErrors
}
// StopContext returns a context that is canceled when the provisioner is stopped.
func (p *Provisioner) StopContext() context.Context {
p.stopOnce.Do(p.stopInit)
return p.stopCtx
}
func (p *Provisioner) stopInit() {
p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
}
// Stop implementation of terraform.ResourceProvisioner interface.
func (p *Provisioner) Stop() error {
p.stopOnce.Do(p.stopInit)
p.stopCtxCancel()
return nil
}
// GetConfigSchema implementation of terraform.ResourceProvisioner interface.
func (p *Provisioner) GetConfigSchema() (*configschema.Block, error) {
return schemaMap(p.Schema).CoreConfigSchema(), nil
}
// Apply implementation of terraform.ResourceProvisioner interface.
func (p *Provisioner) Apply(
o terraform.UIOutput,
s *terraform.InstanceState,
c *terraform.ResourceConfig) error {
var connData, configData *ResourceData
{
// We first need to turn the connection information into a
// terraform.ResourceConfig so that we can use that type to more
// easily build a ResourceData structure. We do this by simply treating
// the conn info as configuration input.
raw := make(map[string]interface{})
if s != nil {
for k, v := range s.Ephemeral.ConnInfo {
raw[k] = v
}
}
c := terraform.NewResourceConfigRaw(raw)
sm := schemaMap(p.ConnSchema)
diff, err := sm.Diff(nil, c, nil, nil, true)
if err != nil {
return err
}
connData, err = sm.Data(nil, diff)
if err != nil {
return err
}
}
{
// Build the configuration data. Doing this requires making a "diff"
// even though that's never used. We use that just to get the correct types.
configMap := schemaMap(p.Schema)
diff, err := configMap.Diff(nil, c, nil, nil, true)
if err != nil {
return err
}
configData, err = configMap.Data(nil, diff)
if err != nil {
return err
}
}
// Build the context and call the function
ctx := p.StopContext()
ctx = context.WithValue(ctx, ProvConnDataKey, connData)
ctx = context.WithValue(ctx, ProvConfigDataKey, configData)
ctx = context.WithValue(ctx, ProvOutputKey, o)
ctx = context.WithValue(ctx, ProvRawStateKey, s)
return p.ApplyFunc(ctx)
}
// Validate implements the terraform.ResourceProvisioner interface.
func (p *Provisioner) Validate(c *terraform.ResourceConfig) (ws []string, es []error) {
if err := p.InternalValidate(); err != nil {
return nil, []error{fmt.Errorf(
"Internal validation of the provisioner failed! This is always a bug\n"+
"with the provisioner itself, and not a user issue. Please report\n"+
"this bug:\n\n%s", err)}
}
if p.Schema != nil {
w, e := schemaMap(p.Schema).Validate(c)
ws = append(ws, w...)
es = append(es, e...)
}
if p.ValidateFunc != nil {
w, e := p.ValidateFunc(c)
ws = append(ws, w...)
es = append(es, e...)
}
return ws, es
}

View File

@ -1,334 +0,0 @@
package schema
import (
"context"
"fmt"
"reflect"
"testing"
"time"
"github.com/hashicorp/terraform/terraform"
)
func TestProvisioner_impl(t *testing.T) {
var _ terraform.ResourceProvisioner = new(Provisioner)
}
func noopApply(ctx context.Context) error {
return nil
}
func TestProvisionerValidate(t *testing.T) {
cases := []struct {
Name string
P *Provisioner
Config map[string]interface{}
Err bool
Warns []string
}{
{
Name: "No ApplyFunc",
P: &Provisioner{},
Config: nil,
Err: true,
},
{
Name: "Incorrect schema",
P: &Provisioner{
Schema: map[string]*Schema{
"foo": {},
},
ApplyFunc: noopApply,
},
Config: nil,
Err: true,
},
{
"Basic required field",
&Provisioner{
Schema: map[string]*Schema{
"foo": &Schema{
Required: true,
Type: TypeString,
},
},
ApplyFunc: noopApply,
},
nil,
true,
nil,
},
{
"Basic required field set",
&Provisioner{
Schema: map[string]*Schema{
"foo": &Schema{
Required: true,
Type: TypeString,
},
},
ApplyFunc: noopApply,
},
map[string]interface{}{
"foo": "bar",
},
false,
nil,
},
{
Name: "Warning from property validation",
P: &Provisioner{
Schema: map[string]*Schema{
"foo": {
Type: TypeString,
Optional: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
ws = append(ws, "Simple warning from property validation")
return
},
},
},
ApplyFunc: noopApply,
},
Config: map[string]interface{}{
"foo": "",
},
Err: false,
Warns: []string{"Simple warning from property validation"},
},
{
Name: "No schema",
P: &Provisioner{
Schema: nil,
ApplyFunc: noopApply,
},
Config: nil,
Err: false,
},
{
Name: "Warning from provisioner ValidateFunc",
P: &Provisioner{
Schema: nil,
ApplyFunc: noopApply,
ValidateFunc: func(*terraform.ResourceConfig) (ws []string, errors []error) {
ws = append(ws, "Simple warning from provisioner ValidateFunc")
return
},
},
Config: nil,
Err: false,
Warns: []string{"Simple warning from provisioner ValidateFunc"},
},
}
for i, tc := range cases {
t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) {
c := terraform.NewResourceConfigRaw(tc.Config)
ws, es := tc.P.Validate(c)
if len(es) > 0 != tc.Err {
t.Fatalf("%d: %#v %s", i, es, es)
}
if (tc.Warns != nil || len(ws) != 0) && !reflect.DeepEqual(ws, tc.Warns) {
t.Fatalf("%d: warnings mismatch, actual: %#v", i, ws)
}
})
}
}
func TestProvisionerApply(t *testing.T) {
cases := []struct {
Name string
P *Provisioner
Conn map[string]string
Config map[string]interface{}
Err bool
}{
{
"Basic config",
&Provisioner{
ConnSchema: map[string]*Schema{
"foo": &Schema{
Type: TypeString,
Optional: true,
},
},
Schema: map[string]*Schema{
"foo": &Schema{
Type: TypeInt,
Optional: true,
},
},
ApplyFunc: func(ctx context.Context) error {
cd := ctx.Value(ProvConnDataKey).(*ResourceData)
d := ctx.Value(ProvConfigDataKey).(*ResourceData)
if d.Get("foo").(int) != 42 {
return fmt.Errorf("bad config data")
}
if cd.Get("foo").(string) != "bar" {
return fmt.Errorf("bad conn data")
}
return nil
},
},
map[string]string{
"foo": "bar",
},
map[string]interface{}{
"foo": 42,
},
false,
},
}
for i, tc := range cases {
t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) {
c := terraform.NewResourceConfigRaw(tc.Config)
state := &terraform.InstanceState{
Ephemeral: terraform.EphemeralState{
ConnInfo: tc.Conn,
},
}
err := tc.P.Apply(nil, state, c)
if err != nil != tc.Err {
t.Fatalf("%d: %s", i, err)
}
})
}
}
func TestProvisionerApply_nilState(t *testing.T) {
p := &Provisioner{
ConnSchema: map[string]*Schema{
"foo": &Schema{
Type: TypeString,
Optional: true,
},
},
Schema: map[string]*Schema{
"foo": &Schema{
Type: TypeInt,
Optional: true,
},
},
ApplyFunc: func(ctx context.Context) error {
return nil
},
}
conf := map[string]interface{}{
"foo": 42,
}
c := terraform.NewResourceConfigRaw(conf)
err := p.Apply(nil, nil, c)
if err != nil {
t.Fatalf("err: %s", err)
}
}
func TestProvisionerStop(t *testing.T) {
var p Provisioner
// Verify stopch blocks
ch := p.StopContext().Done()
select {
case <-ch:
t.Fatal("should not be stopped")
case <-time.After(10 * time.Millisecond):
}
// Stop it
if err := p.Stop(); err != nil {
t.Fatalf("err: %s", err)
}
select {
case <-ch:
case <-time.After(10 * time.Millisecond):
t.Fatal("should be stopped")
}
}
func TestProvisionerStop_apply(t *testing.T) {
p := &Provisioner{
ConnSchema: map[string]*Schema{
"foo": &Schema{
Type: TypeString,
Optional: true,
},
},
Schema: map[string]*Schema{
"foo": &Schema{
Type: TypeInt,
Optional: true,
},
},
ApplyFunc: func(ctx context.Context) error {
<-ctx.Done()
return nil
},
}
conn := map[string]string{
"foo": "bar",
}
conf := map[string]interface{}{
"foo": 42,
}
c := terraform.NewResourceConfigRaw(conf)
state := &terraform.InstanceState{
Ephemeral: terraform.EphemeralState{
ConnInfo: conn,
},
}
// Run the apply in a goroutine
doneCh := make(chan struct{})
go func() {
p.Apply(nil, state, c)
close(doneCh)
}()
// Should block
select {
case <-doneCh:
t.Fatal("should not be done")
case <-time.After(10 * time.Millisecond):
}
// Stop!
p.Stop()
select {
case <-doneCh:
case <-time.After(10 * time.Millisecond):
t.Fatal("should be done")
}
}
func TestProvisionerStop_stopFirst(t *testing.T) {
var p Provisioner
// Stop it
if err := p.Stop(); err != nil {
t.Fatalf("err: %s", err)
}
select {
case <-p.StopContext().Done():
case <-time.After(10 * time.Millisecond):
t.Fatal("should be stopped")
}
}

View File

@ -1,842 +0,0 @@
package schema
import (
"errors"
"fmt"
"log"
"strconv"
"github.com/hashicorp/terraform/terraform"
"github.com/zclconf/go-cty/cty"
)
var ReservedDataSourceFields = []string{
"connection",
"count",
"depends_on",
"lifecycle",
"provider",
"provisioner",
}
var ReservedResourceFields = []string{
"connection",
"count",
"depends_on",
"id",
"lifecycle",
"provider",
"provisioner",
}
// Resource represents a thing in Terraform that has a set of configurable
// attributes and a lifecycle (create, read, update, delete).
//
// The Resource schema is an abstraction that allows provider writers to
// worry only about CRUD operations while off-loading validation, diff
// generation, etc. to this higher level library.
//
// In spite of the name, this struct is not used only for terraform resources,
// but also for data sources. In the case of data sources, the Create,
// Update and Delete functions must not be provided.
type Resource struct {
// Schema is the schema for the configuration of this resource.
//
// The keys of this map are the configuration keys, and the values
// describe the schema of the configuration value.
//
// The schema is used to represent both configurable data as well
// as data that might be computed in the process of creating this
// resource.
Schema map[string]*Schema
// SchemaVersion is the version number for this resource's Schema
// definition. The current SchemaVersion is stored in the state for each
// resource. Provider authors can increment this version number
// when Schema semantics change. If the State's SchemaVersion is less than
// the current SchemaVersion, the InstanceState is yielded to the
// MigrateState callback, where the provider can make whatever changes it
// needs to update the state to be compatible to the latest version of the
// Schema.
//
// When unset, SchemaVersion defaults to 0, so provider authors can start
// their Versioning at any integer >= 1
SchemaVersion int
// MigrateState is deprecated and any new changes to a resource's schema
// should be handled by StateUpgraders. Existing MigrateState implementations
// should remain for compatibility with existing state. MigrateState will
// still be called if the stored SchemaVersion is less than the
// first version of the StateUpgraders.
//
// MigrateState is responsible for updating an InstanceState with an old
// version to the format expected by the current version of the Schema.
//
// It is called during Refresh if the State's stored SchemaVersion is less
// than the current SchemaVersion of the Resource.
//
// The function is yielded the state's stored SchemaVersion and a pointer to
// the InstanceState that needs updating, as well as the configured
// provider's configured meta interface{}, in case the migration process
// needs to make any remote API calls.
MigrateState StateMigrateFunc
// StateUpgraders contains the functions responsible for upgrading an
// existing state with an old schema version to a newer schema. It is
// called specifically by Terraform when the stored schema version is less
// than the current SchemaVersion of the Resource.
//
// StateUpgraders map specific schema versions to a StateUpgrader
// function. The registered versions are expected to be ordered,
// consecutive values. The initial value may be greater than 0 to account
// for legacy schemas that weren't recorded and can be handled by
// MigrateState.
StateUpgraders []StateUpgrader
// The functions below are the CRUD operations for this resource.
//
// The only optional operation is Update. If Update is not implemented,
// then updates will not be supported for this resource.
//
// The ResourceData parameter in the functions below are used to
// query configuration and changes for the resource as well as to set
// the ID, computed data, etc.
//
// The interface{} parameter is the result of the ConfigureFunc in
// the provider for this resource. If the provider does not define
// a ConfigureFunc, this will be nil. This parameter should be used
// to store API clients, configuration structures, etc.
//
// If any errors occur during each of the operation, an error should be
// returned. If a resource was partially updated, be careful to enable
// partial state mode for ResourceData and use it accordingly.
//
// Exists is a function that is called to check if a resource still
// exists. If this returns false, then this will affect the diff
// accordingly. If this function isn't set, it will not be called. You
// can also signal existence in the Read method by calling d.SetId("")
// if the Resource is no longer present and should be removed from state.
// The *ResourceData passed to Exists should _not_ be modified.
Create CreateFunc
Read ReadFunc
Update UpdateFunc
Delete DeleteFunc
Exists ExistsFunc
// CustomizeDiff is a custom function for working with the diff that
// Terraform has created for this resource - it can be used to customize the
// diff that has been created, diff values not controlled by configuration,
// or even veto the diff altogether and abort the plan. It is passed a
// *ResourceDiff, a structure similar to ResourceData but lacking most write
// functions like Set, while introducing new functions that work with the
// diff such as SetNew, SetNewComputed, and ForceNew.
//
// The phases Terraform runs this in, and the state available via functions
// like Get and GetChange, are as follows:
//
// * New resource: One run with no state
// * Existing resource: One run with state
// * Existing resource, forced new: One run with state (before ForceNew),
// then one run without state (as if new resource)
// * Tainted resource: No runs (custom diff logic is skipped)
// * Destroy: No runs (standard diff logic is skipped on destroy diffs)
//
// This function needs to be resilient to support all scenarios.
//
// If this function needs to access external API resources, remember to flag
// the RequiresRefresh attribute mentioned below to ensure that
// -refresh=false is blocked when running plan or apply, as this means that
// this resource requires refresh-like behaviour to work effectively.
//
// For the most part, only computed fields can be customized by this
// function.
//
// This function is only allowed on regular resources (not data sources).
CustomizeDiff CustomizeDiffFunc
// Importer is the ResourceImporter implementation for this resource.
// If this is nil, then this resource does not support importing. If
// this is non-nil, then it supports importing and ResourceImporter
// must be validated. The validity of ResourceImporter is verified
// by InternalValidate on Resource.
Importer *ResourceImporter
// If non-empty, this string is emitted as a warning during Validate.
DeprecationMessage string
// Timeouts allow users to specify specific time durations in which an
// operation should time out, to allow them to extend an action to suit their
// usage. For example, a user may specify a large Creation timeout for their
// AWS RDS Instance due to its size, or restoring from a snapshot.
// Resource implementors must enable Timeout support by adding the allowed
// actions (Create, Read, Update, Delete, Default) to the Resource struct, and
// accessing them in the matching methods.
Timeouts *ResourceTimeout
}
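// Illustrative sketch, not part of this file: the smallest useful Resource.
// With no Update defined, every non-Computed attribute must be ForceNew
// (enforced by InternalValidate below). All names here are hypothetical.
func resourceExampleServer() *Resource {
	return &Resource{
		Schema: map[string]*Schema{
			"name":    {Type: TypeString, Required: true, ForceNew: true},
			"address": {Type: TypeString, Computed: true},
		},
		Create: resourceExampleServerCreate,
		Read:   resourceExampleServerRead,
		Delete: resourceExampleServerDelete,
	}
}

func resourceExampleServerCreate(d *ResourceData, meta interface{}) error {
	// meta is whatever the provider's ConfigureFunc returned.
	d.SetId(d.Get("name").(string))
	return resourceExampleServerRead(d, meta)
}

func resourceExampleServerRead(d *ResourceData, meta interface{}) error {
	// A real Read would refresh computed attributes from the remote API and
	// call d.SetId("") if the object no longer exists.
	return d.Set("address", "198.51.100.1")
}

func resourceExampleServerDelete(d *ResourceData, meta interface{}) error {
	d.SetId("")
	return nil
}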
// ShimInstanceStateFromValue converts a cty.Value to a
// terraform.InstanceState.
func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) {
// Get the raw shimmed value. While this is correct, the set hashes don't
// match those from the Schema.
s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion)
// We now rebuild the state through the ResourceData, so that the set indexes
// match what helper/schema expects.
data, err := schemaMap(r.Schema).Data(s, nil)
if err != nil {
return nil, err
}
s = data.State()
if s == nil {
s = &terraform.InstanceState{}
}
return s, nil
}
// See Resource documentation.
type CreateFunc func(*ResourceData, interface{}) error
// See Resource documentation.
type ReadFunc func(*ResourceData, interface{}) error
// See Resource documentation.
type UpdateFunc func(*ResourceData, interface{}) error
// See Resource documentation.
type DeleteFunc func(*ResourceData, interface{}) error
// See Resource documentation.
type ExistsFunc func(*ResourceData, interface{}) (bool, error)
// See Resource documentation.
type StateMigrateFunc func(
int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error)
type StateUpgrader struct {
// Version is the version schema that this Upgrader will handle, converting
// it to Version+1.
Version int
// Type describes the schema that this function can upgrade. Type is
// required to decode the schema if the state was stored in a legacy
// flatmap format.
Type cty.Type
// Upgrade takes the JSON encoded state and the provider meta value, and
// upgrades the state one single schema version. The provided state is
// decoded into the default JSON types using a map[string]interface{}. It
// is up to the StateUpgradeFunc to ensure that the returned value can be
// encoded using the new schema.
Upgrade StateUpgradeFunc
}
// See StateUpgrader
type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error)
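// Illustrative sketch, not part of this file: an upgrader used together
// with SchemaVersion: 1 on the Resource, renaming a hypothetical "fullname"
// attribute from version 0 to "name" in version 1.
var exampleUpgraders = []StateUpgrader{
	{
		Version: 0,
		// Type describes the *old* (version 0) schema so the legacy
		// flatmap state can be decoded.
		Type: cty.Object(map[string]cty.Type{
			"id":       cty.String,
			"fullname": cty.String,
		}),
		Upgrade: func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) {
			rawState["name"] = rawState["fullname"]
			delete(rawState, "fullname")
			return rawState, nil
		},
	},
}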
// See Resource documentation.
type CustomizeDiffFunc func(*ResourceDiff, interface{}) error
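// Illustrative sketch, not part of this file: a CustomizeDiffFunc that
// forces replacement when a hypothetical "size" attribute shrinks, because
// the (imagined) remote API only supports growing it in place.
func forceNewOnShrink(d *ResourceDiff, meta interface{}) error {
	o, n := d.GetChange("size")
	if n.(int) < o.(int) {
		return d.ForceNew("size")
	}
	return nil
}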
// Apply creates, updates, and/or deletes a resource.
func (r *Resource) Apply(
s *terraform.InstanceState,
d *terraform.InstanceDiff,
meta interface{}) (*terraform.InstanceState, error) {
data, err := schemaMap(r.Schema).Data(s, d)
if err != nil {
return s, err
}
if s != nil && data != nil {
data.providerMeta = s.ProviderMeta
}
// Instance Diff should have the timeout info; we need to copy it over to the
// ResourceData meta
rt := ResourceTimeout{}
if _, ok := d.Meta[TimeoutKey]; ok {
if err := rt.DiffDecode(d); err != nil {
log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
}
} else if s != nil {
if _, ok := s.Meta[TimeoutKey]; ok {
if err := rt.StateDecode(s); err != nil {
log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
}
}
} else {
log.Printf("[DEBUG] No meta timeoutkey found in Apply()")
}
data.timeouts = &rt
if s == nil {
// The Terraform API dictates that this should never happen, but
// it doesn't hurt to be safe in this case.
s = new(terraform.InstanceState)
}
if d.Destroy || d.RequiresNew() {
if s.ID != "" {
// Destroy the resource since it is created
if err := r.Delete(data, meta); err != nil {
return r.recordCurrentSchemaVersion(data.State()), err
}
// Make sure the ID is gone.
data.SetId("")
}
// If we're only destroying, and not creating, then return
// now since we're done!
if !d.RequiresNew() {
return nil, nil
}
// Reset the data to be stateless since we just destroyed
data, err = schemaMap(r.Schema).Data(nil, d)
// data was reset, need to re-apply the parsed timeouts
data.timeouts = &rt
if err != nil {
return nil, err
}
}
err = nil
if data.Id() == "" {
// We're creating, it is a new resource.
data.MarkNewResource()
err = r.Create(data, meta)
} else {
if r.Update == nil {
return s, fmt.Errorf("doesn't support update")
}
err = r.Update(data, meta)
}
return r.recordCurrentSchemaVersion(data.State()), err
}
// Diff returns a diff of this resource.
func (r *Resource) Diff(
s *terraform.InstanceState,
c *terraform.ResourceConfig,
meta interface{}) (*terraform.InstanceDiff, error) {
t := &ResourceTimeout{}
err := t.ConfigDecode(r, c)
if err != nil {
return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
}
instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true)
if err != nil {
return instanceDiff, err
}
if instanceDiff != nil {
if err := t.DiffEncode(instanceDiff); err != nil {
log.Printf("[ERR] Error encoding timeout to instance diff: %s", err)
}
} else {
log.Printf("[DEBUG] Instance Diff is nil in Diff()")
}
return instanceDiff, err
}
func (r *Resource) simpleDiff(
s *terraform.InstanceState,
c *terraform.ResourceConfig,
meta interface{}) (*terraform.InstanceDiff, error) {
instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false)
if err != nil {
return instanceDiff, err
}
if instanceDiff == nil {
instanceDiff = terraform.NewInstanceDiff()
}
// Make sure the old value is set in each of the instance diffs.
// This was done by the RequiresNew logic in the full legacy Diff.
for k, attr := range instanceDiff.Attributes {
if attr == nil {
continue
}
if s != nil {
attr.Old = s.Attributes[k]
}
}
return instanceDiff, nil
}
// Validate validates the resource configuration against the schema.
func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) {
warns, errs := schemaMap(r.Schema).Validate(c)
if r.DeprecationMessage != "" {
warns = append(warns, r.DeprecationMessage)
}
return warns, errs
}
// ReadDataApply loads the data for a data source, given a diff that
// describes the configuration arguments and desired computed attributes.
func (r *Resource) ReadDataApply(
d *terraform.InstanceDiff,
meta interface{},
) (*terraform.InstanceState, error) {
// Data sources are always built completely from scratch
// on each read, so the source state is always nil.
data, err := schemaMap(r.Schema).Data(nil, d)
if err != nil {
return nil, err
}
err = r.Read(data, meta)
state := data.State()
if state != nil && state.ID == "" {
// Data sources can set an ID if they want, but they aren't
// required to; we'll provide a placeholder if they don't,
// to preserve the invariant that all resources have non-empty
// ids.
state.ID = "-"
}
return r.recordCurrentSchemaVersion(state), err
}
// RefreshWithoutUpgrade reads the instance state, but does not call
// MigrateState or the StateUpgraders, since those are now invoked in a
// separate API call.
// RefreshWithoutUpgrade is part of the new plugin shims.
func (r *Resource) RefreshWithoutUpgrade(
s *terraform.InstanceState,
meta interface{}) (*terraform.InstanceState, error) {
// If the ID is already somehow blank, it doesn't exist
if s.ID == "" {
return nil, nil
}
rt := ResourceTimeout{}
if _, ok := s.Meta[TimeoutKey]; ok {
if err := rt.StateDecode(s); err != nil {
log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
}
}
if r.Exists != nil {
// Make a copy of data so that if it is modified it doesn't
// affect our Read later.
data, err := schemaMap(r.Schema).Data(s, nil)
data.timeouts = &rt
if err != nil {
return s, err
}
if s != nil {
data.providerMeta = s.ProviderMeta
}
exists, err := r.Exists(data, meta)
if err != nil {
return s, err
}
if !exists {
return nil, nil
}
}
data, err := schemaMap(r.Schema).Data(s, nil)
data.timeouts = &rt
if err != nil {
return s, err
}
if s != nil {
data.providerMeta = s.ProviderMeta
}
err = r.Read(data, meta)
state := data.State()
if state != nil && state.ID == "" {
state = nil
}
return r.recordCurrentSchemaVersion(state), err
}
// Refresh refreshes the state of the resource.
func (r *Resource) Refresh(
s *terraform.InstanceState,
meta interface{}) (*terraform.InstanceState, error) {
// If the ID is already somehow blank, it doesn't exist
if s.ID == "" {
return nil, nil
}
rt := ResourceTimeout{}
if _, ok := s.Meta[TimeoutKey]; ok {
if err := rt.StateDecode(s); err != nil {
log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
}
}
if r.Exists != nil {
// Make a copy of data so that if it is modified it doesn't
// affect our Read later.
data, err := schemaMap(r.Schema).Data(s, nil)
data.timeouts = &rt
if err != nil {
return s, err
}
exists, err := r.Exists(data, meta)
if err != nil {
return s, err
}
if !exists {
return nil, nil
}
}
// there may be new StateUpgraders that need to be run
s, err := r.upgradeState(s, meta)
if err != nil {
return s, err
}
data, err := schemaMap(r.Schema).Data(s, nil)
data.timeouts = &rt
if err != nil {
return s, err
}
err = r.Read(data, meta)
state := data.State()
if state != nil && state.ID == "" {
state = nil
}
return r.recordCurrentSchemaVersion(state), err
}
func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
var err error
needsMigration, stateSchemaVersion := r.checkSchemaVersion(s)
migrate := needsMigration && r.MigrateState != nil
if migrate {
s, err = r.MigrateState(stateSchemaVersion, s, meta)
if err != nil {
return s, err
}
}
if len(r.StateUpgraders) == 0 {
return s, nil
}
// If we ran MigrateState, then the stateSchemaVersion value is no longer
// correct. We can expect the first upgrade function to be the correct
// schema type version.
if migrate {
stateSchemaVersion = r.StateUpgraders[0].Version
}
schemaType := r.CoreConfigSchema().ImpliedType()
// find the expected type to convert the state
for _, upgrader := range r.StateUpgraders {
if stateSchemaVersion == upgrader.Version {
schemaType = upgrader.Type
}
}
// StateUpgraders only operate on the new JSON format state, so the state
// needs to be converted.
stateVal, err := StateValueFromInstanceState(s, schemaType)
if err != nil {
return nil, err
}
jsonState, err := StateValueToJSONMap(stateVal, schemaType)
if err != nil {
return nil, err
}
for _, upgrader := range r.StateUpgraders {
if stateSchemaVersion != upgrader.Version {
continue
}
jsonState, err = upgrader.Upgrade(jsonState, meta)
if err != nil {
return nil, err
}
stateSchemaVersion++
}
// now we need to re-flatmap the new state
stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema())
if err != nil {
return nil, err
}
return r.ShimInstanceStateFromValue(stateVal)
}
// InternalValidate should be called to validate the structure
// of the resource.
//
// This should be called in a unit test for any resource to verify
// before release that a resource is properly configured for use with
// this library.
//
// Provider.InternalValidate() will automatically call this for all of
// the resources it manages, so you don't need to call this manually if it
// is part of a Provider.
func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error {
if r == nil {
return errors.New("resource is nil")
}
if !writable {
if r.Create != nil || r.Update != nil || r.Delete != nil {
return fmt.Errorf("must not implement Create, Update or Delete")
}
// CustomizeDiff cannot be defined for read-only resources
if r.CustomizeDiff != nil {
return fmt.Errorf("cannot implement CustomizeDiff")
}
}
tsm := topSchemaMap
if r.isTopLevel() && writable {
// All non-Computed attributes must be ForceNew if Update is not defined
if r.Update == nil {
nonForceNewAttrs := make([]string, 0)
for k, v := range r.Schema {
if !v.ForceNew && !v.Computed {
nonForceNewAttrs = append(nonForceNewAttrs, k)
}
}
if len(nonForceNewAttrs) > 0 {
return fmt.Errorf(
"No Update defined, must set ForceNew on: %#v", nonForceNewAttrs)
}
} else {
nonUpdateableAttrs := make([]string, 0)
for k, v := range r.Schema {
if v.ForceNew || v.Computed && !v.Optional {
nonUpdateableAttrs = append(nonUpdateableAttrs, k)
}
}
updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs)
if updateableAttrs == 0 {
return fmt.Errorf(
"All fields are ForceNew or Computed w/out Optional, Update is superfluous")
}
}
tsm = schemaMap(r.Schema)
// Delete and Read are required
if r.Read == nil {
return fmt.Errorf("Read must be implemented")
}
if r.Delete == nil {
return fmt.Errorf("Delete must be implemented")
}
// If we have an importer, we need to verify the importer.
if r.Importer != nil {
if err := r.Importer.InternalValidate(); err != nil {
return err
}
}
for k, f := range tsm {
if isReservedResourceFieldName(k, f) {
return fmt.Errorf("%s is a reserved field name", k)
}
}
}
lastVersion := -1
for _, u := range r.StateUpgraders {
if lastVersion >= 0 && u.Version-lastVersion > 1 {
return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version)
}
if u.Version >= r.SchemaVersion {
return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion)
}
if !u.Type.IsObjectType() {
return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version)
}
if u.Upgrade == nil {
return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version)
}
lastVersion = u.Version
}
if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 {
return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion)
}
// Data source
if r.isTopLevel() && !writable {
tsm = schemaMap(r.Schema)
for k := range tsm {
if isReservedDataSourceFieldName(k) {
return fmt.Errorf("%s is a reserved field name", k)
}
}
}
return schemaMap(r.Schema).InternalValidate(tsm)
}
func isReservedDataSourceFieldName(name string) bool {
for _, reservedName := range ReservedDataSourceFields {
if name == reservedName {
return true
}
}
return false
}
func isReservedResourceFieldName(name string, s *Schema) bool {
// Allow phasing out "id"
// See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415
if name == "id" && (s.Deprecated != "" || s.Removed != "") {
return false
}
for _, reservedName := range ReservedResourceFields {
if name == reservedName {
return true
}
}
return false
}
// Data returns a ResourceData struct for this Resource. Each return value
// is a separate copy and can be safely modified differently.
//
// The data returned from this function has no actual effect on the Resource
// itself (including the state given to this function).
//
// This function is useful for unit tests and ResourceImporter functions.
func (r *Resource) Data(s *terraform.InstanceState) *ResourceData {
result, err := schemaMap(r.Schema).Data(s, nil)
if err != nil {
// At the time of writing, this isn't possible (Data never returns
// non-nil errors). We panic to find this in the future if we have to.
// I don't see a reason for Data to ever return an error.
panic(err)
}
// load the Resource timeouts
result.timeouts = r.Timeouts
if result.timeouts == nil {
result.timeouts = &ResourceTimeout{}
}
// Set the schema version to latest by default
result.meta = map[string]interface{}{
"schema_version": strconv.Itoa(r.SchemaVersion),
}
return result
}
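// Illustrative sketch, not part of this file: Data is convenient inside a
// ResourceImporter State function when one import should produce several
// resources. The "example_disk" type and its attributes are hypothetical.
func importServerWithDisk(d *ResourceData, meta interface{}) ([]*ResourceData, error) {
	// Stand-in for the provider's real disk resource definition.
	diskResource := &Resource{
		Schema: map[string]*Schema{
			"server_id": {Type: TypeString, Required: true},
		},
	}
	disk := diskResource.Data(nil)
	disk.SetType("example_disk")
	disk.SetId(d.Id() + "-disk")
	if err := disk.Set("server_id", d.Id()); err != nil {
		return nil, err
	}
	return []*ResourceData{d, disk}, nil
}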
// TestResourceData Yields a ResourceData filled with this resource's schema for use in unit testing
//
// TODO: May be able to be removed with the above ResourceData function.
func (r *Resource) TestResourceData() *ResourceData {
return &ResourceData{
schema: r.Schema,
}
}
// SchemasForFlatmapPath tries its best to find a sequence of schemas that
// the given dot-delimited attribute path traverses through in the schema
// of the receiving Resource.
func (r *Resource) SchemasForFlatmapPath(path string) []*Schema {
return SchemasForFlatmapPath(path, r.Schema)
}
// Returns true if the resource is "top level" i.e. not a sub-resource.
func (r *Resource) isTopLevel() bool {
// TODO: This is a heuristic; replace with a definitive attribute?
return (r.Create != nil || r.Read != nil)
}
// Determines if a given InstanceState needs to be migrated by checking the
// stored version number with the current SchemaVersion
func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) {
// Get the raw interface{} value for the schema version. If it doesn't
// exist or is nil then set it to zero.
raw := is.Meta["schema_version"]
if raw == nil {
raw = "0"
}
// Try to convert it to a string. If it isn't a string then we pretend
// that it isn't set at all. It should never not be a string unless it
// was manually tampered with.
rawString, ok := raw.(string)
if !ok {
rawString = "0"
}
stateSchemaVersion, _ := strconv.Atoi(rawString)
// Don't run MigrateState if the version is handled by a StateUpgrader,
// since StateMigrateFuncs are not required to handle unknown versions
maxVersion := r.SchemaVersion
if len(r.StateUpgraders) > 0 {
maxVersion = r.StateUpgraders[0].Version
}
return stateSchemaVersion < maxVersion, stateSchemaVersion
}
func (r *Resource) recordCurrentSchemaVersion(
state *terraform.InstanceState) *terraform.InstanceState {
if state != nil && r.SchemaVersion > 0 {
if state.Meta == nil {
state.Meta = make(map[string]interface{})
}
state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion)
}
return state
}
// Noop is a convenience implementation of a resource function which takes
// no action and returns no error.
func Noop(*ResourceData, interface{}) error {
return nil
}
// RemoveFromState is a convenience implementation of a resource function
// which sets the resource ID to empty string (to remove it from state)
// and returns no error.
func RemoveFromState(d *ResourceData, _ interface{}) error {
d.SetId("")
return nil
}
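// Illustrative sketch: wiring Noop and RemoveFromState into a Resource whose
// Read is a no-op and whose Delete only needs to drop the object from state.
// The schema is a hypothetical minimal example.
func exampleNoopResource() *Resource {
	return &Resource{
		Read:   Noop,
		Delete: RemoveFromState,
		Schema: map[string]*Schema{
			"name": {Type: TypeString, Required: true},
		},
	}
}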

View File

@ -1,561 +0,0 @@
package schema
import (
"log"
"reflect"
"strings"
"sync"
"time"
"github.com/hashicorp/terraform/terraform"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/gocty"
)
// ResourceData is used to query and set the attributes of a resource.
//
// ResourceData is the primary argument received for CRUD operations on
// a resource as well as configuration of a provider. It is a powerful
// structure that can be used to not only query data, but check for changes,
// define partial state updates, etc.
//
// The most relevant methods to take a look at are Get, Set, and Partial.
type ResourceData struct {
// Settable (internally)
schema map[string]*Schema
config *terraform.ResourceConfig
state *terraform.InstanceState
diff *terraform.InstanceDiff
meta map[string]interface{}
timeouts *ResourceTimeout
providerMeta cty.Value
// Don't set
multiReader *MultiLevelFieldReader
setWriter *MapFieldWriter
newState *terraform.InstanceState
partial bool
partialMap map[string]struct{}
once sync.Once
isNew bool
panicOnError bool
}
// getResult is the internal structure generated by a Get call; it contains
// some extra data that might be used.
type getResult struct {
Value interface{}
ValueProcessed interface{}
Computed bool
Exists bool
Schema *Schema
}
// UnsafeSetFieldRaw allows setting arbitrary fields in state to arbitrary
// values, bypassing the schema. This MUST NOT be used in normal circumstances -
// it exists only to support the remote_state data source.
//
// Deprecated: Fully define schema attributes and use Set() instead.
func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) {
d.once.Do(d.init)
d.setWriter.unsafeWriteField(key, value)
}
// Get returns the data for the given key, or nil if the key doesn't exist
// in the schema.
//
// If the key does exist in the schema but doesn't exist in the configuration,
// then the default value for that type will be returned. For strings, this is
// "", for numbers it is 0, etc.
//
// If you want to test if something is set at all in the configuration,
// use GetOk.
func (d *ResourceData) Get(key string) interface{} {
v, _ := d.GetOk(key)
return v
}
// GetChange returns the old and new value for a given key.
//
// HasChange should be used to check if a change exists. It is possible
// that both the old and new value are the same if the old value was not
// set and the new value is. This is common, for example, for boolean
// fields which have a zero value of false.
func (d *ResourceData) GetChange(key string) (interface{}, interface{}) {
o, n := d.getChange(key, getSourceState, getSourceDiff)
return o.Value, n.Value
}
// GetOk returns the data for the given key and whether or not the key
// has been set to a non-zero value at some point.
//
// The first result will not necessarily be nil if the value doesn't exist.
// The second result should be checked to determine this information.
func (d *ResourceData) GetOk(key string) (interface{}, bool) {
r := d.getRaw(key, getSourceSet)
exists := r.Exists && !r.Computed
if exists {
// If it exists, we also want to verify it is not the zero-value.
value := r.Value
zero := r.Schema.Type.Zero()
if eq, ok := value.(Equal); ok {
exists = !eq.Equal(zero)
} else {
exists = !reflect.DeepEqual(value, zero)
}
}
return r.Value, exists
}
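// Illustrative sketch: the typical GetOk pattern inside a provider function.
// The "description" attribute name is hypothetical.
func exampleGetOk(d *ResourceData) (string, bool) {
	if v, ok := d.GetOk("description"); ok {
		return v.(string), true
	}
	return "", false
}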
// GetOkExists returns the data for a given key and whether or not the key
// has been set to a non-zero value. This is only useful for determining
// if boolean attributes have been set, if they are Optional but do not
// have a Default value.
//
// This is nearly the same function as GetOk, but it does not check
// for the zero value of the attribute's type. This allows attributes
// without a default to be checked for a literal assignment, regardless
// of the zero value for that type.
// This should only be used if absolutely required/needed.
func (d *ResourceData) GetOkExists(key string) (interface{}, bool) {
r := d.getRaw(key, getSourceSet)
exists := r.Exists && !r.Computed
return r.Value, exists
}
func (d *ResourceData) getRaw(key string, level getSource) getResult {
var parts []string
if key != "" {
parts = strings.Split(key, ".")
}
return d.get(parts, level)
}
// HasChange returns whether or not the given key has been changed.
func (d *ResourceData) HasChange(key string) bool {
o, n := d.GetChange(key)
// If the type implements the Equal interface, then call that
// instead of just doing a reflect.DeepEqual. An example where this is
// needed is *Set
if eq, ok := o.(Equal); ok {
return !eq.Equal(n)
}
return !reflect.DeepEqual(o, n)
}
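// Illustrative sketch: pairing HasChange with GetChange in an update function.
// The "tags" attribute name and its map type are hypothetical.
func exampleTagsChange(d *ResourceData) (map[string]interface{}, bool) {
	if !d.HasChange("tags") {
		return nil, false
	}
	_, n := d.GetChange("tags")
	newTags, _ := n.(map[string]interface{})
	return newTags, true
}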
// Partial turns partial state mode on/off.
//
// When partial state mode is enabled, only key prefixes specified
// by SetPartial will be in the final state. This allows providers to return
// partial states for partially applied resources (when errors occur).
func (d *ResourceData) Partial(on bool) {
d.partial = on
if on {
if d.partialMap == nil {
d.partialMap = make(map[string]struct{})
}
} else {
d.partialMap = nil
}
}
// Set sets the value for the given key.
//
// If the key is invalid or the value is not a correct type, an error
// will be returned.
func (d *ResourceData) Set(key string, value interface{}) error {
d.once.Do(d.init)
// If the value is a pointer to a non-struct, get its value and
// use that. This allows Set to take a pointer to primitives to
// simplify the interface.
reflectVal := reflect.ValueOf(value)
if reflectVal.Kind() == reflect.Ptr {
if reflectVal.IsNil() {
// If the pointer is nil, then the value is just nil
value = nil
} else {
// Otherwise, we dereference the pointer as long as it's not
// a pointer to a struct, since struct pointers are allowed.
reflectVal = reflect.Indirect(reflectVal)
if reflectVal.Kind() != reflect.Struct {
value = reflectVal.Interface()
}
}
}
err := d.setWriter.WriteField(strings.Split(key, "."), value)
if err != nil && d.panicOnError {
panic(err)
}
return err
}
// SetPartial adds the key to the final state output while
// in partial state mode. The key must be a root key in the schema (i.e.
// it cannot be "list.0").
//
// If partial state mode is disabled, then this has no effect. Additionally,
// whenever partial state mode is toggled, the partial data is cleared.
func (d *ResourceData) SetPartial(k string) {
if d.partial {
d.partialMap[k] = struct{}{}
}
}
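// Illustrative sketch: partial state mode in an update function, committing
// only the keys that were successfully applied. The "tags" key and the
// updateTags helper passed in are assumptions for illustration.
func examplePartialUpdate(d *ResourceData, updateTags func() error) error {
	d.Partial(true)
	if err := updateTags(); err != nil {
		// still in partial mode: only keys flagged via SetPartial are kept
		return err
	}
	d.SetPartial("tags")
	d.Partial(false)
	return nil
}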
func (d *ResourceData) MarkNewResource() {
d.isNew = true
}
func (d *ResourceData) IsNewResource() bool {
return d.isNew
}
// Id returns the ID of the resource.
func (d *ResourceData) Id() string {
var result string
if d.state != nil {
result = d.state.ID
if result == "" {
result = d.state.Attributes["id"]
}
}
if d.newState != nil {
result = d.newState.ID
if result == "" {
result = d.newState.Attributes["id"]
}
}
return result
}
// ConnInfo returns the connection info for this resource.
func (d *ResourceData) ConnInfo() map[string]string {
if d.newState != nil {
return d.newState.Ephemeral.ConnInfo
}
if d.state != nil {
return d.state.Ephemeral.ConnInfo
}
return nil
}
// SetId sets the ID of the resource. If the value is blank, then the
// resource is destroyed.
func (d *ResourceData) SetId(v string) {
d.once.Do(d.init)
d.newState.ID = v
// once we transition away from the legacy state types, "id" will no longer
// be a special field, and will become a normal attribute.
// set the attribute normally
d.setWriter.unsafeWriteField("id", v)
// Make sure the newState is also set, otherwise the old value
// may get precedence.
if d.newState.Attributes == nil {
d.newState.Attributes = map[string]string{}
}
d.newState.Attributes["id"] = v
}
// SetConnInfo sets the connection info for a resource.
func (d *ResourceData) SetConnInfo(v map[string]string) {
d.once.Do(d.init)
d.newState.Ephemeral.ConnInfo = v
}
// SetType sets the ephemeral type for the data. This is only required
// for importing.
func (d *ResourceData) SetType(t string) {
d.once.Do(d.init)
d.newState.Ephemeral.Type = t
}
// State returns the new InstanceState after the diff and any Set
// calls.
func (d *ResourceData) State() *terraform.InstanceState {
var result terraform.InstanceState
result.ID = d.Id()
result.Meta = d.meta
// If we have no ID, then this resource doesn't exist and we just
// return nil.
if result.ID == "" {
return nil
}
if d.timeouts != nil {
if err := d.timeouts.StateEncode(&result); err != nil {
log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err)
}
}
// Look for a magic key in the schema that tells us to skip the
// integrity check of fields existing in the schema, allowing dynamic
// keys to be created.
hasDynamicAttributes := false
for k := range d.schema {
if k == "__has_dynamic_attributes" {
hasDynamicAttributes = true
log.Printf("[INFO] Resource %s has dynamic attributes", result.ID)
}
}
// In order to build the final state attributes, we read the full
// attribute set as a map[string]interface{}, write it to a MapFieldWriter,
// and then use that map.
rawMap := make(map[string]interface{})
for k := range d.schema {
source := getSourceSet
if d.partial {
source = getSourceState
if _, ok := d.partialMap[k]; ok {
source = getSourceSet
}
}
raw := d.get([]string{k}, source)
if raw.Exists && !raw.Computed {
rawMap[k] = raw.Value
if raw.ValueProcessed != nil {
rawMap[k] = raw.ValueProcessed
}
}
}
mapW := &MapFieldWriter{Schema: d.schema}
if err := mapW.WriteField(nil, rawMap); err != nil {
log.Printf("[ERR] Error writing fields: %s", err)
return nil
}
result.Attributes = mapW.Map()
if hasDynamicAttributes {
// If we have dynamic attributes, just copy the attributes map
// one for one into the result attributes.
for k, v := range d.setWriter.Map() {
// Don't clobber schema values. This limits usage of dynamic
// attributes to names which _do not_ conflict with schema
// keys!
if _, ok := result.Attributes[k]; !ok {
result.Attributes[k] = v
}
}
}
if d.newState != nil {
result.Ephemeral = d.newState.Ephemeral
}
// TODO: This is hacky and we can remove this when we have a proper
// state writer. We should instead have a proper StateFieldWriter
// and use that.
for k, schema := range d.schema {
if schema.Type != TypeMap {
continue
}
if result.Attributes[k] == "" {
delete(result.Attributes, k)
}
}
if v := d.Id(); v != "" {
result.Attributes["id"] = d.Id()
}
if d.state != nil {
result.Tainted = d.state.Tainted
}
return &result
}
// Timeout returns the duration configured for the given timeout key.
// It returns the default of 20 minutes if the key is not found and no Default is set.
func (d *ResourceData) Timeout(key string) time.Duration {
key = strings.ToLower(key)
// System default of 20 minutes
defaultTimeout := 20 * time.Minute
if d.timeouts == nil {
return defaultTimeout
}
var timeout *time.Duration
switch key {
case TimeoutCreate:
timeout = d.timeouts.Create
case TimeoutRead:
timeout = d.timeouts.Read
case TimeoutUpdate:
timeout = d.timeouts.Update
case TimeoutDelete:
timeout = d.timeouts.Delete
}
if timeout != nil {
return *timeout
}
if d.timeouts.Default != nil {
return *d.timeouts.Default
}
return defaultTimeout
}
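// Illustrative sketch: deriving a deadline for a provider's wait loop from the
// configured (or default) create timeout.
func exampleCreateDeadline(d *ResourceData) time.Time {
	return time.Now().Add(d.Timeout(TimeoutCreate))
}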
func (d *ResourceData) init() {
// Initialize the field that will store our new state
var copyState terraform.InstanceState
if d.state != nil {
copyState = *d.state.DeepCopy()
}
d.newState = &copyState
// Initialize the map for storing set data
d.setWriter = &MapFieldWriter{Schema: d.schema}
// Initialize the reader for getting data from the
// underlying sources (config, diff, etc.)
readers := make(map[string]FieldReader)
var stateAttributes map[string]string
if d.state != nil {
stateAttributes = d.state.Attributes
readers["state"] = &MapFieldReader{
Schema: d.schema,
Map: BasicMapReader(stateAttributes),
}
}
if d.config != nil {
readers["config"] = &ConfigFieldReader{
Schema: d.schema,
Config: d.config,
}
}
if d.diff != nil {
readers["diff"] = &DiffFieldReader{
Schema: d.schema,
Diff: d.diff,
Source: &MultiLevelFieldReader{
Levels: []string{"state", "config"},
Readers: readers,
},
}
}
readers["set"] = &MapFieldReader{
Schema: d.schema,
Map: BasicMapReader(d.setWriter.Map()),
}
d.multiReader = &MultiLevelFieldReader{
Levels: []string{
"state",
"config",
"diff",
"set",
},
Readers: readers,
}
}
func (d *ResourceData) diffChange(
k string) (interface{}, interface{}, bool, bool, bool) {
// Get the change between the state and the config.
o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact)
if !o.Exists {
o.Value = nil
}
if !n.Exists {
n.Value = nil
}
// Return the old and new values, whether they differ, and whether the new value is computed
return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false
}
func (d *ResourceData) getChange(
k string,
oldLevel getSource,
newLevel getSource) (getResult, getResult) {
var parts, parts2 []string
if k != "" {
parts = strings.Split(k, ".")
parts2 = strings.Split(k, ".")
}
o := d.get(parts, oldLevel)
n := d.get(parts2, newLevel)
return o, n
}
func (d *ResourceData) get(addr []string, source getSource) getResult {
d.once.Do(d.init)
level := "set"
flags := source & ^getSourceLevelMask
exact := flags&getSourceExact != 0
source = source & getSourceLevelMask
if source >= getSourceSet {
level = "set"
} else if source >= getSourceDiff {
level = "diff"
} else if source >= getSourceConfig {
level = "config"
} else {
level = "state"
}
var result FieldReadResult
var err error
if exact {
result, err = d.multiReader.ReadFieldExact(addr, level)
} else {
result, err = d.multiReader.ReadFieldMerge(addr, level)
}
if err != nil {
panic(err)
}
// If the result doesn't exist, then we set the value to the zero value
var schema *Schema
if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 {
schema = schemaL[len(schemaL)-1]
}
if result.Value == nil && schema != nil {
result.Value = result.ValueOrZero(schema)
}
// Transform the FieldReadResult into a getResult. It might be worth
// merging these two structures one day.
return getResult{
Value: result.Value,
ValueProcessed: result.ValueProcessed,
Computed: result.Computed,
Exists: result.Exists,
Schema: schema,
}
}
func (d *ResourceData) GetProviderMeta(dst interface{}) error {
if d.providerMeta.IsNull() {
return nil
}
return gocty.FromCtyValue(d.providerMeta, &dst)
}

View File

@ -1,17 +0,0 @@
package schema
//go:generate go run golang.org/x/tools/cmd/stringer -type=getSource resource_data_get_source.go
// getSource represents the level we want to get for a value (internally).
// Any source less than or equal to the level will be loaded (whichever
// has a value first).
type getSource byte
const (
getSourceState getSource = 1 << iota
getSourceConfig
getSourceDiff
getSourceSet
getSourceExact // Only get from the _exact_ level
getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet
)

File diff suppressed because it is too large

View File

@ -1,559 +0,0 @@
package schema
import (
"errors"
"fmt"
"reflect"
"strings"
"sync"
"github.com/hashicorp/terraform/terraform"
)
// newValueWriter is a minor re-implementation of MapFieldWriter to include
// keys that should be marked as computed, to represent the new part of a
// pseudo-diff.
type newValueWriter struct {
*MapFieldWriter
// A list of keys that should be marked as computed.
computedKeys map[string]bool
// A lock to prevent races on writes. The underlying writer will have one as
// well - this is for computed keys.
lock sync.Mutex
// To be used with init.
once sync.Once
}
// init performs any initialization tasks for the newValueWriter.
func (w *newValueWriter) init() {
if w.computedKeys == nil {
w.computedKeys = make(map[string]bool)
}
}
// WriteField overrides MapFieldWriter's WriteField, adding the ability to flag
// the address as computed.
func (w *newValueWriter) WriteField(address []string, value interface{}, computed bool) error {
// Fail the write if we have a non-nil value and computed is true.
// NewComputed values should not have a value when written.
if value != nil && computed {
return errors.New("Non-nil value with computed set")
}
if err := w.MapFieldWriter.WriteField(address, value); err != nil {
return err
}
w.once.Do(w.init)
w.lock.Lock()
defer w.lock.Unlock()
if computed {
w.computedKeys[strings.Join(address, ".")] = true
}
return nil
}
// ComputedKeysMap returns the underlying computed keys map.
func (w *newValueWriter) ComputedKeysMap() map[string]bool {
w.once.Do(w.init)
return w.computedKeys
}
// newValueReader is a minor re-implementation of MapFieldReader and is the
// read counterpart to newValueWriter, allowing the read of keys flagged as
// computed to accommodate the diff override logic in ResourceDiff.
type newValueReader struct {
*MapFieldReader
// The list of computed keys from a newValueWriter.
computedKeys map[string]bool
}
// ReadField reads the values from the underlying writer, returning the
// computed value if it is found as well.
func (r *newValueReader) ReadField(address []string) (FieldReadResult, error) {
addrKey := strings.Join(address, ".")
v, err := r.MapFieldReader.ReadField(address)
if err != nil {
return FieldReadResult{}, err
}
for computedKey := range r.computedKeys {
if childAddrOf(addrKey, computedKey) {
if strings.HasSuffix(addrKey, ".#") {
// This is a count value for a list or set that has been marked as
// computed, or a sub-list/sub-set of a complex resource that has
// been marked as computed. We need to pass through to other readers
// so that an accurate previous count can be fetched for the diff.
v.Exists = false
}
v.Computed = true
}
}
return v, nil
}
// ResourceDiff is used to query and make custom changes to an in-flight diff.
// It can be used to veto particular changes in the diff, customize the diff
// that has been created, or diff values not controlled by config.
//
// The object functions similarly to ResourceData, but most notably lacks
// Set, SetPartial, and Partial, as it should be used to change diff values
// only. Most other first-class ResourceData functions exist, namely Get,
// GetOk, HasChange, and GetChange.
//
// All functions in ResourceDiff, save for ForceNew, can only be used on
// computed fields.
type ResourceDiff struct {
// The schema for the resource being worked on.
schema map[string]*Schema
// The current config for this resource.
config *terraform.ResourceConfig
// The state for this resource as it exists post-refresh, after the initial
// diff.
state *terraform.InstanceState
// The diff created by Terraform. This diff is used, along with state,
// config, and custom-set diff data, to provide a multi-level reader
// experience similar to ResourceData.
diff *terraform.InstanceDiff
// The internal reader structure that contains the state, config, the default
// diff, and the new diff.
multiReader *MultiLevelFieldReader
// A writer that writes overridden new fields.
newWriter *newValueWriter
// Tracks which keys have been updated by ResourceDiff to ensure that the
// diff does not get re-run on keys that were not touched, or diffs that were
// just removed (re-running on the latter would just roll back the removal).
updatedKeys map[string]bool
// Tracks which keys were flagged as forceNew. These keys are not saved in
// newWriter, but we need to track them so that they can be re-diffed later.
forcedNewKeys map[string]bool
}
// newResourceDiff creates a new ResourceDiff instance.
func newResourceDiff(schema map[string]*Schema, config *terraform.ResourceConfig, state *terraform.InstanceState, diff *terraform.InstanceDiff) *ResourceDiff {
d := &ResourceDiff{
config: config,
state: state,
diff: diff,
schema: schema,
}
d.newWriter = &newValueWriter{
MapFieldWriter: &MapFieldWriter{Schema: d.schema},
}
readers := make(map[string]FieldReader)
var stateAttributes map[string]string
if d.state != nil {
stateAttributes = d.state.Attributes
readers["state"] = &MapFieldReader{
Schema: d.schema,
Map: BasicMapReader(stateAttributes),
}
}
if d.config != nil {
readers["config"] = &ConfigFieldReader{
Schema: d.schema,
Config: d.config,
}
}
if d.diff != nil {
readers["diff"] = &DiffFieldReader{
Schema: d.schema,
Diff: d.diff,
Source: &MultiLevelFieldReader{
Levels: []string{"state", "config"},
Readers: readers,
},
}
}
readers["newDiff"] = &newValueReader{
MapFieldReader: &MapFieldReader{
Schema: d.schema,
Map: BasicMapReader(d.newWriter.Map()),
},
computedKeys: d.newWriter.ComputedKeysMap(),
}
d.multiReader = &MultiLevelFieldReader{
Levels: []string{
"state",
"config",
"diff",
"newDiff",
},
Readers: readers,
}
d.updatedKeys = make(map[string]bool)
d.forcedNewKeys = make(map[string]bool)
return d
}
// UpdatedKeys returns the keys that were updated by this ResourceDiff run.
// These are the only keys that a diff should be re-calculated for.
//
// This is the combined result of both keys for which diff values were updated
// for or cleared, and also keys that were flagged to be re-diffed as a result
// of ForceNew.
func (d *ResourceDiff) UpdatedKeys() []string {
var s []string
for k := range d.updatedKeys {
s = append(s, k)
}
for k := range d.forcedNewKeys {
found := false
for _, l := range s {
if k == l {
found = true
break
}
}
if !found {
s = append(s, k)
}
}
return s
}
// Clear wipes the diff for a particular key. It is called by ResourceDiff's
// functionality to remove any possibility of conflicts, but can be called on
// its own to just remove a specific key from the diff completely.
//
// Note that this does not wipe an override. This function is only allowed on
// computed keys.
func (d *ResourceDiff) Clear(key string) error {
if err := d.checkKey(key, "Clear", true); err != nil {
return err
}
return d.clear(key)
}
func (d *ResourceDiff) clear(key string) error {
// Check the schema to make sure that this key exists first.
schemaL := addrToSchema(strings.Split(key, "."), d.schema)
if len(schemaL) == 0 {
return fmt.Errorf("%s is not a valid key", key)
}
for k := range d.diff.Attributes {
if strings.HasPrefix(k, key) {
delete(d.diff.Attributes, k)
}
}
return nil
}
// GetChangedKeysPrefix helps to implement Resource.CustomizeDiff
// where we need to act on all nested fields
// without calling out each one separately
func (d *ResourceDiff) GetChangedKeysPrefix(prefix string) []string {
keys := make([]string, 0)
for k := range d.diff.Attributes {
if strings.HasPrefix(k, prefix) {
keys = append(keys, k)
}
}
return keys
}
// diffChange helps to implement resourceDiffer and derives its change values
// from ResourceDiff's own change data, in addition to existing diff, config, and state.
func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) {
old, new, customized := d.getChange(key)
if !old.Exists {
old.Value = nil
}
if !new.Exists || d.removed(key) {
new.Value = nil
}
return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized
}
// SetNew is used to set a new diff value for the mentioned key. The value must
// be correct for the attribute's schema (mostly relevant for maps, lists, and
// sets). The original value from the state is used as the old value.
//
// This function is only allowed on computed attributes.
func (d *ResourceDiff) SetNew(key string, value interface{}) error {
if err := d.checkKey(key, "SetNew", false); err != nil {
return err
}
return d.setDiff(key, value, false)
}
// SetNewComputed functions like SetNew, except that it blanks out a new value
// and marks it as computed.
//
// This function is only allowed on computed attributes.
func (d *ResourceDiff) SetNewComputed(key string) error {
if err := d.checkKey(key, "SetNewComputed", false); err != nil {
return err
}
return d.setDiff(key, nil, true)
}
// setDiff performs common diff setting behaviour.
func (d *ResourceDiff) setDiff(key string, new interface{}, computed bool) error {
if err := d.clear(key); err != nil {
return err
}
if err := d.newWriter.WriteField(strings.Split(key, "."), new, computed); err != nil {
return fmt.Errorf("Cannot set new diff value for key %s: %s", key, err)
}
d.updatedKeys[key] = true
return nil
}
// ForceNew force-flags ForceNew in the schema for a specific key, and
// re-calculates its diff, effectively causing this attribute to force a new
// resource.
//
// Keep in mind that forcing a new resource will force a second run of the
// resource's CustomizeDiff function (with a new ResourceDiff) once the current
// one has completed. This second run is performed without state. This behavior
// will be the same as if a new resource is being created and is performed to
// ensure that the diff looks like the diff for a new resource as much as
// possible. CustomizeDiff should expect such a scenario and act correctly.
//
// This function returns an error if there is no change for the given key.
//
// Note that the change to schema is permanent for the lifecycle of this
// specific ResourceDiff instance.
func (d *ResourceDiff) ForceNew(key string) error {
if !d.HasChange(key) {
return fmt.Errorf("ForceNew: No changes for %s", key)
}
keyParts := strings.Split(key, ".")
var schema *Schema
schemaL := addrToSchema(keyParts, d.schema)
if len(schemaL) > 0 {
schema = schemaL[len(schemaL)-1]
} else {
return fmt.Errorf("ForceNew: %s is not a valid key", key)
}
schema.ForceNew = true
// Flag this for a re-diff. Don't save any values to guarantee that existing
// diffs aren't messed with, as this gets messy when dealing with complex
// structures, zero values, etc.
d.forcedNewKeys[keyParts[0]] = true
return nil
}
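// Illustrative sketch: a CustomizeDiff-style function that marks a dependent
// computed attribute as unknown and forces replacement when a key changes.
// The "instance_type" and "arn" attribute names are hypothetical, and "arn"
// must be Computed in the real schema for SetNewComputed to succeed.
func exampleCustomizeDiff(d *ResourceDiff, meta interface{}) error {
	if !d.HasChange("instance_type") {
		return nil
	}
	if err := d.SetNewComputed("arn"); err != nil {
		return err
	}
	return d.ForceNew("instance_type")
}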
// Get hands off to ResourceData.Get.
func (d *ResourceDiff) Get(key string) interface{} {
r, _ := d.GetOk(key)
return r
}
// GetChange gets the change between the state and diff, checking first to see
// if an overridden diff exists.
//
// This implementation differs from ResourceData's in the way that we first get
// results from the exact levels for the new diff, then from state and diff as
// per normal.
func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) {
old, new, _ := d.getChange(key)
return old.Value, new.Value
}
// GetOk functions the same way as ResourceData.GetOk, but it also checks the
// new diff levels to provide data consistent with the current state of the
// customized diff.
func (d *ResourceDiff) GetOk(key string) (interface{}, bool) {
r := d.get(strings.Split(key, "."), "newDiff")
exists := r.Exists && !r.Computed
if exists {
// If it exists, we also want to verify it is not the zero-value.
value := r.Value
zero := r.Schema.Type.Zero()
if eq, ok := value.(Equal); ok {
exists = !eq.Equal(zero)
} else {
exists = !reflect.DeepEqual(value, zero)
}
}
return r.Value, exists
}
// GetOkExists functions the same way as GetOkExists within ResourceData, but
// it also checks the new diff levels to provide data consistent with the
// current state of the customized diff.
//
// This is nearly the same function as GetOk, but it does not check
// for the zero value of the attribute's type. This allows attributes
// without a default to be checked for a literal assignment, regardless
// of the zero value for that type.
func (d *ResourceDiff) GetOkExists(key string) (interface{}, bool) {
r := d.get(strings.Split(key, "."), "newDiff")
exists := r.Exists && !r.Computed
return r.Value, exists
}
// NewValueKnown returns true if the new value for the given key is available
// as its final value at diff time. If the return value is false, this means
// either the value is based on interpolation that was unavailable at diff
// time, or that the value was explicitly marked as computed by SetNewComputed.
func (d *ResourceDiff) NewValueKnown(key string) bool {
r := d.get(strings.Split(key, "."), "newDiff")
return !r.Computed
}
// HasChange checks to see if there is a change between state and the diff, or
// in the overridden diff.
func (d *ResourceDiff) HasChange(key string) bool {
old, new := d.GetChange(key)
// If the type implements the Equal interface, then call that
// instead of just doing a reflect.DeepEqual. An example where this is
// needed is *Set
if eq, ok := old.(Equal); ok {
return !eq.Equal(new)
}
return !reflect.DeepEqual(old, new)
}
// Id returns the ID of this resource.
//
// Note that technically, ID does not change during diffs (it either has
// already changed in the refresh, or will change on update), hence we do not
// support updating the ID or fetching it from anything else other than state.
func (d *ResourceDiff) Id() string {
var result string
if d.state != nil {
result = d.state.ID
}
return result
}
// getChange gets values from two different levels, designed for use in
// diffChange, HasChange, and GetChange.
//
// This implementation differs from ResourceData's in the way that we first get
// results from the exact levels for the new diff, then from state and diff as
// per normal.
func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) {
old := d.get(strings.Split(key, "."), "state")
var new getResult
for p := range d.updatedKeys {
if childAddrOf(key, p) {
new = d.getExact(strings.Split(key, "."), "newDiff")
return old, new, true
}
}
new = d.get(strings.Split(key, "."), "newDiff")
return old, new, false
}
// removed checks to see if the key is present in the existing, pre-customized
// diff and if it was marked as NewRemoved.
func (d *ResourceDiff) removed(k string) bool {
diff, ok := d.diff.Attributes[k]
if !ok {
return false
}
return diff.NewRemoved
}
// get performs the appropriate multi-level reader logic for ResourceDiff,
// starting at source. Refer to newResourceDiff for the level order.
func (d *ResourceDiff) get(addr []string, source string) getResult {
result, err := d.multiReader.ReadFieldMerge(addr, source)
if err != nil {
panic(err)
}
return d.finalizeResult(addr, result)
}
// getExact gets an attribute from the exact level referenced by source.
func (d *ResourceDiff) getExact(addr []string, source string) getResult {
result, err := d.multiReader.ReadFieldExact(addr, source)
if err != nil {
panic(err)
}
return d.finalizeResult(addr, result)
}
// finalizeResult does some post-processing of the result produced by get and getExact.
func (d *ResourceDiff) finalizeResult(addr []string, result FieldReadResult) getResult {
// If the result doesn't exist, then we set the value to the zero value
var schema *Schema
if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 {
schema = schemaL[len(schemaL)-1]
}
if result.Value == nil && schema != nil {
result.Value = result.ValueOrZero(schema)
}
// Transform the FieldReadResult into a getResult. It might be worth
// merging these two structures one day.
return getResult{
Value: result.Value,
ValueProcessed: result.ValueProcessed,
Computed: result.Computed,
Exists: result.Exists,
Schema: schema,
}
}
// childAddrOf does a comparison of two addresses to see if one is the child of
// the other.
func childAddrOf(child, parent string) bool {
cs := strings.Split(child, ".")
ps := strings.Split(parent, ".")
if len(ps) > len(cs) {
return false
}
return reflect.DeepEqual(ps, cs[:len(ps)])
}
// checkKey checks the key to make sure it exists and is computed.
func (d *ResourceDiff) checkKey(key, caller string, nested bool) error {
var schema *Schema
if nested {
keyParts := strings.Split(key, ".")
schemaL := addrToSchema(keyParts, d.schema)
if len(schemaL) > 0 {
schema = schemaL[len(schemaL)-1]
}
} else {
s, ok := d.schema[key]
if ok {
schema = s
}
}
if schema == nil {
return fmt.Errorf("%s: invalid key: %s", caller, key)
}
if !schema.Computed {
return fmt.Errorf("%s only operates on computed keys - %s is not one", caller, key)
}
return nil
}

File diff suppressed because it is too large

View File

@ -1,52 +0,0 @@
package schema
// ResourceImporter defines how a resource is imported in Terraform. This
// can be set onto a Resource struct to make it Importable. Not all resources
// have to be importable; if a Resource doesn't have a ResourceImporter then
// it won't be importable.
//
// "Importing" in Terraform is the process of taking an already-created
// resource and bringing it under Terraform management. This can include
// updating Terraform state, generating Terraform configuration, etc.
type ResourceImporter struct {
// The functions below must all be implemented for importing to work.
// State is called to convert an ID to one or more InstanceState to
// insert into the Terraform state. If this isn't specified, then
// the ID is passed straight through.
State StateFunc
}
// StateFunc is the function called to import a resource into the
// Terraform state. It is given a ResourceData with only ID set. This
// ID is going to be an arbitrary value given by the user and may not map
// directly to the ID format that the resource expects, so that should
// be validated.
//
// This should return a slice of ResourceData that turn into the state
// that was imported. This might be as simple as returning only the argument
// that was given to the function. In other cases (such as AWS security groups),
// an import may fan out to multiple resources and this will have to return
// multiple.
//
// To create the ResourceData structures for other resource types (if
// you have to), instantiate your resource and call the Data function.
type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error)
// InternalValidate should be called to validate the structure of this
// importer. This should be called in a unit test.
//
// Resource.InternalValidate() will automatically call this, so this doesn't
// need to be called manually. Further, Resource.InternalValidate() is
// automatically called by Provider.InternalValidate(), so you only need
// to internal validate the provider.
func (r *ResourceImporter) InternalValidate() error {
return nil
}
// ImportStatePassthrough is an implementation of StateFunc that can be
// used to simply pass the ID directly through. This should be used only
// in the case that an ID-only refresh is possible.
func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) {
return []*ResourceData{d}, nil
}
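// Illustrative sketch: attaching ImportStatePassthrough to a Resource so that
// "terraform import" only seeds the ID and relies on Read to populate the rest.
func exampleImportableResource(r *Resource) *Resource {
	r.Importer = &ResourceImporter{
		State: ImportStatePassthrough,
	}
	return r
}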

File diff suppressed because it is too large

View File

@ -1,263 +0,0 @@
package schema
import (
"fmt"
"log"
"time"
"github.com/hashicorp/terraform/configs/hcl2shim"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/copystructure"
)
const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0"
const TimeoutsConfigKey = "timeouts"
const (
TimeoutCreate = "create"
TimeoutRead = "read"
TimeoutUpdate = "update"
TimeoutDelete = "delete"
TimeoutDefault = "default"
)
func timeoutKeys() []string {
return []string{
TimeoutCreate,
TimeoutRead,
TimeoutUpdate,
TimeoutDelete,
TimeoutDefault,
}
}
// DefaultTimeout converts a time.Duration, int64, or float64 (the latter two
// interpreted as nanoseconds) into a *time.Duration.
func DefaultTimeout(tx interface{}) *time.Duration {
var td time.Duration
switch raw := tx.(type) {
case time.Duration:
return &raw
case int64:
td = time.Duration(raw)
case float64:
td = time.Duration(int64(raw))
default:
log.Printf("[WARN] Unknown type in DefaultTimeout: %#v", tx)
}
return &td
}
type ResourceTimeout struct {
Create, Read, Update, Delete, Default *time.Duration
}
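// Illustrative sketch: declaring operation timeouts for a Resource. Only keys
// declared here can be overridden by a user's timeouts block (see ConfigDecode
// below); Default acts as a fallback for operations without their own value.
// The durations are arbitrary examples.
func exampleTimeouts() *ResourceTimeout {
	return &ResourceTimeout{
		Create:  DefaultTimeout(30 * time.Minute),
		Delete:  DefaultTimeout(15 * time.Minute),
		Default: DefaultTimeout(10 * time.Minute),
	}
}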
// ConfigDecode takes a schema and the configuration (available in Diff) and
// validates, parses the timeouts into `t`
func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) error {
if s.Timeouts != nil {
raw, err := copystructure.Copy(s.Timeouts)
if err != nil {
log.Printf("[DEBUG] Error with deep copy: %s", err)
}
*t = *raw.(*ResourceTimeout)
}
if raw, ok := c.Config[TimeoutsConfigKey]; ok {
var rawTimeouts []map[string]interface{}
switch raw := raw.(type) {
case map[string]interface{}:
rawTimeouts = append(rawTimeouts, raw)
case []map[string]interface{}:
rawTimeouts = raw
case string:
if raw == hcl2shim.UnknownVariableValue {
// Timeout is not defined in the config
// Defaults will be used instead
return nil
} else {
log.Printf("[ERROR] Invalid timeout value: %q", raw)
return fmt.Errorf("Invalid Timeout value found")
}
case []interface{}:
for _, r := range raw {
if rMap, ok := r.(map[string]interface{}); ok {
rawTimeouts = append(rawTimeouts, rMap)
} else {
// Go will not allow a fallthrough
log.Printf("[ERROR] Invalid timeout structure: %#v", raw)
return fmt.Errorf("Invalid Timeout structure found")
}
}
default:
log.Printf("[ERROR] Invalid timeout structure: %#v", raw)
return fmt.Errorf("Invalid Timeout structure found")
}
for _, timeoutValues := range rawTimeouts {
for timeKey, timeValue := range timeoutValues {
// validate that we're dealing with the normal CRUD actions
var found bool
for _, key := range timeoutKeys() {
if timeKey == key {
found = true
break
}
}
if !found {
return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
}
// Get timeout
rt, err := time.ParseDuration(timeValue.(string))
if err != nil {
return fmt.Errorf("Error parsing %q timeout: %s", timeKey, err)
}
var timeout *time.Duration
switch timeKey {
case TimeoutCreate:
timeout = t.Create
case TimeoutUpdate:
timeout = t.Update
case TimeoutRead:
timeout = t.Read
case TimeoutDelete:
timeout = t.Delete
case TimeoutDefault:
timeout = t.Default
}
// If the resource has not declared this in the definition, then error
// with an unsupported message
if timeout == nil {
return unsupportedTimeoutKeyError(timeKey)
}
*timeout = rt
}
return nil
}
}
return nil
}
func unsupportedTimeoutKeyError(key string) error {
return fmt.Errorf("Timeout Key (%s) is not supported", key)
}
// DiffEncode, StateEncode, and their decode counterparts are analogous to the
// encoding/json Encoder and Decoder: they encode/decode a timeouts struct to and
// from an instance diff, which is where the timeout data is stored after a diff
// to pass into Apply.
//
// StateEncode encodes the timeout into the ResourceData's InstanceState for
// saving to state
//
func (t *ResourceTimeout) DiffEncode(id *terraform.InstanceDiff) error {
return t.metaEncode(id)
}
func (t *ResourceTimeout) StateEncode(is *terraform.InstanceState) error {
return t.metaEncode(is)
}
// metaEncode encodes the ResourceTimeout into a map[string]interface{} format
// and stores it in the Meta field of the interface it's given.
// Assumes the interface is either *terraform.InstanceState or
// *terraform.InstanceDiff, returns an error otherwise
func (t *ResourceTimeout) metaEncode(ids interface{}) error {
m := make(map[string]interface{})
if t.Create != nil {
m[TimeoutCreate] = t.Create.Nanoseconds()
}
if t.Read != nil {
m[TimeoutRead] = t.Read.Nanoseconds()
}
if t.Update != nil {
m[TimeoutUpdate] = t.Update.Nanoseconds()
}
if t.Delete != nil {
m[TimeoutDelete] = t.Delete.Nanoseconds()
}
if t.Default != nil {
m[TimeoutDefault] = t.Default.Nanoseconds()
// for any key above that is nil, if default is specified, we need to
// populate it with the default
for _, k := range timeoutKeys() {
if _, ok := m[k]; !ok {
m[k] = t.Default.Nanoseconds()
}
}
}
// only add the Timeout to the Meta if we have values
if len(m) > 0 {
switch instance := ids.(type) {
case *terraform.InstanceDiff:
if instance.Meta == nil {
instance.Meta = make(map[string]interface{})
}
instance.Meta[TimeoutKey] = m
case *terraform.InstanceState:
if instance.Meta == nil {
instance.Meta = make(map[string]interface{})
}
instance.Meta[TimeoutKey] = m
default:
return fmt.Errorf("Error matching type for Diff Encode")
}
}
return nil
}
func (t *ResourceTimeout) StateDecode(id *terraform.InstanceState) error {
return t.metaDecode(id)
}
func (t *ResourceTimeout) DiffDecode(is *terraform.InstanceDiff) error {
return t.metaDecode(is)
}
func (t *ResourceTimeout) metaDecode(ids interface{}) error {
var rawMeta interface{}
var ok bool
switch rawInstance := ids.(type) {
case *terraform.InstanceDiff:
rawMeta, ok = rawInstance.Meta[TimeoutKey]
if !ok {
return nil
}
case *terraform.InstanceState:
rawMeta, ok = rawInstance.Meta[TimeoutKey]
if !ok {
return nil
}
default:
return fmt.Errorf("Unknown or unsupported type in metaDecode: %#v", ids)
}
times := rawMeta.(map[string]interface{})
if len(times) == 0 {
return nil
}
if v, ok := times[TimeoutCreate]; ok {
t.Create = DefaultTimeout(v)
}
if v, ok := times[TimeoutRead]; ok {
t.Read = DefaultTimeout(v)
}
if v, ok := times[TimeoutUpdate]; ok {
t.Update = DefaultTimeout(v)
}
if v, ok := times[TimeoutDelete]; ok {
t.Delete = DefaultTimeout(v)
}
if v, ok := times[TimeoutDefault]; ok {
t.Default = DefaultTimeout(v)
}
return nil
}

View File

@ -1,376 +0,0 @@
package schema
import (
"fmt"
"reflect"
"testing"
"time"
"github.com/hashicorp/terraform/terraform"
)
func TestResourceTimeout_ConfigDecode_badkey(t *testing.T) {
cases := []struct {
Name string
// what the resource has defined in source
ResourceDefaultTimeout *ResourceTimeout
// configuration provider by user in tf file
Config map[string]interface{}
// what we expect the parsed ResourceTimeout to be
Expected *ResourceTimeout
// Should we have an error (key not defined in source)
ShouldErr bool
}{
{
Name: "Source does not define 'delete' key",
ResourceDefaultTimeout: timeoutForValues(10, 0, 5, 0, 0),
Config: expectedConfigForValues(2, 0, 0, 1, 0),
Expected: timeoutForValues(10, 0, 5, 0, 0),
ShouldErr: true,
},
{
Name: "Config overrides create",
ResourceDefaultTimeout: timeoutForValues(10, 0, 5, 0, 0),
Config: expectedConfigForValues(2, 0, 7, 0, 0),
Expected: timeoutForValues(2, 0, 7, 0, 0),
ShouldErr: false,
},
{
Name: "Config overrides create, default provided. Should still have zero values",
ResourceDefaultTimeout: timeoutForValues(10, 0, 5, 0, 3),
Config: expectedConfigForValues(2, 0, 7, 0, 0),
Expected: timeoutForValues(2, 0, 7, 0, 3),
ShouldErr: false,
},
{
Name: "Use something besides 'minutes'",
ResourceDefaultTimeout: timeoutForValues(10, 0, 5, 0, 3),
Config: map[string]interface{}{
"create": "2h",
},
Expected: timeoutForValues(120, 0, 5, 0, 3),
ShouldErr: false,
},
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d-%s", i, c.Name), func(t *testing.T) {
r := &Resource{
Timeouts: c.ResourceDefaultTimeout,
}
conf := terraform.NewResourceConfigRaw(
map[string]interface{}{
"foo": "bar",
TimeoutsConfigKey: c.Config,
},
)
timeout := &ResourceTimeout{}
decodeErr := timeout.ConfigDecode(r, conf)
if c.ShouldErr {
if decodeErr == nil {
t.Fatalf("ConfigDecode case (%d): Expected bad timeout key: %s", i, decodeErr)
}
// should error, err was not nil, continue
return
} else {
if decodeErr != nil {
// should not error, error was not nil, fatal
t.Fatalf("decodeError was not nil: %s", decodeErr)
}
}
if !reflect.DeepEqual(c.Expected, timeout) {
t.Fatalf("ConfigDecode match error case (%d).\nExpected:\n%#v\nGot:\n%#v", i, c.Expected, timeout)
}
})
}
}
func TestResourceTimeout_ConfigDecode(t *testing.T) {
r := &Resource{
Timeouts: &ResourceTimeout{
Create: DefaultTimeout(10 * time.Minute),
Update: DefaultTimeout(5 * time.Minute),
},
}
c := terraform.NewResourceConfigRaw(
map[string]interface{}{
"foo": "bar",
TimeoutsConfigKey: map[string]interface{}{
"create": "2m",
"update": "1m",
},
},
)
timeout := &ResourceTimeout{}
err := timeout.ConfigDecode(r, c)
if err != nil {
t.Fatalf("Expected good timeout returned:, %s", err)
}
expected := &ResourceTimeout{
Create: DefaultTimeout(2 * time.Minute),
Update: DefaultTimeout(1 * time.Minute),
}
if !reflect.DeepEqual(timeout, expected) {
t.Fatalf("bad timeout decode.\nExpected:\n%#v\nGot:\n%#v\n", expected, timeout)
}
}
func TestResourceTimeout_legacyConfigDecode(t *testing.T) {
r := &Resource{
Timeouts: &ResourceTimeout{
Create: DefaultTimeout(10 * time.Minute),
Update: DefaultTimeout(5 * time.Minute),
},
}
c := terraform.NewResourceConfigRaw(
map[string]interface{}{
"foo": "bar",
TimeoutsConfigKey: []interface{}{
map[string]interface{}{
"create": "2m",
"update": "1m",
},
},
},
)
timeout := &ResourceTimeout{}
err := timeout.ConfigDecode(r, c)
if err != nil {
t.Fatalf("Expected good timeout returned:, %s", err)
}
expected := &ResourceTimeout{
Create: DefaultTimeout(2 * time.Minute),
Update: DefaultTimeout(1 * time.Minute),
}
if !reflect.DeepEqual(timeout, expected) {
t.Fatalf("bad timeout decode.\nExpected:\n%#v\nGot:\n%#v\n", expected, timeout)
}
}
func TestResourceTimeout_DiffEncode_basic(t *testing.T) {
cases := []struct {
Timeout *ResourceTimeout
Expected map[string]interface{}
// Not immediately clear when an error would hit
ShouldErr bool
}{
// Two fields
{
Timeout: timeoutForValues(10, 0, 5, 0, 0),
Expected: map[string]interface{}{TimeoutKey: expectedForValues(10, 0, 5, 0, 0)},
ShouldErr: false,
},
// Two fields, one is Default
{
Timeout: timeoutForValues(10, 0, 0, 0, 7),
Expected: map[string]interface{}{TimeoutKey: expectedForValues(10, 0, 0, 0, 7)},
ShouldErr: false,
},
// All fields
{
Timeout: timeoutForValues(10, 3, 4, 1, 7),
Expected: map[string]interface{}{TimeoutKey: expectedForValues(10, 3, 4, 1, 7)},
ShouldErr: false,
},
// No fields
{
Timeout: &ResourceTimeout{},
Expected: nil,
ShouldErr: false,
},
}
for _, c := range cases {
state := &terraform.InstanceDiff{}
err := c.Timeout.DiffEncode(state)
if err != nil && !c.ShouldErr {
t.Fatalf("Error, expected:\n%#v\n got:\n%#v\n", c.Expected, state.Meta)
}
// should maybe just compare [TimeoutKey] but for now we're assuming that's
// all that is in Meta
if !reflect.DeepEqual(state.Meta, c.Expected) {
t.Fatalf("Encode not equal, expected:\n%#v\n\ngot:\n%#v\n", c.Expected, state.Meta)
}
}
// same test cases but for InstanceState
for _, c := range cases {
state := &terraform.InstanceState{}
err := c.Timeout.StateEncode(state)
if err != nil && !c.ShouldErr {
t.Fatalf("Error, expected:\n%#v\n got:\n%#v\n", c.Expected, state.Meta)
}
// should maybe just compare [TimeoutKey] but for now we're assuming that's
// all that is in Meta
if !reflect.DeepEqual(state.Meta, c.Expected) {
t.Fatalf("Encode not equal, expected:\n%#v\n\ngot:\n%#v\n", c.Expected, state.Meta)
}
}
}
func TestResourceTimeout_MetaDecode_basic(t *testing.T) {
cases := []struct {
State *terraform.InstanceDiff
Expected *ResourceTimeout
// Not immediately clear when an error would hit
ShouldErr bool
}{
// Two fields
{
State: &terraform.InstanceDiff{Meta: map[string]interface{}{TimeoutKey: expectedForValues(10, 0, 5, 0, 0)}},
Expected: timeoutForValues(10, 0, 5, 0, 0),
ShouldErr: false,
},
// Two fields, one is Default
{
State: &terraform.InstanceDiff{Meta: map[string]interface{}{TimeoutKey: expectedForValues(10, 0, 0, 0, 7)}},
Expected: timeoutForValues(10, 7, 7, 7, 7),
ShouldErr: false,
},
// All fields
{
State: &terraform.InstanceDiff{Meta: map[string]interface{}{TimeoutKey: expectedForValues(10, 3, 4, 1, 7)}},
Expected: timeoutForValues(10, 3, 4, 1, 7),
ShouldErr: false,
},
// No fields
{
State: &terraform.InstanceDiff{},
Expected: &ResourceTimeout{},
ShouldErr: false,
},
}
for _, c := range cases {
rt := &ResourceTimeout{}
err := rt.DiffDecode(c.State)
if err != nil && !c.ShouldErr {
t.Fatalf("Error, expected:\n%#v\n got:\n%#v\n", c.Expected, rt)
}
// should maybe just compare [TimeoutKey] but for now we're assuming that's
// all that is in Meta
if !reflect.DeepEqual(rt, c.Expected) {
t.Fatalf("Encode not equal, expected:\n%#v\n\ngot:\n%#v\n", c.Expected, rt)
}
}
}
func timeoutForValues(create, read, update, del, def int) *ResourceTimeout {
rt := ResourceTimeout{}
if create != 0 {
rt.Create = DefaultTimeout(time.Duration(create) * time.Minute)
}
if read != 0 {
rt.Read = DefaultTimeout(time.Duration(read) * time.Minute)
}
if update != 0 {
rt.Update = DefaultTimeout(time.Duration(update) * time.Minute)
}
if del != 0 {
rt.Delete = DefaultTimeout(time.Duration(del) * time.Minute)
}
if def != 0 {
rt.Default = DefaultTimeout(time.Duration(def) * time.Minute)
}
return &rt
}
// Generates a ResourceTimeout struct that should reflect the
// d.Timeout("key") results
func expectedTimeoutForValues(create, read, update, del, def int) *ResourceTimeout {
rt := ResourceTimeout{}
defaultValues := []*int{&create, &read, &update, &del, &def}
for _, v := range defaultValues {
if *v == 0 {
*v = 20
}
}
if create != 0 {
rt.Create = DefaultTimeout(time.Duration(create) * time.Minute)
}
if read != 0 {
rt.Read = DefaultTimeout(time.Duration(read) * time.Minute)
}
if update != 0 {
rt.Update = DefaultTimeout(time.Duration(update) * time.Minute)
}
if del != 0 {
rt.Delete = DefaultTimeout(time.Duration(del) * time.Minute)
}
if def != 0 {
rt.Default = DefaultTimeout(time.Duration(def) * time.Minute)
}
return &rt
}
func expectedForValues(create, read, update, del, def int) map[string]interface{} {
ex := make(map[string]interface{})
if create != 0 {
ex["create"] = DefaultTimeout(time.Duration(create) * time.Minute).Nanoseconds()
}
if read != 0 {
ex["read"] = DefaultTimeout(time.Duration(read) * time.Minute).Nanoseconds()
}
if update != 0 {
ex["update"] = DefaultTimeout(time.Duration(update) * time.Minute).Nanoseconds()
}
if del != 0 {
ex["delete"] = DefaultTimeout(time.Duration(del) * time.Minute).Nanoseconds()
}
if def != 0 {
defNano := DefaultTimeout(time.Duration(def) * time.Minute).Nanoseconds()
ex["default"] = defNano
for _, k := range timeoutKeys() {
if _, ok := ex[k]; !ok {
ex[k] = defNano
}
}
}
return ex
}
func expectedConfigForValues(create, read, update, delete, def int) map[string]interface{} {
ex := make(map[string]interface{}, 0)
if create != 0 {
ex["create"] = fmt.Sprintf("%dm", create)
}
if read != 0 {
ex["read"] = fmt.Sprintf("%dm", read)
}
if update != 0 {
ex["update"] = fmt.Sprintf("%dm", update)
}
if delete != 0 {
ex["delete"] = fmt.Sprintf("%dm", delete)
}
if def != 0 {
ex["default"] = fmt.Sprintf("%dm", def)
}
return ex
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,125 +0,0 @@
package schema
import (
"bytes"
"fmt"
"sort"
"strconv"
)
func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) {
if val == nil {
buf.WriteRune(';')
return
}
switch schema.Type {
case TypeBool:
if val.(bool) {
buf.WriteRune('1')
} else {
buf.WriteRune('0')
}
case TypeInt:
buf.WriteString(strconv.Itoa(val.(int)))
case TypeFloat:
buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64))
case TypeString:
buf.WriteString(val.(string))
case TypeList:
buf.WriteRune('(')
l := val.([]interface{})
for _, innerVal := range l {
serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
}
buf.WriteRune(')')
case TypeMap:
m := val.(map[string]interface{})
var keys []string
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
buf.WriteRune('[')
for _, k := range keys {
innerVal := m[k]
if innerVal == nil {
continue
}
buf.WriteString(k)
buf.WriteRune(':')
switch innerVal := innerVal.(type) {
case int:
buf.WriteString(strconv.Itoa(innerVal))
case float64:
buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64))
case string:
buf.WriteString(innerVal)
default:
panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal))
}
buf.WriteRune(';')
}
buf.WriteRune(']')
case TypeSet:
buf.WriteRune('{')
s := val.(*Set)
for _, innerVal := range s.List() {
serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
}
buf.WriteRune('}')
default:
panic("unknown schema type to serialize")
}
buf.WriteRune(';')
}
// SerializeResourceForHash appends a serialization of the given resource config
// to the given buffer, guaranteeing deterministic results given the same value
// and schema.
//
// Its primary purpose is as input into a hashing function in order
// to hash complex substructures when used in sets, and so the serialization
// is not reversible.
func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) {
if val == nil {
return
}
sm := resource.Schema
m := val.(map[string]interface{})
var keys []string
for k := range sm {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
innerSchema := sm[k]
// Skip attributes that are not user-provided. Computed attributes
// do not contribute to the hash since their ultimate value cannot
// be known at plan/diff time.
if !(innerSchema.Required || innerSchema.Optional) {
continue
}
buf.WriteString(k)
buf.WriteRune(':')
innerVal := m[k]
SerializeValueForHash(buf, innerVal, innerSchema)
}
}
func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) {
switch tElem := elem.(type) {
case *Schema:
SerializeValueForHash(buf, val, tElem)
case *Resource:
buf.WriteRune('<')
SerializeResourceForHash(buf, val, tElem)
buf.WriteString(">;")
default:
panic(fmt.Sprintf("invalid element type: %T", tElem))
}
}
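// Illustrative sketch: obtaining the deterministic serialized form of a value
// for a given schema, the building block used by HashSchema and HashResource
// elsewhere in this package.
func exampleSerializedForm(s *Schema, v interface{}) string {
	var buf bytes.Buffer
	SerializeValueForHash(&buf, v, s)
	return buf.String()
}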

View File

@ -1,238 +0,0 @@
package schema
import (
"bytes"
"testing"
)
func TestSerializeForHash(t *testing.T) {
type testCase struct {
Schema interface{}
Value interface{}
Expected string
}
tests := []testCase{
testCase{
Schema: &Schema{
Type: TypeInt,
},
Value: 0,
Expected: "0;",
},
testCase{
Schema: &Schema{
Type: TypeInt,
},
Value: 200,
Expected: "200;",
},
testCase{
Schema: &Schema{
Type: TypeBool,
},
Value: true,
Expected: "1;",
},
testCase{
Schema: &Schema{
Type: TypeBool,
},
Value: false,
Expected: "0;",
},
testCase{
Schema: &Schema{
Type: TypeFloat,
},
Value: 1.0,
Expected: "1;",
},
testCase{
Schema: &Schema{
Type: TypeFloat,
},
Value: 1.54,
Expected: "1.54;",
},
testCase{
Schema: &Schema{
Type: TypeFloat,
},
Value: 0.1,
Expected: "0.1;",
},
testCase{
Schema: &Schema{
Type: TypeString,
},
Value: "hello",
Expected: "hello;",
},
testCase{
Schema: &Schema{
Type: TypeString,
},
Value: "1",
Expected: "1;",
},
testCase{
Schema: &Schema{
Type: TypeList,
Elem: &Schema{
Type: TypeString,
},
},
Value: []interface{}{},
Expected: "();",
},
testCase{
Schema: &Schema{
Type: TypeList,
Elem: &Schema{
Type: TypeString,
},
},
Value: []interface{}{"hello", "world"},
Expected: "(hello;world;);",
},
testCase{
Schema: &Schema{
Type: TypeList,
Elem: &Resource{
Schema: map[string]*Schema{
"fo": &Schema{
Type: TypeString,
Required: true,
},
"fum": &Schema{
Type: TypeString,
Required: true,
},
},
},
},
Value: []interface{}{
map[string]interface{}{
"fo": "bar",
},
map[string]interface{}{
"fo": "baz",
"fum": "boz",
},
},
Expected: "(<fo:bar;fum:;>;<fo:baz;fum:boz;>;);",
},
testCase{
Schema: &Schema{
Type: TypeSet,
Elem: &Schema{
Type: TypeString,
},
},
Value: NewSet(func(i interface{}) int { return len(i.(string)) }, []interface{}{
"hello",
"woo",
}),
Expected: "{woo;hello;};",
},
testCase{
Schema: &Schema{
Type: TypeMap,
Elem: &Schema{
Type: TypeString,
},
},
Value: map[string]interface{}{
"foo": "bar",
"baz": "foo",
},
Expected: "[baz:foo;foo:bar;];",
},
testCase{
Schema: &Resource{
Schema: map[string]*Schema{
"name": &Schema{
Type: TypeString,
Required: true,
},
"size": &Schema{
Type: TypeInt,
Optional: true,
},
"green": &Schema{
Type: TypeBool,
Optional: true,
Computed: true,
},
"upside_down": &Schema{
Type: TypeBool,
Computed: true,
},
},
},
Value: map[string]interface{}{
"name": "my-fun-database",
"size": 12,
"green": true,
},
Expected: "green:1;name:my-fun-database;size:12;",
},
// test TypeMap nested in Schema: GH-7091
testCase{
Schema: &Resource{
Schema: map[string]*Schema{
"outer": &Schema{
Type: TypeSet,
Required: true,
Elem: &Schema{
Type: TypeMap,
Optional: true,
},
},
},
},
Value: map[string]interface{}{
"outer": NewSet(func(i interface{}) int { return 42 }, []interface{}{
map[string]interface{}{
"foo": "bar",
"baz": "foo",
},
}),
},
Expected: "outer:{[baz:foo;foo:bar;];};",
},
}
for _, test := range tests {
var gotBuf bytes.Buffer
schema := test.Schema
switch s := schema.(type) {
case *Schema:
SerializeValueForHash(&gotBuf, test.Value, s)
case *Resource:
SerializeResourceForHash(&gotBuf, test.Value, s)
}
got := gotBuf.String()
if got != test.Expected {
t.Errorf("hash(%#v) got %#v, but want %#v", test.Value, got, test.Expected)
}
}
}

View File

@ -1,250 +0,0 @@
package schema
import (
"bytes"
"fmt"
"reflect"
"sort"
"strconv"
"sync"
"github.com/hashicorp/terraform/helper/hashcode"
)
// HashString hashes strings. If you want a Set of strings, this is the
// SchemaSetFunc you want.
func HashString(v interface{}) int {
return hashcode.String(v.(string))
}
// HashInt hashes integers. If you want a Set of integers, this is the
// SchemaSetFunc you want.
func HashInt(v interface{}) int {
return hashcode.String(strconv.Itoa(v.(int)))
}
// HashResource hashes complex structures that are described using
// a *Resource. This is the default set implementation used when a set's
// element type is a full resource.
func HashResource(resource *Resource) SchemaSetFunc {
return func(v interface{}) int {
var buf bytes.Buffer
SerializeResourceForHash(&buf, v, resource)
return hashcode.String(buf.String())
}
}
// HashSchema hashes values that are described using a *Schema. This is the
// default set implementation used when a set's element type is a single
// schema.
func HashSchema(schema *Schema) SchemaSetFunc {
return func(v interface{}) int {
var buf bytes.Buffer
SerializeValueForHash(&buf, v, schema)
return hashcode.String(buf.String())
}
}
// Set is a set data structure that is returned for elements of type
// TypeSet.
type Set struct {
F SchemaSetFunc
m map[string]interface{}
once sync.Once
}
// NewSet is a convenience method for creating a new set with the given
// items.
func NewSet(f SchemaSetFunc, items []interface{}) *Set {
s := &Set{F: f}
for _, i := range items {
s.Add(i)
}
return s
}
// CopySet returns a copy of another set.
func CopySet(otherSet *Set) *Set {
return NewSet(otherSet.F, otherSet.List())
}
// Add adds an item to the set if it isn't already in the set.
func (s *Set) Add(item interface{}) {
s.add(item, false)
}
// Remove removes an item if it's already in the set. Idempotent.
func (s *Set) Remove(item interface{}) {
s.remove(item)
}
// Contains checks if the set has the given item.
func (s *Set) Contains(item interface{}) bool {
_, ok := s.m[s.hash(item)]
return ok
}
// Len returns the number of items in the set.
func (s *Set) Len() int {
return len(s.m)
}
// List returns the elements of this set in slice format.
//
// The order of the returned elements is deterministic. Given the same
// set, the order of this will always be the same.
func (s *Set) List() []interface{} {
result := make([]interface{}, len(s.m))
for i, k := range s.listCode() {
result[i] = s.m[k]
}
return result
}
// Difference performs a set difference of the two sets, returning
// a new third set that has only the elements unique to this set.
func (s *Set) Difference(other *Set) *Set {
result := &Set{F: s.F}
result.once.Do(result.init)
for k, v := range s.m {
if _, ok := other.m[k]; !ok {
result.m[k] = v
}
}
return result
}
// Intersection performs the set intersection of the two sets
// and returns a new third set.
func (s *Set) Intersection(other *Set) *Set {
result := &Set{F: s.F}
result.once.Do(result.init)
for k, v := range s.m {
if _, ok := other.m[k]; ok {
result.m[k] = v
}
}
return result
}
// Union performs the set union of the two sets and returns a new third
// set.
func (s *Set) Union(other *Set) *Set {
result := &Set{F: s.F}
result.once.Do(result.init)
for k, v := range s.m {
result.m[k] = v
}
for k, v := range other.m {
result.m[k] = v
}
return result
}
func (s *Set) Equal(raw interface{}) bool {
other, ok := raw.(*Set)
if !ok {
return false
}
return reflect.DeepEqual(s.m, other.m)
}
// HashEqual simply compares the keys of this set's top-level map to the keys
// in the other set's top-level map to see if they are equal. This obviously assumes
// you have a properly working hash function - use HashResource if in doubt.
func (s *Set) HashEqual(raw interface{}) bool {
other, ok := raw.(*Set)
if !ok {
return false
}
ks1 := make([]string, 0)
ks2 := make([]string, 0)
for k := range s.m {
ks1 = append(ks1, k)
}
for k := range other.m {
ks2 = append(ks2, k)
}
sort.Strings(ks1)
sort.Strings(ks2)
return reflect.DeepEqual(ks1, ks2)
}
func (s *Set) GoString() string {
return fmt.Sprintf("*Set(%#v)", s.m)
}
func (s *Set) init() {
s.m = make(map[string]interface{})
}
func (s *Set) add(item interface{}, computed bool) string {
s.once.Do(s.init)
code := s.hash(item)
if computed {
code = "~" + code
if isProto5() {
tmpCode := code
count := 0
for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] {
count++
tmpCode = fmt.Sprintf("%s%d", code, count)
}
code = tmpCode
}
}
if _, ok := s.m[code]; !ok {
s.m[code] = item
}
return code
}
func (s *Set) hash(item interface{}) string {
code := s.F(item)
// Always return a nonnegative hashcode.
if code < 0 {
code = -code
}
return strconv.Itoa(code)
}
func (s *Set) remove(item interface{}) string {
s.once.Do(s.init)
code := s.hash(item)
delete(s.m, code)
return code
}
func (s *Set) index(item interface{}) int {
return sort.SearchStrings(s.listCode(), s.hash(item))
}
func (s *Set) listCode() []string {
// Sort the hash codes so the order of the list is deterministic
keys := make([]string, 0, len(s.m))
for k := range s.m {
keys = append(keys, k)
}
sort.Sort(sort.StringSlice(keys))
return keys
}
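
As a quick usage sketch (hypothetical, written as if it lived in this same package with an "fmt" import; the function and region names are made up): the stock HashString function from above can drive a string set, and the algebra methods return new sets without mutating their receivers.

func exampleSetAlgebra() {
    a := NewSet(HashString, []interface{}{"us-east-1", "us-west-2"})
    b := NewSet(HashString, []interface{}{"us-west-2", "eu-west-1"})

    both := a.Intersection(b) // only "us-west-2"
    all := a.Union(b)         // three elements, deduplicated by hash code

    // List is deterministic: elements come back ordered by their hash codes.
    fmt.Println(both.Contains("us-west-2"), all.Len(), all.List())
}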

View File

@@ -1,217 +0,0 @@
package schema
import (
"reflect"
"testing"
)
func TestSetAdd(t *testing.T) {
s := &Set{F: testSetInt}
s.Add(1)
s.Add(5)
s.Add(25)
expected := []interface{}{1, 25, 5}
actual := s.List()
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
}
func TestSetAdd_negative(t *testing.T) {
// Since we don't allow negative hashes, this should just hash to the
// same thing...
s := &Set{F: testSetInt}
s.Add(-1)
s.Add(1)
expected := []interface{}{-1}
actual := s.List()
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
}
func TestSetContains(t *testing.T) {
s := &Set{F: testSetInt}
s.Add(5)
s.Add(-5)
if s.Contains(2) {
t.Fatal("should not contain")
}
if !s.Contains(5) {
t.Fatal("should contain")
}
if !s.Contains(-5) {
t.Fatal("should contain")
}
}
func TestSetDifference(t *testing.T) {
s1 := &Set{F: testSetInt}
s2 := &Set{F: testSetInt}
s1.Add(1)
s1.Add(5)
s2.Add(5)
s2.Add(25)
difference := s1.Difference(s2)
difference.Add(2)
expected := []interface{}{1, 2}
actual := difference.List()
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
}
func TestSetIntersection(t *testing.T) {
s1 := &Set{F: testSetInt}
s2 := &Set{F: testSetInt}
s1.Add(1)
s1.Add(5)
s2.Add(5)
s2.Add(25)
intersection := s1.Intersection(s2)
intersection.Add(2)
expected := []interface{}{2, 5}
actual := intersection.List()
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
}
func TestSetUnion(t *testing.T) {
s1 := &Set{F: testSetInt}
s2 := &Set{F: testSetInt}
s1.Add(1)
s1.Add(5)
s2.Add(5)
s2.Add(25)
union := s1.Union(s2)
union.Add(2)
expected := []interface{}{1, 2, 25, 5}
actual := union.List()
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
}
func testSetInt(v interface{}) int {
return v.(int)
}
func TestHashResource_nil(t *testing.T) {
resource := &Resource{
Schema: map[string]*Schema{
"name": {
Type: TypeString,
Optional: true,
},
},
}
f := HashResource(resource)
idx := f(nil)
if idx != 0 {
t.Fatalf("Expected 0 when hashing nil, given: %d", idx)
}
}
func TestHashEqual(t *testing.T) {
nested := &Resource{
Schema: map[string]*Schema{
"foo": {
Type: TypeString,
Optional: true,
},
},
}
root := &Resource{
Schema: map[string]*Schema{
"bar": {
Type: TypeString,
Optional: true,
},
"nested": {
Type: TypeSet,
Optional: true,
Elem: nested,
},
},
}
n1 := map[string]interface{}{"foo": "bar"}
n2 := map[string]interface{}{"foo": "baz"}
r1 := map[string]interface{}{
"bar": "baz",
"nested": NewSet(HashResource(nested), []interface{}{n1}),
}
r2 := map[string]interface{}{
"bar": "qux",
"nested": NewSet(HashResource(nested), []interface{}{n2}),
}
r3 := map[string]interface{}{
"bar": "baz",
"nested": NewSet(HashResource(nested), []interface{}{n2}),
}
r4 := map[string]interface{}{
"bar": "qux",
"nested": NewSet(HashResource(nested), []interface{}{n1}),
}
s1 := NewSet(HashResource(root), []interface{}{r1})
s2 := NewSet(HashResource(root), []interface{}{r2})
s3 := NewSet(HashResource(root), []interface{}{r3})
s4 := NewSet(HashResource(root), []interface{}{r4})
cases := []struct {
name string
set *Set
compare *Set
expected bool
}{
{
name: "equal",
set: s1,
compare: s1,
expected: true,
},
{
name: "not equal",
set: s1,
compare: s2,
expected: false,
},
{
name: "outer equal, should still not be equal",
set: s1,
compare: s3,
expected: false,
},
{
name: "inner equal, should still not be equal",
set: s1,
compare: s4,
expected: false,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
actual := tc.set.HashEqual(tc.compare)
if tc.expected != actual {
t.Fatalf("expected %t, got %t", tc.expected, actual)
}
})
}
}

View File

@@ -1,115 +0,0 @@
package schema
import (
"encoding/json"
"github.com/zclconf/go-cty/cty"
ctyjson "github.com/zclconf/go-cty/cty/json"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/configs/hcl2shim"
"github.com/hashicorp/terraform/terraform"
)
// DiffFromValues takes the current state and desired state as cty.Values and
// derives a terraform.InstanceDiff to give to the legacy providers. This is
// used to take the states provided by the new ApplyResourceChange method and
// convert them to a state+diff required for the legacy Apply method.
func DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) {
return diffFromValues(prior, planned, res, nil)
}
// diffFromValues takes an additional CustomizeDiffFunc, so we can generate our
// test fixtures from the legacy tests. In the new provider protocol the diff
// only needs to be created for the apply operation, and any customizations
// have already been done.
func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) {
instanceState, err := res.ShimInstanceStateFromValue(prior)
if err != nil {
return nil, err
}
configSchema := res.CoreConfigSchema()
cfg := terraform.NewResourceConfigShimmed(planned, configSchema)
removeConfigUnknowns(cfg.Config)
removeConfigUnknowns(cfg.Raw)
diff, err := schemaMap(res.Schema).Diff(instanceState, cfg, cust, nil, false)
if err != nil {
return nil, err
}
return diff, err
}
// During apply the only unknown values are those which are to be computed by
// the resource itself. These may have been marked as unknown config values, and
// need to be removed to prevent the UnknownVariableValue from appearing in the diff.
func removeConfigUnknowns(cfg map[string]interface{}) {
for k, v := range cfg {
switch v := v.(type) {
case string:
if v == hcl2shim.UnknownVariableValue {
delete(cfg, k)
}
case []interface{}:
for _, i := range v {
if m, ok := i.(map[string]interface{}); ok {
removeConfigUnknowns(m)
}
}
case map[string]interface{}:
removeConfigUnknowns(v)
}
}
}
// ApplyDiff takes a cty.Value state and applies a terraform.InstanceDiff to
// get a new cty.Value state. This is used to convert the diff returned from
// the legacy provider Diff method to the state required for the new
// PlanResourceChange method.
func ApplyDiff(base cty.Value, d *terraform.InstanceDiff, schema *configschema.Block) (cty.Value, error) {
return d.ApplyToValue(base, schema)
}
// StateValueToJSONMap converts a cty.Value to generic JSON map via the cty JSON
// encoding.
func StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) {
js, err := ctyjson.Marshal(val, ty)
if err != nil {
return nil, err
}
var m map[string]interface{}
if err := json.Unmarshal(js, &m); err != nil {
return nil, err
}
return m, nil
}
// JSONMapToStateValue takes a generic json map[string]interface{} and converts it
// to the specific type, ensuring that the values conform to the schema.
func JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (cty.Value, error) {
var val cty.Value
js, err := json.Marshal(m)
if err != nil {
return val, err
}
val, err = ctyjson.Unmarshal(js, block.ImpliedType())
if err != nil {
return val, err
}
return block.CoerceValue(val)
}
// StateValueFromInstanceState converts a terraform.InstanceState to a
// cty.Value as described by the provided cty.Type, and maintains the resource
// ID as the "id" attribute.
func StateValueFromInstanceState(is *terraform.InstanceState, ty cty.Type) (cty.Value, error) {
return is.AttrsAsObjectValue(ty)
}
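
A hedged sketch of the JSON helpers above in use (the function name is made up; it assumes it compiles in this package with the same cty and configschema imports): a cty object is flattened to a generic map and then coerced back into the type implied by a configschema.Block.

func exampleStateJSONRoundTrip() error {
    block := &configschema.Block{
        Attributes: map[string]*configschema.Attribute{
            "id":   {Type: cty.String, Optional: true},
            "size": {Type: cty.Number, Optional: true},
        },
    }

    val := cty.ObjectVal(map[string]cty.Value{
        "id":   cty.StringVal("i-abc123"),
        "size": cty.NumberIntVal(8),
    })

    // Flatten to a plain map[string]interface{} via the cty JSON encoding.
    m, err := StateValueToJSONMap(val, block.ImpliedType())
    if err != nil {
        return err
    }

    // Coerce the generic map back into a value conforming to the block.
    _, err = JSONMapToStateValue(m, block)
    return err
}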

File diff suppressed because it is too large

View File

@@ -1,28 +0,0 @@
package schema
import (
"testing"
"github.com/hashicorp/terraform/terraform"
)
// TestResourceDataRaw creates a ResourceData from a raw configuration map.
func TestResourceDataRaw(
t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData {
t.Helper()
c := terraform.NewResourceConfigRaw(raw)
sm := schemaMap(schema)
diff, err := sm.Diff(nil, c, nil, nil, true)
if err != nil {
t.Fatalf("err: %s", err)
}
result, err := sm.Data(nil, diff)
if err != nil {
t.Fatalf("err: %s", err)
}
return result
}
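
A hypothetical example of how a resource test would lean on this helper (the test name, schema, and values are made up; d.Get is the usual ResourceData accessor):

func TestWidgetNameFromRawConfig(t *testing.T) {
    s := map[string]*Schema{
        "name": {Type: TypeString, Required: true},
    }

    d := TestResourceDataRaw(t, s, map[string]interface{}{
        "name": "demo",
    })

    if got := d.Get("name").(string); got != "demo" {
        t.Fatalf("unexpected name: %q", got)
    }
}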

View File

@@ -1,21 +0,0 @@
package schema
//go:generate go run golang.org/x/tools/cmd/stringer -type=ValueType valuetype.go
// ValueType is an enum of the type that can be represented by a schema.
type ValueType int
const (
TypeInvalid ValueType = iota
TypeBool
TypeInt
TypeFloat
TypeString
TypeList
TypeMap
TypeSet
typeObject
)
// NOTE: ValueType has more functions defined on it in schema.go. We can't
// put them here because we reference other files.

View File

@@ -1,31 +0,0 @@
// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT.
package schema
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[TypeInvalid-0]
_ = x[TypeBool-1]
_ = x[TypeInt-2]
_ = x[TypeFloat-3]
_ = x[TypeString-4]
_ = x[TypeList-5]
_ = x[TypeMap-6]
_ = x[TypeSet-7]
_ = x[typeObject-8]
}
const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject"
var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77}
func (i ValueType) String() string {
if i < 0 || i >= ValueType(len(_ValueType_index)-1) {
return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]]
}
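
To illustrate the generated lookup: TypeInt is 2, so String() slices _ValueType_name[_ValueType_index[2]:_ValueType_index[3]], i.e. bytes 19 through 26, which is "TypeInt"; anything outside the enum falls back to the numeric form. A tiny fragment (an "fmt" import is assumed):

fmt.Println(TypeInt.String())       // "TypeInt"
fmt.Println(ValueType(42).String()) // "ValueType(42)"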

View File

@@ -1,49 +0,0 @@
package validation
import (
"fmt"
"strings"
"github.com/hashicorp/terraform/helper/schema"
)
// IntBetween returns a SchemaValidateFunc which tests if the provided value
// is of type int and is between min and max (inclusive)
func IntBetween(min, max int) schema.SchemaValidateFunc {
return func(i interface{}, k string) (s []string, es []error) {
v, ok := i.(int)
if !ok {
es = append(es, fmt.Errorf("expected type of %s to be int", k))
return
}
if v < min || v > max {
es = append(es, fmt.Errorf("expected %s to be in the range (%d - %d), got %d", k, min, max, v))
return
}
return
}
}
// StringInSlice returns a SchemaValidateFunc which tests if the provided value
// is of type string and matches the value of an element in the valid slice.
// The comparison is done in lower case when ignoreCase is true.
func StringInSlice(valid []string, ignoreCase bool) schema.SchemaValidateFunc {
return func(i interface{}, k string) (s []string, es []error) {
v, ok := i.(string)
if !ok {
es = append(es, fmt.Errorf("expected type of %s to be string", k))
return
}
for _, str := range valid {
if v == str || (ignoreCase && strings.ToLower(v) == strings.ToLower(str)) {
return
}
}
es = append(es, fmt.Errorf("expected %s to be one of %v, got %s", k, valid, v))
return
}
}
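
These validators are normally attached to a field's ValidateFunc when the schema is declared. A hedged fragment of a map[string]*schema.Schema (the attribute names and ranges are made up for illustration):

"port": {
    Type:         schema.TypeInt,
    Required:     true,
    ValidateFunc: validation.IntBetween(1, 65535),
},
"tier": {
    Type:         schema.TypeString,
    Optional:     true,
    ValidateFunc: validation.StringInSlice([]string{"basic", "standard"}, true),
},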

View File

@@ -1,95 +0,0 @@
package validation
import (
"regexp"
"testing"
"github.com/hashicorp/terraform/helper/schema"
)
type testCase struct {
val interface{}
f schema.SchemaValidateFunc
expectedErr *regexp.Regexp
}
func TestValidationIntBetween(t *testing.T) {
runTestCases(t, []testCase{
{
val: 1,
f: IntBetween(1, 1),
},
{
val: 1,
f: IntBetween(0, 2),
},
{
val: 1,
f: IntBetween(2, 3),
expectedErr: regexp.MustCompile("expected [\\w]+ to be in the range \\(2 - 3\\), got 1"),
},
{
val: "1",
f: IntBetween(2, 3),
expectedErr: regexp.MustCompile("expected type of [\\w]+ to be int"),
},
})
}
func TestValidationStringInSlice(t *testing.T) {
runTestCases(t, []testCase{
{
val: "ValidValue",
f: StringInSlice([]string{"ValidValue", "AnotherValidValue"}, false),
},
// ignore case
{
val: "VALIDVALUE",
f: StringInSlice([]string{"ValidValue", "AnotherValidValue"}, true),
},
{
val: "VALIDVALUE",
f: StringInSlice([]string{"ValidValue", "AnotherValidValue"}, false),
expectedErr: regexp.MustCompile("expected [\\w]+ to be one of \\[ValidValue AnotherValidValue\\], got VALIDVALUE"),
},
{
val: "InvalidValue",
f: StringInSlice([]string{"ValidValue", "AnotherValidValue"}, false),
expectedErr: regexp.MustCompile("expected [\\w]+ to be one of \\[ValidValue AnotherValidValue\\], got InvalidValue"),
},
{
val: 1,
f: StringInSlice([]string{"ValidValue", "AnotherValidValue"}, false),
expectedErr: regexp.MustCompile("expected type of [\\w]+ to be string"),
},
})
}
func runTestCases(t *testing.T, cases []testCase) {
matchErr := func(errs []error, r *regexp.Regexp) bool {
// err must match one provided
for _, err := range errs {
if r.MatchString(err.Error()) {
return true
}
}
return false
}
for i, tc := range cases {
_, errs := tc.f(tc.val, "test_property")
if len(errs) == 0 && tc.expectedErr == nil {
continue
}
if len(errs) != 0 && tc.expectedErr == nil {
t.Fatalf("expected test case %d to produce no errors, got %v", i, errs)
}
if !matchErr(errs, tc.expectedErr) {
t.Fatalf("expected test case %d to produce error matching \"%s\", got %v", i, tc.expectedErr, errs)
}
}
}