terraform: ugly huge change to weave in new HCL2-oriented types

Due to how deeply the configuration types are woven into Terraform Core, there
isn't a great way to switch over to HCL2 gradually. As a consequence, this
huge commit gets us from the old state to a _compilable_ new state, but
does not yet attempt to fix any tests and has a number of known missing
parts and bugs. We will continue to iterate on this in forthcoming
commits, heading back towards passing tests and making Terraform
fully-functional again.

The three main goals here are:
- Use the configuration models from the "configs" package instead of the
  older models in the "config" package, which is now deprecated and
  preserved only to help us write our migration tool.
- Do expression inspection and evaluation using the functionality of the
  new "lang" package, instead of the Interpolator type and related
  functionality in the main "terraform" package.
- Represent addresses of various objects using types in the addrs package,
  rather than hand-constructed strings. This is not critical to support
  the above, but was a big help during the implementation of these other
  points since it made it much more explicit what kind of address is
  expected in each context.
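As a small illustration of the last point, here is a minimal sketch (not part
of this commit) of turning a user-supplied address string into a structured
addrs value, using the same two-step parse that the import and -target changes
below adopt; the helper and file name are invented for the example:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"

	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/tfdiags"
)

// parseResourceInstanceAddr is a hypothetical helper: first parse the raw
// string as an HCL traversal, then interpret that traversal as a resource
// instance address.
func parseResourceInstanceAddr(raw string) (addrs.AbsResourceInstance, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(raw), "<example>", hcl.Pos{Line: 1, Column: 1})
	diags = diags.Append(travDiags)
	if travDiags.HasErrors() {
		return addrs.AbsResourceInstance{}, diags
	}

	addr, addrDiags := addrs.ParseAbsResourceInstance(traversal)
	diags = diags.Append(addrDiags)
	return addr, diags
}

func main() {
	addr, diags := parseResourceInstanceAddr("module.network.aws_instance.web[0]")
	if diags.HasErrors() {
		fmt.Println(diags.Err())
		return
	}
	// addr carries the module path, resource mode/type/name, and instance key
	// as structured data rather than a hand-constructed string.
	fmt.Println(addr)
}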

Since our new packages are built to accommodate some future planned
features that are not yet implemented (e.g. the "for_each" argument on
resources, "count"/"for_each" on modules), and since a fair amount of
functionality still uses old-style APIs, there is a moderate
amount of shimming here to connect new assumptions with old, hopefully in
a way that makes it easier to find and eliminate these shims later.
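One shim that recurs below (in the output, taint, and untaint commands, among
others) converts a legacy dot-separated module path into the new
addrs.ModuleInstance type. A minimal sketch, with the wrapper function invented
for illustration:

package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/terraform/addrs"
)

// legacyModulePath shims an old-style module path string (still accepted by
// several commands) into the new address type. UnkeyedInstanceShim exists
// precisely because the legacy form cannot carry module instance keys.
func legacyModulePath(dotted string) addrs.ModuleInstance {
	return addrs.Module(strings.Split(dotted, ".")).UnkeyedInstanceShim()
}

func main() {
	modPath := legacyModulePath("root.network")
	fmt.Println(modPath) // a structured module instance path, with no instance keys
}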

I apologize in advance to the person who inevitably just found this huge
commit while spelunking through the commit history.
Martin Atkins 2018-04-30 10:33:53 -07:00
parent 2c70d884d6
commit c937c06a03
130 changed files with 5290 additions and 4222 deletions


@ -9,12 +9,12 @@ import (
"errors"
"time"
"github.com/hashicorp/terraform/configs"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/command/clistate"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/configs/configload"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform"
@ -184,11 +184,10 @@ type Operation struct {
// behavior of the operation.
AutoApprove bool
Destroy bool
Targets []addrs.Targetable
Variables map[string]UnparsedVariableValue
AutoApprove bool
DestroyForce bool
ModuleDepth int
Parallelism int
Targets []string
Variables map[string]interface{}
// Input/output/control options.
UIIn terraform.UIInput


@ -137,11 +137,11 @@ func (b *Local) opApply(
// Start the apply in a goroutine so that we can be interrupted.
var applyState *terraform.State
var applyErr error
var applyDiags tfdiags.Diagnostics
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
_, applyErr = tfCtx.Apply()
_, applyDiags = tfCtx.Apply()
// we always want the state, even if apply failed
applyState = tfCtx.State()
}()
@ -165,12 +165,8 @@ func (b *Local) opApply(
return
}
if applyErr != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
applyErr.Error(),
"Terraform does not automatically rollback in the face of errors. Instead, your Terraform state file has been partially updated with any resources that successfully completed. Please address the error above and apply again to incrementally change your infrastructure.",
))
diags = diags.Append(applyDiags)
if applyDiags.HasErrors() {
b.ReportResult(runningOp, diags)
return
}
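
The hunk above shows the calling convention that repeats throughout this
commit: operations that previously returned a bare error now return
tfdiags.Diagnostics, the diagnostics are always appended to the accumulated
set, and only HasErrors decides control flow so that warnings can pass
through. A condensed sketch of the pattern, with the wrapper function invented
for illustration:

package example

import (
	"github.com/hashicorp/terraform/terraform"
	"github.com/hashicorp/terraform/tfdiags"
)

// applyWithDiags is a hypothetical wrapper showing the diagnostics-based
// convention used with Context.Apply and friends in this commit.
func applyWithDiags(tfCtx *terraform.Context) (*terraform.State, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	_, applyDiags := tfCtx.Apply()
	diags = diags.Append(applyDiags)

	// The partially-updated state is wanted even when the apply failed.
	state := tfCtx.State()

	if applyDiags.HasErrors() {
		// Errors stop here; warnings alone fall through so the caller can
		// keep going and report them at the end.
		return state, diags
	}

	// ...any follow-up work that should happen only on success...
	return state, diags
}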


@ -2,7 +2,6 @@ package local
import (
"context"
"errors"
"github.com/hashicorp/errwrap"
@ -58,15 +57,23 @@ func (b *Local) context(op *backend.Operation) (*terraform.Context, state.State,
opts.Destroy = op.Destroy
opts.Targets = op.Targets
opts.UIInput = op.UIIn
if op.Variables != nil {
opts.Variables = op.Variables
}
// FIXME: Configuration is temporarily stubbed out here to artificially
// create a stopping point in our work to switch to the new config loader.
// This means no backend-provided Terraform operations will actually work.
// This will be addressed in a subsequent commit.
opts.Module = nil
// Load the configuration using the caller-provided configuration loader.
config, configDiags := op.ConfigLoader.LoadConfig(op.ConfigDir)
diags = diags.Append(configDiags)
if configDiags.HasErrors() {
return nil, nil, diags
}
opts.Config = config
variables, varDiags := backend.ParseVariableValues(op.Variables, config.Module.Variables)
diags = diags.Append(varDiags)
if diags.HasErrors() {
return nil, nil, diags
}
if op.Variables != nil {
opts.Variables = variables
}
// Load our state
// By the time we get here, the backend creation code in "command" took
@ -77,23 +84,14 @@ func (b *Local) context(op *backend.Operation) (*terraform.Context, state.State,
// Build the context
var tfCtx *terraform.Context
var ctxDiags tfdiags.Diagnostics
if op.Plan != nil {
tfCtx, err = op.Plan.Context(&opts)
tfCtx, ctxDiags = op.Plan.Context(&opts)
} else {
tfCtx, err = terraform.NewContext(&opts)
tfCtx, ctxDiags = terraform.NewContext(&opts)
}
// any errors resolving plugins returns this
if rpe, ok := err.(*terraform.ResourceProviderError); ok {
b.pluginInitRequired(rpe)
// we wrote the full UI error here, so return a generic error for flow
// control in the command.
diags = diags.Append(errors.New("Can't satisfy plugin requirements"))
return nil, nil, diags
}
if err != nil {
diags = diags.Append(err)
diags = diags.Append(ctxDiags)
if ctxDiags.HasErrors() {
return nil, nil, diags
}
@ -106,8 +104,9 @@ func (b *Local) context(op *backend.Operation) (*terraform.Context, state.State,
mode |= terraform.InputModeVar
mode |= terraform.InputModeVarUnset
if err := tfCtx.Input(mode); err != nil {
diags = diags.Append(errwrap.Wrapf("Error asking for user input: {{err}}", err))
inputDiags := tfCtx.Input(mode)
diags = diags.Append(inputDiags)
if inputDiags.HasErrors() {
return nil, nil, diags
}
}


@ -87,20 +87,20 @@ func (b *Local) opPlan(
// Perform the plan in a goroutine so we can be interrupted
var plan *terraform.Plan
var planErr error
var planDiags tfdiags.Diagnostics
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
log.Printf("[INFO] backend/local: plan calling Plan")
plan, planErr = tfCtx.Plan()
plan, planDiags = tfCtx.Plan()
}()
if b.opWait(doneCh, stopCtx, cancelCtx, tfCtx, opState) {
return
}
if planErr != nil {
diags = diags.Append(planErr)
diags = diags.Append(planDiags)
if planDiags.HasErrors() {
b.ReportResult(runningOp, diags)
return
}


@ -60,11 +60,11 @@ func (b *Local) opRefresh(
// Perform the refresh in a goroutine so we can be interrupted
var newState *terraform.State
var refreshErr error
var refreshDiags tfdiags.Diagnostics
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
newState, refreshErr = tfCtx.Refresh()
newState, refreshDiags = tfCtx.Refresh()
log.Printf("[INFO] backend/local: refresh calling Refresh")
}()
@ -74,8 +74,8 @@ func (b *Local) opRefresh(
// write the resulting state to the running op
runningOp.State = newState
if refreshErr != nil {
diags = diags.Append(refreshErr)
diags = diags.Append(refreshDiags)
if refreshDiags.HasErrors() {
b.ReportResult(runningOp, diags)
return
}

backend/unparsed_value.go (new file, 84 lines)

@ -0,0 +1,84 @@
package backend
import (
"fmt"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/terraform"
"github.com/hashicorp/terraform/tfdiags"
)
// UnparsedVariableValue represents a variable value provided by the caller
// whose parsing must be deferred until configuration is available.
//
// This exists to allow processing of variable-setting arguments (e.g. in the
// command package) to be separated from parsing (in the backend package).
type UnparsedVariableValue interface {
// ParseVariableValue information in the provided variable configuration
// to parse (if necessary) and return the variable value encapsulated in
// the receiver.
//
// If error diagnostics are returned, the resulting value may be invalid
// or incomplete.
ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics)
}
func ParseVariableValues(vv map[string]UnparsedVariableValue, decls map[string]*configs.Variable) (terraform.InputValues, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
ret := make(terraform.InputValues, len(vv))
for name, rv := range vv {
var mode configs.VariableParsingMode
config, declared := decls[name]
if declared {
mode = config.ParsingMode
} else {
mode = configs.VariableParseLiteral
}
val, valDiags := rv.ParseVariableValue(mode)
diags = diags.Append(valDiags)
if valDiags.HasErrors() {
continue
}
if !declared {
switch val.SourceType {
case terraform.ValueFromConfig, terraform.ValueFromFile:
// These source types have source ranges, so we can produce
// a nice error message with good context.
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Value for undeclared variable",
Detail: fmt.Sprintf("The root module does not declare a variable named %q. To use this value, add a \"variable\" block to the configuration.", name),
Subject: val.SourceRange.ToHCL().Ptr(),
})
case terraform.ValueFromEnvVar:
// We allow and ignore undeclared names for environment
// variables, because users will often set these globally
// when they are used across many (but not necessarily all)
// configurations.
case terraform.ValueFromCLIArg:
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Value for undeclared variable",
fmt.Sprintf("A variable named %q was assigned on the command line, but the root module does not declare a variable of that name. To use this value, add a \"variable\" block to the configuration.", name),
))
default:
// For all other source types we are more vague, but other situations
// don't generally crop up at this layer in practice.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Value for undeclared variable",
fmt.Sprintf("A variable named %q was assigned a value, but the root module does not declare a variable of that name. To use this value, add a \"variable\" block to the configuration.", name),
))
}
continue
}
ret[name] = val
}
return ret, diags
}
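
A hypothetical implementation of the interface above, to show how the pieces
fit together (the type name and its literal-only behavior are invented for
this sketch; the real command-layer implementations arrive in later commits):

package example

import (
	"github.com/zclconf/go-cty/cty"

	"github.com/hashicorp/terraform/configs"
	"github.com/hashicorp/terraform/terraform"
	"github.com/hashicorp/terraform/tfdiags"
)

// unparsedLiteralValue sketches an UnparsedVariableValue for a value given
// directly on the command line.
type unparsedLiteralValue struct {
	raw string
}

func (v unparsedLiteralValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	// This sketch ignores the requested parsing mode and always produces a
	// string; a real implementation would parse the raw string as an HCL
	// expression when the declared variable asks for that mode.
	return &terraform.InputValue{
		Value:      cty.StringVal(v.raw),
		SourceType: terraform.ValueFromCLIArg,
	}, diags
}

ParseVariableValues then pairs such values with the declarations from
config.Module.Variables, as the backend/local change above already does.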


@ -7,14 +7,14 @@ import (
"sort"
"strings"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/go-getter"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/terraform"
"github.com/hashicorp/terraform/tfdiags"
)
// ApplyCommand is a Command implementation that applies a Terraform
@ -314,7 +314,7 @@ Options:
return strings.TrimSpace(helpText)
}
func outputsAsString(state *terraform.State, modPath []string, schema []*config.Output, includeHeader bool) string {
func outputsAsString(state *terraform.State, modPath addrs.ModuleInstance, schema []*config.Output, includeHeader bool) string {
if state == nil {
return ""
}


@ -3,6 +3,11 @@ package command
import (
"fmt"
"strings"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/tfdiags"
)
// FlagStringKV is a flag.Value implementation for parsing user variables
@ -41,3 +46,34 @@ func (v *FlagStringSlice) Set(raw string) error {
return nil
}
// FlagTargetSlice is a flag.Value implementation for parsing target addresses
// from the command line, such as -target=aws_instance.foo -target=aws_vpc.bar .
type FlagTargetSlice []addrs.Targetable
func (v *FlagTargetSlice) String() string {
return ""
}
func (v *FlagTargetSlice) Set(raw string) error {
// FIXME: This is not an ideal way to deal with this because it requires
// us to do parsing in a context where we can't nicely return errors
// to the user.
var diags tfdiags.Diagnostics
synthFilename := fmt.Sprintf("-target=%q", raw)
traversal, syntaxDiags := hclsyntax.ParseTraversalAbs([]byte(raw), synthFilename, hcl.Pos{Line: 1, Column: 1})
diags = diags.Append(syntaxDiags)
if syntaxDiags.HasErrors() {
return diags.Err()
}
target, targetDiags := addrs.ParseTarget(traversal)
diags = diags.Append(targetDiags)
if targetDiags.HasErrors() {
return diags.Err()
}
*v = append(*v, target.Subject)
return nil
}
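
The new FlagTargetSlice registers with the standard flag package just like the
string-based flag it replaces (see the Meta.flagSet change further down); a
small, assumed usage sketch:

package main

import (
	"flag"
	"fmt"

	"github.com/hashicorp/terraform/command"
)

func main() {
	var targets command.FlagTargetSlice

	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.Var(&targets, "target", "resource to target")

	// Each -target argument is parsed into a structured addrs.Targetable as
	// the flags are processed, so later code never re-parses strings.
	if err := fs.Parse([]string{"-target=aws_instance.foo", "-target=module.vpc"}); err != nil {
		fmt.Println(err)
		return
	}
	for _, t := range targets {
		fmt.Println(t)
	}
}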


@ -127,12 +127,13 @@ func (c *GraphCommand) Run(args []string) int {
// Skip validation during graph generation - we want to see the graph even if
// it is invalid for some reason.
g, err := ctx.Graph(graphType, &terraform.ContextGraphOpts{
g, graphDiags := ctx.Graph(graphType, &terraform.ContextGraphOpts{
Verbose: verbose,
Validate: false,
})
if err != nil {
c.Ui.Error(fmt.Sprintf("Error creating graph: %s", err))
diags = diags.Append(graphDiags)
if graphDiags.HasErrors() {
c.showDiagnostics(diags)
return 1
}


@ -1,15 +1,19 @@
package command
import (
"errors"
"fmt"
"log"
"os"
"strings"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/terraform"
"github.com/hashicorp/terraform/tfdiags"
@ -57,24 +61,29 @@ func (c *ImportCommand) Run(args []string) int {
return 1
}
// Validate the provided resource address for syntax
addr, err := terraform.ParseResourceAddress(args[0])
if err != nil {
c.Ui.Error(fmt.Sprintf(importCommandInvalidAddressFmt, err))
var diags tfdiags.Diagnostics
// Parse the provided resource address.
traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(args[0]), "<import-address>", hcl.Pos{Line: 1, Column: 1})
diags = diags.Append(travDiags)
if travDiags.HasErrors() {
c.showDiagnostics(diags)
c.Ui.Info(importCommandInvalidAddressReference)
return 1
}
if !addr.HasResourceSpec() {
// module.foo target isn't allowed for import
c.Ui.Error(importCommandMissingResourceSpecMsg)
return 1
}
if addr.Mode != config.ManagedResourceMode {
// can't import to a data resource address
c.Ui.Error(importCommandResourceModeMsg)
addr, addrDiags := addrs.ParseAbsResourceInstance(traversal)
diags = diags.Append(addrDiags)
if addrDiags.HasErrors() {
c.showDiagnostics(diags)
c.Ui.Info(importCommandInvalidAddressReference)
return 1
}
var diags tfdiags.Diagnostics
if addr.Resource.Resource.Mode != addrs.ManagedResourceMode {
diags = diags.Append(errors.New("A managed resource address is required. Importing into a data resource is not allowed."))
c.showDiagnostics(diags)
return 1
}
if !c.dirIsConfigPath(configPath) {
diags = diags.Append(&hcl.Diagnostic{
@ -102,9 +111,9 @@ func (c *ImportCommand) Run(args []string) int {
// This is to reduce the risk that a typo in the resource address will
// import something that Terraform will want to immediately destroy on
// the next plan, and generally acts as a reassurance of user intent.
targetConfig := config.Descendent(addr.Path)
targetConfig := config.DescendentForInstance(addr.Module)
if targetConfig == nil {
modulePath := addr.WholeModuleAddress().String()
modulePath := addr.Module.String()
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Import to non-existent module",
@ -118,15 +127,16 @@ func (c *ImportCommand) Run(args []string) int {
}
targetMod := targetConfig.Module
rcs := targetMod.ManagedResources
var rc *configs.ManagedResource
var rc *configs.Resource
resourceRelAddr := addr.Resource.Resource
for _, thisRc := range rcs {
if addr.MatchesManagedResourceConfig(addr.Path, thisRc) {
if resourceRelAddr.Type == thisRc.Type && resourceRelAddr.Name == thisRc.Name {
rc = thisRc
break
}
}
if !c.Meta.allowMissingConfig && rc == nil {
modulePath := addr.WholeModuleAddress().String()
modulePath := addr.Module.String()
if modulePath == "" {
modulePath = "the root module"
}
@ -140,11 +150,33 @@ func (c *ImportCommand) Run(args []string) int {
// message.
c.Ui.Error(fmt.Sprintf(
importCommandMissingResourceFmt,
addr, modulePath, addr.Type, addr.Name,
addr, modulePath, resourceRelAddr.Type, resourceRelAddr.Name,
))
return 1
}
// Also parse the user-provided provider address, if any.
var providerAddr addrs.AbsProviderConfig
if c.Meta.provider != "" {
traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(c.Meta.provider), `-provider=...`, hcl.Pos{Line: 1, Column: 1})
diags = diags.Append(travDiags)
if travDiags.HasErrors() {
c.showDiagnostics(diags)
c.Ui.Info(importCommandInvalidAddressReference)
return 1
}
relAddr, addrDiags := addrs.ParseProviderConfigCompact(traversal)
diags = diags.Append(addrDiags)
if addrDiags.HasErrors() {
c.showDiagnostics(diags)
return 1
}
providerAddr = relAddr.Absolute(addrs.RootModuleInstance)
} else {
// Use a default address inferred from the resource type.
providerAddr = resourceRelAddr.DefaultProviderConfig().Absolute(addrs.RootModuleInstance)
}
// Check for user-supplied plugin path
if c.pluginPath, err = c.loadPluginPath(); err != nil {
c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err))
@ -200,17 +232,17 @@ func (c *ImportCommand) Run(args []string) int {
// Perform the import. Note that as you can see it is possible for this
// API to import more than one resource at once. For now, we only allow
// one while we stabilize this feature.
newState, err := ctx.Import(&terraform.ImportOpts{
newState, importDiags := ctx.Import(&terraform.ImportOpts{
Targets: []*terraform.ImportTarget{
&terraform.ImportTarget{
Addr: args[0],
ID: args[1],
Provider: c.Meta.provider,
Addr: addr,
ID: args[1],
ProviderAddr: providerAddr,
},
},
})
if err != nil {
diags = diags.Append(err)
diags = diags.Append(importDiags)
if diags.HasErrors() {
c.showDiagnostics(diags)
return 1
}
@ -318,22 +350,8 @@ func (c *ImportCommand) Synopsis() string {
return "Import existing infrastructure into Terraform"
}
const importCommandInvalidAddressFmt = `Error: %s
For information on valid syntax, see:
https://www.terraform.io/docs/internals/resource-addressing.html
`
const importCommandMissingResourceSpecMsg = `Error: resource address must include a full resource spec
For information on valid syntax, see:
https://www.terraform.io/docs/internals/resource-addressing.html
`
const importCommandResourceModeMsg = `Error: resource address must refer to a managed resource.
Data resources cannot be imported.
`
const importCommandInvalidAddressReference = `For information on valid syntax, see:
https://www.terraform.io/docs/internals/resource-addressing.html`
const importCommandMissingResourceFmt = `[reset][bold][red]Error:[reset][bold] resource address %q does not exist in the configuration.[reset]


@ -396,20 +396,11 @@ func (c *InitCommand) getProviders(path string, state *terraform.State, upgrade
return diags
}
if err := terraform.CheckStateVersion(state); err != nil {
if err := terraform.CheckStateVersion(state, false); err != nil {
diags = diags.Append(err)
return diags
}
// FIXME: Restore this once terraform.CheckRequiredVersion is updated to
// work with a configs.Config instead of a legacy module.Tree.
/*
if err := terraform.CheckRequiredVersion(mod); err != nil {
diags = diags.Append(err)
return diags
}
*/
var available discovery.PluginMetaSet
if upgrade {
// If we're in upgrade mode, we ignore any auto-installed plugins


@ -12,19 +12,17 @@ import (
"log"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/backend/local"
"github.com/hashicorp/terraform/command/format"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/configs/configload"
"github.com/hashicorp/terraform/helper/experiment"
"github.com/hashicorp/terraform/helper/variables"
"github.com/hashicorp/terraform/helper/wrappedstreams"
"github.com/hashicorp/terraform/svchost/disco"
"github.com/hashicorp/terraform/terraform"
@ -108,13 +106,11 @@ type Meta struct {
backendState *terraform.BackendState
// Variables for the context (private)
autoKey string
autoVariables map[string]interface{}
input bool
variables map[string]interface{}
variableArgs rawFlags
input bool
// Targets for this context (private)
targets []string
targets []addrs.Targetable
// Internal fields
color bool
@ -325,18 +321,6 @@ func (m *Meta) contextOpts() *terraform.ContextOpts {
opts.Hooks = []terraform.Hook{m.uiHook(), &terraform.DebugHook{}}
opts.Hooks = append(opts.Hooks, m.ExtraHooks...)
vs := make(map[string]interface{})
for k, v := range opts.Variables {
vs[k] = v
}
for k, v := range m.autoVariables {
vs[k] = v
}
for k, v := range m.variables {
vs[k] = v
}
opts.Variables = vs
opts.Targets = m.targets
opts.UIInput = m.UIInput()
opts.Parallelism = m.parallelism
@ -369,13 +353,15 @@ func (m *Meta) contextOpts() *terraform.ContextOpts {
func (m *Meta) flagSet(n string) *flag.FlagSet {
f := flag.NewFlagSet(n, flag.ContinueOnError)
f.BoolVar(&m.input, "input", true, "input")
f.Var((*variables.Flag)(&m.variables), "var", "variables")
f.Var((*variables.FlagFile)(&m.variables), "var-file", "variable file")
f.Var((*FlagStringSlice)(&m.targets), "target", "resource to target")
f.Var((*FlagTargetSlice)(&m.targets), "target", "resource to target")
if m.autoKey != "" {
f.Var((*variables.FlagFile)(&m.autoVariables), m.autoKey, "variable file")
if m.variableArgs.items == nil {
m.variableArgs = newRawFlags("-var")
}
varValues := m.variableArgs.Alias("-var")
varFiles := m.variableArgs.Alias("-var-file")
f.Var(varValues, "var", "variables")
f.Var(varFiles, "var-file", "variable file")
// Advanced (don't need documentation, or unlikely to be set)
f.BoolVar(&m.shadow, "shadow", true, "shadow graph")
@ -456,51 +442,6 @@ func (m *Meta) process(args []string, vars bool) ([]string, error) {
},
}
// If we support vars and the default var file exists, add it to
// the args...
m.autoKey = ""
if vars {
var preArgs []string
if _, err := os.Stat(DefaultVarsFilename); err == nil {
m.autoKey = "var-file-default"
preArgs = append(preArgs, "-"+m.autoKey, DefaultVarsFilename)
}
if _, err := os.Stat(DefaultVarsFilename + ".json"); err == nil {
m.autoKey = "var-file-default"
preArgs = append(preArgs, "-"+m.autoKey, DefaultVarsFilename+".json")
}
wd, err := os.Getwd()
if err != nil {
return nil, err
}
fis, err := ioutil.ReadDir(wd)
if err != nil {
return nil, err
}
// make sure we add the files in order
sort.Slice(fis, func(i, j int) bool {
return fis[i].Name() < fis[j].Name()
})
for _, fi := range fis {
name := fi.Name()
// Ignore directories, non-var-files, and ignored files
if fi.IsDir() || !isAutoVarFile(name) || config.IsIgnoredFile(name) {
continue
}
m.autoKey = "var-file-default"
preArgs = append(preArgs, "-"+m.autoKey, name)
}
args = append(preArgs, args...)
}
return args, nil
}


@ -379,8 +379,10 @@ type rawFlags struct {
}
func newRawFlags(flagName string) rawFlags {
var items []rawFlag
return rawFlags{
flagName: flagName,
items: &items,
}
}


@ -167,7 +167,7 @@ func (m *Meta) Plan(path string) (*terraform.Plan, error) {
// We do a validation here that seems odd but if any plan is given,
// we must not have set any extra variables. The plan itself contains
// the variables and those aren't overwritten.
if len(m.variables) > 0 {
if len(m.variableArgs.AllItems()) > 0 {
return nil, fmt.Errorf(
"You can't set variables with the '-var' or '-var-file' flag\n" +
"when you're applying a plan file. The variables used when\n" +


@ -8,6 +8,8 @@ import (
"sort"
"strings"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/tfdiags"
)
@ -72,14 +74,10 @@ func (c *OutputCommand) Run(args []string) int {
return 1
}
if module == "" {
module = "root"
} else {
module = "root." + module
}
// Get the proper module we want to get outputs for
modPath := strings.Split(module, ".")
// This command uses a legacy shorthand syntax for the module path that
// can't deal with keyed instances, so we'll just shim it for now and
// make the breaking change for this interface later.
modPath := addrs.Module(strings.Split(module, ".")).UnkeyedInstanceShim()
state := stateStore.State()
mod := state.ModuleByPath(modPath)


@ -6,6 +6,7 @@ import (
"log"
"strings"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/command/clistate"
"github.com/hashicorp/terraform/terraform"
)
@ -107,8 +108,10 @@ func (c *TaintCommand) Run(args []string) int {
return 1
}
// Get the proper module we want to taint
modPath := strings.Split(module, ".")
// Get the ModuleState where we will taint. This is provided in a legacy
// string form that doesn't support module instance keys, so we'll shim
// it here.
modPath := addrs.Module(strings.Split(module, ".")).UnkeyedInstanceShim()
mod := s.ModuleByPath(modPath)
if mod == nil {
if allowMissing {


@ -6,6 +6,7 @@ import (
"log"
"strings"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/command/clistate"
)
@ -95,8 +96,10 @@ func (c *UntaintCommand) Run(args []string) int {
return 1
}
// Get the proper module holding the resource we want to untaint
modPath := strings.Split(module, ".")
// Get the ModuleState where we will untaint. This is provided in a legacy
// string form that doesn't support module instance keys, so we'll shim
// it here.
modPath := addrs.Module(strings.Split(module, ".")).UnkeyedInstanceShim()
mod := s.ModuleByPath(modPath)
if mod == nil {
if allowMissing {


@ -6,6 +6,7 @@ import (
"path/filepath"
"strings"
"github.com/hashicorp/terraform/terraform"
"github.com/hashicorp/terraform/tfdiags"
)
@ -111,36 +112,24 @@ Options:
func (c *ValidateCommand) validate(dir string) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
_, cfgDiags := c.loadConfig(dir)
cfg, cfgDiags := c.loadConfig(dir)
diags = diags.Append(cfgDiags)
if diags.HasErrors() {
return diags
}
// TODO: run a validation walk once terraform.NewContext is updated
// to support new-style configuration.
/* old implementation of validation....
mod, modDiags := c.Module(dir)
diags = diags.Append(modDiags)
if modDiags.HasErrors() {
c.showDiagnostics(diags)
return 1
}
opts := c.contextOpts()
opts.Module = mod
opts.Config = cfg
tfCtx, err := terraform.NewContext(opts)
if err != nil {
diags = diags.Append(err)
c.showDiagnostics(diags)
return 1
tfCtx, ctxDiags := terraform.NewContext(opts)
diags = diags.Append(ctxDiags)
if ctxDiags.HasErrors() {
return diags
}
diags = diags.Append(tfCtx.Validate())
*/
validateDiags := tfCtx.Validate()
diags = diags.Append(validateDiags)
return diags
}


@ -388,10 +388,10 @@ func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
var diags tfdiags.Diagnostics
var upstreamFailed bool
if depsSuccess {
log.Printf("[TRACE] dag/walk: walking %q", VertexName(v))
log.Printf("[TRACE] dag/walk: visiting %q", VertexName(v))
diags = w.Callback(v)
} else {
log.Printf("[TRACE] dag/walk: upstream errored, not walking %q", VertexName(v))
log.Printf("[TRACE] dag/walk: upstream of %q errored, so skipping", VertexName(v))
// This won't be displayed to the user because we'll set upstreamFailed,
// but we need to ensure there's at least one error in here so that
// the failures will cascade downstream.
@ -437,7 +437,7 @@ func (w *Walker) waitDeps(
return
case <-time.After(time.Second * 5):
log.Printf("[TRACE] dag/walk: vertex %q, waiting for: %q",
log.Printf("[TRACE] dag/walk: vertex %q is waiting for %q",
VertexName(v), VertexName(dep))
}
}


@ -14,11 +14,15 @@ import (
"syscall"
"testing"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs/configload"
"github.com/davecgh/go-spew/spew"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/logutils"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/helper/logging"
"github.com/hashicorp/terraform/terraform"
)
@ -686,17 +690,17 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r
// doesn't have access to it and we may need things like provider
// configurations. The initial implementation of id-only checks used
// an empty config module, but that caused the aforementioned problems.
mod, err := testModule(opts, step)
cfg, err := testConfig(opts, step)
if err != nil {
return err
}
// Initialize the context
opts.Module = mod
opts.Config = cfg
opts.State = state
ctx, err := terraform.NewContext(&opts)
if err != nil {
return err
ctx, ctxDiags := terraform.NewContext(&opts)
if ctxDiags.HasErrors() {
return ctxDiags.Err()
}
if diags := ctx.Validate(); len(diags) > 0 {
if diags.HasErrors() {
@ -707,9 +711,9 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r
}
// Refresh!
state, err = ctx.Refresh()
if err != nil {
return fmt.Errorf("Error refreshing: %s", err)
state, refreshDiags := ctx.Refresh()
if refreshDiags.HasErrors() {
return refreshDiags.Err()
}
// Verify attribute equivalence.
@ -756,15 +760,14 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r
return nil
}
func testModule(opts terraform.ContextOpts, step TestStep) (*module.Tree, error) {
func testConfig(opts terraform.ContextOpts, step TestStep) (*configs.Config, error) {
if step.PreConfig != nil {
step.PreConfig()
}
cfgPath, err := ioutil.TempDir("", "tf-test")
if err != nil {
return nil, fmt.Errorf(
"Error creating temporary directory for config: %s", err)
return nil, fmt.Errorf("Error creating temporary directory for config: %s", err)
}
if step.PreventDiskCleanup {
@ -773,38 +776,37 @@ func testModule(opts terraform.ContextOpts, step TestStep) (*module.Tree, error)
defer os.RemoveAll(cfgPath)
}
// Write the configuration
cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf"))
// Write the main configuration file
err = ioutil.WriteFile(filepath.Join(cfgPath, "main.tf"), []byte(step.Config), os.ModePerm)
if err != nil {
return nil, fmt.Errorf(
"Error creating temporary file for config: %s", err)
return nil, fmt.Errorf("Error creating temporary file for config: %s", err)
}
_, err = io.Copy(cfgF, strings.NewReader(step.Config))
cfgF.Close()
// Create directory for our child modules, if any.
modulesDir := filepath.Join(cfgPath, ".modules")
err = os.Mkdir(modulesDir, os.ModePerm)
if err != nil {
return nil, fmt.Errorf(
"Error creating temporary file for config: %s", err)
return nil, fmt.Errorf("Error creating child modules directory: %s", err)
}
// Parse the configuration
mod, err := module.NewTreeModule("", cfgPath)
loader, err := configload.NewLoader(&configload.Config{
ModulesDir: modulesDir,
})
if err != nil {
return nil, fmt.Errorf(
"Error loading configuration: %s", err)
return nil, fmt.Errorf("failed to create config loader: %s", err)
}
// Load the modules
modStorage := &module.Storage{
StorageDir: filepath.Join(cfgPath, ".tfmodules"),
Mode: module.GetModeGet,
}
err = mod.Load(modStorage)
if err != nil {
return nil, fmt.Errorf("Error downloading modules: %s", err)
installDiags := loader.InstallModules(cfgPath, true, configload.InstallHooksImpl{})
if installDiags.HasErrors() {
return nil, installDiags
}
return mod, nil
config, configDiags := loader.LoadConfig(cfgPath)
if configDiags.HasErrors() {
return nil, configDiags
}
return config, nil
}
func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) {
@ -881,8 +883,9 @@ func TestCheckResourceAttrSet(name, key string) TestCheckFunc {
// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with
// support for non-root modules
func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc {
mpt := addrs.Module(mp).UnkeyedInstanceShim()
return func(s *terraform.State) error {
is, err := modulePathPrimaryInstanceState(s, mp, name)
is, err := modulePathPrimaryInstanceState(s, mpt, name)
if err != nil {
return err
}
@ -915,8 +918,9 @@ func TestCheckResourceAttr(name, key, value string) TestCheckFunc {
// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with
// support for non-root modules
func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc {
mpt := addrs.Module(mp).UnkeyedInstanceShim()
return func(s *terraform.State) error {
is, err := modulePathPrimaryInstanceState(s, mp, name)
is, err := modulePathPrimaryInstanceState(s, mpt, name)
if err != nil {
return err
}
@ -957,8 +961,9 @@ func TestCheckNoResourceAttr(name, key string) TestCheckFunc {
// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with
// support for non-root modules
func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc {
mpt := addrs.Module(mp).UnkeyedInstanceShim()
return func(s *terraform.State) error {
is, err := modulePathPrimaryInstanceState(s, mp, name)
is, err := modulePathPrimaryInstanceState(s, mpt, name)
if err != nil {
return err
}
@ -991,8 +996,9 @@ func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {
// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with
// support for non-root modules
func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc {
mpt := addrs.Module(mp).UnkeyedInstanceShim()
return func(s *terraform.State) error {
is, err := modulePathPrimaryInstanceState(s, mp, name)
is, err := modulePathPrimaryInstanceState(s, mpt, name)
if err != nil {
return err
}
@ -1052,13 +1058,15 @@ func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string
// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with
// support for non-root modules
func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc {
mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim()
mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim()
return func(s *terraform.State) error {
isFirst, err := modulePathPrimaryInstanceState(s, mpFirst, nameFirst)
isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst)
if err != nil {
return err
}
isSecond, err := modulePathPrimaryInstanceState(s, mpSecond, nameSecond)
isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond)
if err != nil {
return err
}
@ -1163,7 +1171,7 @@ func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, n
// modulePathPrimaryInstanceState returns the primary instance state for the
// given resource name in a given module path.
func modulePathPrimaryInstanceState(s *terraform.State, mp []string, name string) (*terraform.InstanceState, error) {
func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) {
ms := s.ModuleByPath(mp)
if ms == nil {
return nil, fmt.Errorf("No module found at: %s", mp)


@ -6,6 +6,8 @@ import (
"log"
"strings"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/terraform"
)
@ -30,41 +32,41 @@ func testStep(
}
}
mod, err := testModule(opts, step)
cfg, err := testConfig(opts, step)
if err != nil {
return state, err
}
var stepDiags tfdiags.Diagnostics
// Build the context
opts.Module = mod
opts.Config = cfg
opts.State = state
opts.Destroy = step.Destroy
ctx, err := terraform.NewContext(&opts)
if err != nil {
return state, fmt.Errorf("Error initializing context: %s", err)
ctx, stepDiags := terraform.NewContext(&opts)
if stepDiags.HasErrors() {
return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err())
}
if diags := ctx.Validate(); len(diags) > 0 {
if diags.HasErrors() {
return nil, errwrap.Wrapf("config is invalid: {{err}}", diags.Err())
if stepDiags := ctx.Validate(); len(stepDiags) > 0 {
if stepDiags.HasErrors() {
return nil, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err())
}
log.Printf("[WARN] Config warnings:\n%s", diags)
log.Printf("[WARN] Config warnings:\n%s", stepDiags)
}
// Refresh!
state, err = ctx.Refresh()
if err != nil {
return state, fmt.Errorf(
"Error refreshing: %s", err)
state, stepDiags = ctx.Refresh()
if stepDiags.HasErrors() {
return state, fmt.Errorf("Error refreshing: %s", stepDiags.Err())
}
// If this step is a PlanOnly step, skip over this first Plan and subsequent
// Apply, and use the follow up Plan that checks for perpetual diffs
if !step.PlanOnly {
// Plan!
if p, err := ctx.Plan(); err != nil {
return state, fmt.Errorf(
"Error planning: %s", err)
if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() {
return state, fmt.Errorf("Error planning: %s", stepDiags.Err())
} else {
log.Printf("[WARN] Test: Step plan: %s", p)
}
@ -74,13 +76,13 @@ func testStep(
// function
stateBeforeApplication := state.DeepCopy()
// Apply!
state, err = ctx.Apply()
if err != nil {
return state, fmt.Errorf("Error applying: %s", err)
// Apply the diff, creating real resources.
state, stepDiags = ctx.Apply()
if stepDiags.HasErrors() {
return state, fmt.Errorf("Error applying: %s", stepDiags.Err())
}
// Check! Excitement!
// Run any configured checks
if step.Check != nil {
if step.Destroy {
if err := step.Check(stateBeforeApplication); err != nil {
@ -97,8 +99,8 @@ func testStep(
// Now, verify that Plan is now empty and we don't have a perpetual diff issue
// We do this with TWO plans. One without a refresh.
var p *terraform.Plan
if p, err = ctx.Plan(); err != nil {
return state, fmt.Errorf("Error on follow-up plan: %s", err)
if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() {
return state, fmt.Errorf("Error on follow-up plan: %s", stepDiags.Err())
}
if p.Diff != nil && !p.Diff.Empty() {
if step.ExpectNonEmptyPlan {
@ -111,14 +113,13 @@ func testStep(
// And another after a Refresh.
if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) {
state, err = ctx.Refresh()
if err != nil {
return state, fmt.Errorf(
"Error on follow-up refresh: %s", err)
state, stepDiags = ctx.Refresh()
if stepDiags.HasErrors() {
return state, fmt.Errorf("Error on follow-up refresh: %s", stepDiags.Err())
}
}
if p, err = ctx.Plan(); err != nil {
return state, fmt.Errorf("Error on second follow-up plan: %s", err)
if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() {
return state, fmt.Errorf("Error on second follow-up plan: %s", stepDiags.Err())
}
empty := p.Diff == nil || p.Diff.Empty()


@ -6,6 +6,11 @@ import (
"reflect"
"strings"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
"github.com/davecgh/go-spew/spew"
"github.com/hashicorp/terraform/terraform"
)
@ -41,33 +46,45 @@ func testStepImportState(
// Setup the context. We initialize with an empty state. We use the
// full config for provider configurations.
mod, err := testModule(opts, step)
cfg, err := testConfig(opts, step)
if err != nil {
return state, err
}
opts.Module = mod
opts.Config = cfg
opts.State = terraform.NewState()
ctx, err := terraform.NewContext(&opts)
if err != nil {
return state, err
ctx, stepDiags := terraform.NewContext(&opts)
if stepDiags.HasErrors() {
return state, stepDiags.Err()
}
// Do the import!
newState, err := ctx.Import(&terraform.ImportOpts{
// The test step provides the resource address as a string, so we need
// to parse it to get an addrs.AbsResourceAddress to pass in to the
// import method.
traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{})
if hclDiags.HasErrors() {
return nil, hclDiags
}
importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal)
if stepDiags.HasErrors() {
return nil, stepDiags.Err()
}
// Do the import
newState, stepDiags := ctx.Import(&terraform.ImportOpts{
// Set the module so that any provider config is loaded
Module: mod,
Config: cfg,
Targets: []*terraform.ImportTarget{
&terraform.ImportTarget{
Addr: step.ResourceName,
Addr: importAddr,
ID: importId,
},
},
})
if err != nil {
log.Printf("[ERROR] Test: ImportState failure: %s", err)
return state, err
if stepDiags.HasErrors() {
log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err())
return state, stepDiags.Err()
}
// Go through the new state and verify


@ -8,6 +8,7 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/terraform/terraform"
)
@ -121,6 +122,11 @@ func (p *Provisioner) Stop() error {
return nil
}
// GetConfigSchema implementation of terraform.ResourceProvisioner interface.
func (p *Provisioner) GetConfigSchema() (*configschema.Block, error) {
return schemaMap(p.Schema).CoreConfigSchema(), nil
}
// Apply implementation of terraform.ResourceProvisioner interface.
func (p *Provisioner) Apply(
o terraform.UIOutput,


@ -26,7 +26,6 @@ type Data interface {
GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
GetModuleInstanceOutput(addrs.ModuleCallOutput, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
GetSelf(tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
}


@ -12,7 +12,6 @@ type dataForTests struct {
LocalValues map[string]cty.Value
Modules map[string]cty.Value
PathAttrs map[string]cty.Value
Self cty.Value
TerraformAttrs map[string]cty.Value
InputVariables map[string]cty.Value
}
@ -49,10 +48,6 @@ func (d *dataForTests) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange)
return d.PathAttrs[addr.Name], nil
}
func (d *dataForTests) GetSelf(rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
return d.Self, nil
}
func (d *dataForTests) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
return d.TerraformAttrs[addr.Name], nil
}


@ -93,6 +93,44 @@ func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfd
return val, diags
}
// EvalReference evaluates the given reference in the receiving scope and
// returns the resulting value. The value will be converted to the given type before
// it is returned if possible, or else an error diagnostic will be produced
// describing the conversion error.
//
// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion
// and just obtain the returned value directly.
//
// If the returned diagnostics contains errors then the result may be
// incomplete, but will always be of the requested type.
func (s *Scope) EvalReference(ref *addrs.Reference, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
// We cheat a bit here and just build an EvalContext for our requested
// reference with the "self" address overridden, and then pull the "self"
// result out of it to return.
ctx, ctxDiags := s.evalContext([]*addrs.Reference{ref}, ref.Subject)
diags = diags.Append(ctxDiags)
val := ctx.Variables["self"]
if val == cty.NilVal {
val = cty.DynamicVal
}
var convErr error
val, convErr = convert.Convert(val, wantType)
if convErr != nil {
val = cty.UnknownVal(wantType)
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Incorrect value type",
Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)),
Subject: ref.SourceRange.ToHCL().Ptr(),
})
}
return val, diags
}
// EvalContext constructs a HCL expression evaluation context whose variable
// scope contains sufficient values to satisfy the given set of references.
//
@ -100,6 +138,10 @@ func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfd
// this type offers, but this is here for less common situations where the
// caller will handle the evaluation calls itself.
func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) {
return s.evalContext(refs, s.SelfAddr)
}
func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceable) (*hcl.EvalContext, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
vals := make(map[string]cty.Value)
funcs := s.Functions()
@ -134,17 +176,37 @@ func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.
for _, ref := range refs {
rng := ref.SourceRange
isSelf := false
if ref.Subject == addrs.Self {
val, valDiags := normalizeRefValue(s.Data.GetSelf(ref.SourceRange))
diags = diags.Append(valDiags)
self = val
continue
rawSubj := ref.Subject
if rawSubj == addrs.Self {
if selfAddr == nil {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: `Invalid "self" reference`,
// This detail message mentions some current practice that
// this codepath doesn't really "know about". If the "self"
// object starts being supported in more contexts later then
// we'll need to adjust this message.
Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`,
Subject: ref.SourceRange.ToHCL().Ptr(),
})
continue
}
// Treat "self" as an alias for the configured self address.
rawSubj = selfAddr
isSelf = true
if rawSubj == addrs.Self {
// Programming error: the self address cannot alias itself.
panic("scope SelfAddr attempting to alias itself")
}
}
// This type switch must cover all of the "Referenceable" implementations
// in package addrs.
switch subj := ref.Subject.(type) {
switch subj := rawSubj.(type) {
case addrs.ResourceInstance:
var into map[string]map[string]map[addrs.InstanceKey]cty.Value
@ -168,6 +230,9 @@ func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.
into[r.Type][r.Name] = make(map[addrs.InstanceKey]cty.Value)
}
into[r.Type][r.Name][subj.Key] = val
if isSelf {
self = val
}
case addrs.ModuleCallInstance:
val, valDiags := normalizeRefValue(s.Data.GetModuleInstance(subj, rng))
@ -177,6 +242,9 @@ func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.
wholeModules[subj.Call.Name] = make(map[addrs.InstanceKey]cty.Value)
}
wholeModules[subj.Call.Name][subj.Key] = val
if isSelf {
self = val
}
case addrs.ModuleCallOutput:
val, valDiags := normalizeRefValue(s.Data.GetModuleInstanceOutput(subj, rng))
@ -191,35 +259,53 @@ func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.
moduleOutputs[callName][callKey] = make(map[string]cty.Value)
}
moduleOutputs[callName][callKey][subj.Name] = val
if isSelf {
self = val
}
case addrs.InputVariable:
val, valDiags := normalizeRefValue(s.Data.GetInputVariable(subj, rng))
diags = diags.Append(valDiags)
inputVariables[subj.Name] = val
if isSelf {
self = val
}
case addrs.LocalValue:
val, valDiags := normalizeRefValue(s.Data.GetLocalValue(subj, rng))
diags = diags.Append(valDiags)
localValues[subj.Name] = val
if isSelf {
self = val
}
case addrs.PathAttr:
val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, rng))
diags = diags.Append(valDiags)
pathAttrs[subj.Name] = val
if isSelf {
self = val
}
case addrs.TerraformAttr:
val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, rng))
diags = diags.Append(valDiags)
terraformAttrs[subj.Name] = val
if isSelf {
self = val
}
case addrs.CountAttr:
val, valDiags := normalizeRefValue(s.Data.GetCountAttr(subj, rng))
diags = diags.Append(valDiags)
countAttrs[subj.Name] = val
if isSelf {
self = val
}
default:
// Should never happen
panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", ref.Subject))
panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj))
}
}
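
The Scope.EvalReference entry point added above, together with the SelfAddr
plumbing, is what provisioner and connection blocks are expected to use; a
minimal, assumed caller looks like this:

package example

import (
	"github.com/zclconf/go-cty/cty"

	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/lang"
	"github.com/hashicorp/terraform/tfdiags"
)

// evalStringRef is a hypothetical caller of the new Scope.EvalReference. The
// method guarantees a value of the requested type even when it also returns
// error diagnostics, so no extra type-checking is needed here.
func evalStringRef(scope *lang.Scope, ref *addrs.Reference) (cty.Value, tfdiags.Diagnostics) {
	return scope.EvalReference(ref, cty.String)
}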


@ -5,6 +5,7 @@ import (
"encoding/json"
"testing"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/hcl2/hcl"
@ -50,9 +51,6 @@ func TestScopeEvalContext(t *testing.T) {
PathAttrs: map[string]cty.Value{
"module": cty.StringVal("foo/bar"),
},
Self: cty.ObjectVal(map[string]cty.Value{
"is_self": cty.True,
}),
TerraformAttrs: map[string]cty.Value{
"workspace": cty.StringVal("default"),
},
@ -192,8 +190,19 @@ func TestScopeEvalContext(t *testing.T) {
{
`self.baz`,
map[string]cty.Value{
// In the test function below we set "SelfAddr" to be
// one of the resources in our dataset, causing it to get
// expanded here and then copied into "self".
"null_resource": cty.ObjectVal(map[string]cty.Value{
"multi": cty.TupleVal([]cty.Value{
cty.DynamicVal,
cty.ObjectVal(map[string]cty.Value{
"attr": cty.StringVal("multi1"),
}),
}),
}),
"self": cty.ObjectVal(map[string]cty.Value{
"is_self": cty.True,
"attr": cty.StringVal("multi1"),
}),
},
},
@ -233,6 +242,17 @@ func TestScopeEvalContext(t *testing.T) {
scope := &Scope{
Data: data,
// "self" will just be an arbitrary one of the several resource
// instances we have in our test dataset.
SelfAddr: addrs.ResourceInstance{
Resource: addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "null_resource",
Name: "multi",
},
Key: addrs.IntKey(1),
},
}
ctx, ctxDiags := scope.EvalContext(refs)
if ctxDiags.HasErrors() {


@ -48,6 +48,9 @@ func References(traversals []hcl.Traversal) ([]*addrs.Reference, tfdiags.Diagnos
// A block schema must be provided so that this function can determine where in
// the body variables are expected.
func ReferencesInBlock(body hcl.Body, schema *configschema.Block) ([]*addrs.Reference, tfdiags.Diagnostics) {
if body == nil {
return nil, nil
}
spec := schema.DecoderSpec()
traversals := hcldec.Variables(body, spec)
return References(traversals)
@ -57,6 +60,9 @@ func ReferencesInBlock(body hcl.Body, schema *configschema.Block) ([]*addrs.Refe
// the given expression for traversals, before converting those traversals
// to references.
func ReferencesInExpr(expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) {
if expr == nil {
return nil, nil
}
traversals := expr.Variables()
return References(traversals)
}


@ -4,6 +4,8 @@ import (
"sync"
"github.com/zclconf/go-cty/cty/function"
"github.com/hashicorp/terraform/addrs"
)
// Scope is the main type in this package, allowing dynamic evaluation of
@ -13,6 +15,10 @@ type Scope struct {
// Data is used to resolve references in expressions.
Data Data
// SelfAddr is the address that the "self" object should be an alias of,
// or nil if the "self" object should not be available at all.
SelfAddr addrs.Referenceable
// BaseDir is the base directory used by any interpolation functions that
// accept filesystem paths as arguments.
BaseDir string


@ -8,12 +8,14 @@ import (
"strings"
"sync"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/version"
)
@ -55,15 +57,15 @@ type ContextOpts struct {
Destroy bool
Diff *Diff
Hooks []Hook
Module *module.Tree
Config *configs.Config
Parallelism int
State *State
StateFutureAllowed bool
ProviderResolver ResourceProviderResolver
Provisioners map[string]ResourceProvisionerFactory
Shadow bool
Targets []string
Variables map[string]interface{}
Targets []addrs.Targetable
Variables InputValues
// If non-nil, will apply as additional constraints on the provider
// plugins that will be requested from the provider resolver.
@ -97,18 +99,18 @@ type Context struct {
diffLock sync.RWMutex
hooks []Hook
meta *ContextMeta
module *module.Tree
config *configs.Config
sh *stopHook
shadow bool
state *State
stateLock sync.RWMutex
targets []string
targets []addrs.Targetable
uiInput UIInput
variables map[string]interface{}
variables InputValues
l sync.Mutex // Lock acquired during any task
parallelSem Semaphore
providerInputConfig map[string]map[string]interface{}
providerInputConfig map[string]map[string]cty.Value
providerSHA256s map[string][]byte
runLock sync.Mutex
runCond *sync.Cond
@ -119,15 +121,18 @@ type Context struct {
// NewContext creates a new Context structure.
//
// Once a Context is creator, the pointer values within ContextOpts
// should not be mutated in any way, since the pointers are copied, not
// the values themselves.
func NewContext(opts *ContextOpts) (*Context, error) {
// Validate the version requirement if it is given
if opts.Module != nil {
if err := CheckRequiredVersion(opts.Module); err != nil {
return nil, err
}
// Once a Context is created, the caller should not access or mutate any of
// the objects referenced (directly or indirectly) by the ContextOpts fields.
//
// If the returned diagnostics contains errors then the resulting context is
// invalid and must not be used.
func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) {
diags := CheckCoreVersionRequirements(opts.Config)
// If version constraints are not met then we'll bail early since otherwise
// we're likely to just see a bunch of other errors related to
// incompatibilities, which could be overwhelming for the user.
if diags.HasErrors() {
return nil, diags
}
// Copy all the hooks and add our stop hook. We don't append directly
@ -145,8 +150,9 @@ func NewContext(opts *ContextOpts) (*Context, error) {
// If our state is from the future, then error. Callers can avoid
// this error by explicitly setting `StateFutureAllowed`.
if err := CheckStateVersion(state); err != nil && !opts.StateFutureAllowed {
return nil, err
if stateDiags := CheckStateVersion(state, opts.StateFutureAllowed); stateDiags.HasErrors() {
diags = diags.Append(stateDiags)
return nil, diags
}
// Explicitly reset our state version to our current version so that
@ -168,27 +174,28 @@ func NewContext(opts *ContextOpts) (*Context, error) {
// 2 - Take values specified in -var flags, overriding values
// set by environment variables if necessary. This includes
// values taken from -var-file in addition.
variables := make(map[string]interface{})
if opts.Module != nil {
var err error
variables, err = Variables(opts.Module, opts.Variables)
if err != nil {
return nil, err
}
var variables InputValues
if opts.Config != nil {
// Default variables from the configuration seed our map.
variables = DefaultVariableValues(opts.Config.Module.Variables)
}
// Variables provided by the caller (from CLI, environment, etc) can
// override the defaults.
variables = variables.Override(opts.Variables)
// Bind available provider plugins to the constraints in config
var providers map[string]ResourceProviderFactory
if opts.ProviderResolver != nil {
var err error
deps := ModuleTreeDependencies(opts.Module, state)
deps := ConfigTreeDependencies(opts.Config, state)
reqd := deps.AllPluginRequirements()
if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify {
reqd.LockExecutables(opts.ProviderSHA256s)
}
providers, err = resourceProviderFactories(opts.ProviderResolver, reqd)
if err != nil {
return nil, err
diags = diags.Append(err)
return nil, diags
}
} else {
providers = make(map[string]ResourceProviderFactory)
@ -208,7 +215,7 @@ func NewContext(opts *ContextOpts) (*Context, error) {
diff: diff,
hooks: hooks,
meta: opts.Meta,
module: opts.Module,
config: opts.Config,
shadow: opts.Shadow,
state: state,
targets: opts.Targets,
@ -216,7 +223,7 @@ func NewContext(opts *ContextOpts) (*Context, error) {
variables: variables,
parallelSem: NewSemaphore(par),
providerInputConfig: make(map[string]map[string]interface{}),
providerInputConfig: make(map[string]map[string]cty.Value),
providerSHA256s: opts.ProviderSHA256s,
sh: sh,
}, nil
@ -233,7 +240,7 @@ type ContextGraphOpts struct {
// Graph returns the graph used for the given operation type.
//
// The most extensive or complex graph type is GraphTypePlan.
func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.Diagnostics) {
if opts == nil {
opts = &ContextGraphOpts{Validate: true}
}
@ -242,7 +249,7 @@ func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
switch typ {
case GraphTypeApply:
return (&ApplyGraphBuilder{
Module: c.module,
Config: c.config,
Diff: c.diff,
State: c.state,
Providers: c.components.ResourceProviders(),
@ -250,7 +257,7 @@ func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
Targets: c.targets,
Destroy: c.destroy,
Validate: opts.Validate,
}).Build(RootModulePath)
}).Build(addrs.RootModuleInstance)
case GraphTypeInput:
// The input graph is just a slightly modified plan graph
@ -261,7 +268,7 @@ func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
case GraphTypePlan:
// Create the plan graph builder
p := &PlanGraphBuilder{
Module: c.module,
Config: c.config,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
@ -280,27 +287,29 @@ func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
b = ValidateGraphBuilder(p)
}
return b.Build(RootModulePath)
return b.Build(addrs.RootModuleInstance)
case GraphTypePlanDestroy:
return (&DestroyPlanGraphBuilder{
Module: c.module,
Config: c.config,
State: c.state,
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
}).Build(addrs.RootModuleInstance)
case GraphTypeRefresh:
return (&RefreshGraphBuilder{
Module: c.module,
Config: c.config,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
}
}).Build(addrs.RootModuleInstance)
return nil, fmt.Errorf("unknown graph type: %s", typ)
default:
// Should never happen, because the above is exhaustive for all graph types.
panic(fmt.Errorf("unsupported graph type %s", typ))
}
}
// ShadowError returns any errors caught during a shadow operation.
@ -337,99 +346,91 @@ func (c *Context) State() *State {
return c.state.DeepCopy()
}
// Interpolater returns an Interpolater built on a copy of the state
// that can be used to test interpolation values.
func (c *Context) Interpolater() *Interpolater {
var varLock sync.Mutex
var stateLock sync.RWMutex
return &Interpolater{
// Evaluator returns an Evaluator that references this context's state, and
// that can be used to obtain data for expression evaluation within the
// receiving context.
func (c *Context) Evaluator() *Evaluator {
return &Evaluator{
Operation: walkApply,
Meta: c.meta,
Module: c.module,
State: c.state.DeepCopy(),
StateLock: &stateLock,
VariableValues: c.variables,
VariableValuesLock: &varLock,
Config: c.config,
State: c.state,
StateLock: &c.stateLock,
RootVariableValues: c.variables,
}
}
// Interpolater is no longer used. Use Evaluator instead.
//
// The interpolator returned from this function will return an error on any use.
func (c *Context) Interpolater() *Interpolater {
// FIXME: Remove this once all callers are updated to no longer use it.
return &Interpolater{}
}
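(Illustrative only, not part of this commit: a rough sketch of how a caller might use the new Evaluator where it previously used the Interpolater, based on the Scope/EvalExpr calls that appear later in this diff; expr stands for any hcl.Expression.)
eval := c.Evaluator()
scope := eval.Scope(addrs.RootModuleInstance, nil) // nil: no "self" object in scope
val, diags := scope.EvalExpr(expr, cty.DynamicPseudoType)
if diags.HasErrors() {
	// handle/report the evaluation errors
}
_ = val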
// Input asks for input to fill variables and provider configurations.
// This modifies the configuration in-place, so asking for Input twice
// may result in different UI output showing different current values.
func (c *Context) Input(mode InputMode) error {
func (c *Context) Input(mode InputMode) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
defer c.acquireRun("input")()
if mode&InputModeVar != 0 {
// Walk the variables first for the root module. We walk them in
// alphabetical order for UX reasons.
rootConf := c.module.Config()
names := make([]string, len(rootConf.Variables))
m := make(map[string]*config.Variable)
for i, v := range rootConf.Variables {
names[i] = v.Name
m[v.Name] = v
configs := c.config.Module.Variables
names := make([]string, 0, len(configs))
for name := range configs {
names = append(names, name)
}
sort.Strings(names)
Variables:
for _, n := range names {
// If we only care about unset variables, then if the variable
// is set, continue on.
v := configs[n]
// If we only care about unset variables, then we should skip any
// variable that is already set.
if mode&InputModeVarUnset != 0 {
if _, ok := c.variables[n]; ok {
continue
}
}
var valueType config.VariableType
v := m[n]
switch valueType = v.Type(); valueType {
case config.VariableTypeUnknown:
continue
case config.VariableTypeMap:
// OK
case config.VariableTypeList:
// OK
case config.VariableTypeString:
// OK
default:
panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
}
// If the variable is not already set, and the variable defines a
// default, use that for the value.
if _, ok := c.variables[n]; !ok {
if v.Default != nil {
c.variables[n] = v.Default.(string)
if _, isSet := c.variables[n]; isSet {
continue
}
}
// this should only happen during tests
if c.uiInput == nil {
log.Println("[WARN] Content.uiInput is nil")
log.Println("[WARN] Context.uiInput is nil during input walk")
continue
}
// Ask the user for a value for this variable
var value string
var rawValue string
retry := 0
for {
var err error
value, err = c.uiInput.Input(&InputOpts{
rawValue, err = c.uiInput.Input(&InputOpts{
Id: fmt.Sprintf("var.%s", n),
Query: fmt.Sprintf("var.%s", n),
Description: v.Description,
})
if err != nil {
return fmt.Errorf(
"Error asking for %s: %s", n, err)
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to request interactive input",
fmt.Sprintf("Terraform attempted to request a value for var.%s interactively, but encountered an error: %s.", n, err),
))
return diags
}
if value == "" && v.Required() {
if rawValue == "" && v.Default == cty.NilVal {
// Redo if it is required, but abort if we keep getting
// blank entries
if retry > 2 {
return fmt.Errorf("missing required value for %q", n)
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Required variable not assigned",
fmt.Sprintf("The variable %q is required, so Terraform cannot proceed without a defined value for it.", n),
))
continue Variables
}
retry++
continue
@ -438,18 +439,15 @@ func (c *Context) Input(mode InputMode) error {
break
}
// no value provided, so don't set the variable at all
if value == "" {
val, valDiags := v.ParsingMode.Parse(n, rawValue)
diags = diags.Append(valDiags)
if diags.HasErrors() {
continue
}
decoded, err := parseVariableAsHCL(n, value, valueType)
if err != nil {
return err
}
if decoded != nil {
c.variables[n] = decoded
c.variables[n] = &InputValue{
Value: val,
SourceType: ValueFromInput,
}
}
}
@ -458,16 +456,18 @@ func (c *Context) Input(mode InputMode) error {
// Build the graph
graph, err := c.Graph(GraphTypeInput, nil)
if err != nil {
return err
diags = diags.Append(err)
return diags
}
// Do the walk
if _, err := c.walk(graph, walkInput); err != nil {
return err
diags = diags.Append(err)
return diags
}
}
return nil
return diags
}
// Apply applies the changes represented by this context and returns
@ -484,23 +484,16 @@ func (c *Context) Input(mode InputMode) error {
// State() method. Currently the helper/resource testing framework relies
// on the absence of a returned state to determine if Destroy can be
// called, so that will need to be refactored before this can be changed.
func (c *Context) Apply() (*State, error) {
func (c *Context) Apply() (*State, tfdiags.Diagnostics) {
defer c.acquireRun("apply")()
// Check there are no empty target parameter values
for _, target := range c.targets {
if target == "" {
return nil, fmt.Errorf("Target parameter must not have empty value")
}
}
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeApply, nil)
if err != nil {
return nil, err
graph, diags := c.Graph(GraphTypeApply, nil)
if diags.HasErrors() {
return nil, diags
}
// Determine the operation
@ -510,15 +503,14 @@ func (c *Context) Apply() (*State, error) {
}
// Walk the graph
walker, err := c.walk(graph, operation)
if len(walker.ValidationErrors) > 0 {
err = multierror.Append(err, walker.ValidationErrors...)
}
walker, walkDiags := c.walk(graph, operation)
diags = diags.Append(walker.NonFatalDiagnostics)
diags = diags.Append(walkDiags)
// Clean out any unused things
c.state.prune()
return c.state, err
return c.state, diags
}
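(Illustrative only, not part of this commit: how a caller is expected to treat the new Apply signature. Even when the diagnostics contain errors the returned state may be partially updated, so it should still be persisted; tfCtx and saveState are hypothetical names.)
state, diags := tfCtx.Apply()
if state != nil {
	// Persist the possibly partially-updated state before reporting errors.
	saveState(state)
}
if diags.HasErrors() {
	return diags.Err()
}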
// Plan generates an execution plan for the given context.
@ -528,21 +520,22 @@ func (c *Context) Apply() (*State, error) {
//
// Plan also updates the diff of this context to be the diff generated
// by the plan, so Apply can be called after.
func (c *Context) Plan() (*Plan, error) {
func (c *Context) Plan() (*Plan, tfdiags.Diagnostics) {
defer c.acquireRun("plan")()
// Check there are no empty target parameter values
for _, target := range c.targets {
if target == "" {
return nil, fmt.Errorf("Target parameter must not have empty value")
}
// The Plan struct wants the legacy-style of targets as a simple []string,
// so we must shim that here.
legacyTargets := make([]string, len(c.targets))
for i, addr := range c.targets {
legacyTargets[i] = addr.String()
}
var diags tfdiags.Diagnostics
p := &Plan{
Module: c.module,
Vars: c.variables,
Config: c.config,
Vars: c.variables.JustValues(),
State: c.state,
Targets: c.targets,
Targets: legacyTargets,
TerraformVersion: version.String(),
ProviderSHA256s: c.providerSHA256s,
@ -581,15 +574,18 @@ func (c *Context) Plan() (*Plan, error) {
if c.destroy {
graphType = GraphTypePlanDestroy
}
graph, err := c.Graph(graphType, nil)
if err != nil {
return nil, err
graph, graphDiags := c.Graph(graphType, nil)
diags = diags.Append(graphDiags)
if graphDiags.HasErrors() {
return nil, diags
}
// Do the walk
walker, err := c.walk(graph, operation)
if err != nil {
return nil, err
walker, walkDiags := c.walk(graph, operation)
diags = diags.Append(walker.NonFatalDiagnostics)
diags = diags.Append(walkDiags)
if walkDiags.HasErrors() {
return nil, diags
}
p.Diff = c.diff
@ -604,23 +600,7 @@ func (c *Context) Plan() (*Plan, error) {
p.Diff.DeepCopy()
}
/*
// We don't do the reverification during the new destroy plan because
// it will use a different apply process.
if X_legacyGraph {
// Now that we have a diff, we can build the exact graph that Apply will use
// and catch any possible cycles during the Plan phase.
if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
return nil, err
}
}
*/
var errs error
if len(walker.ValidationErrors) > 0 {
errs = multierror.Append(errs, walker.ValidationErrors...)
}
return p, errs
return p, diags
}
// Refresh goes through all the resources in the state and refreshes them
@ -629,27 +609,29 @@ func (c *Context) Plan() (*Plan, error) {
//
// Even in the case an error is returned, the state may be returned and
// will potentially be partially updated.
func (c *Context) Refresh() (*State, error) {
func (c *Context) Refresh() (*State, tfdiags.Diagnostics) {
defer c.acquireRun("refresh")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeRefresh, nil)
if err != nil {
return nil, err
graph, diags := c.Graph(GraphTypeRefresh, nil)
if diags.HasErrors() {
return nil, diags
}
// Do the walk
if _, err := c.walk(graph, walkRefresh); err != nil {
return nil, err
_, walkDiags := c.walk(graph, walkRefresh)
diags = diags.Append(walkDiags)
if walkDiags.HasErrors() {
return nil, diags
}
// Clean out any unused things
c.state.prune()
return c.state, nil
return c.state, diags
}
// Stop stops the running task.
@ -681,26 +663,26 @@ func (c *Context) Stop() {
log.Printf("[WARN] terraform: stop complete")
}
// Validate validates the configuration and returns any warnings or errors.
// Validate performs semantic validation of the configuration, returning
// any warnings or errors.
//
// Syntax and structural checks are performed by the configuration loader,
// and so are not repeated here.
func (c *Context) Validate() tfdiags.Diagnostics {
defer c.acquireRun("validate")()
var diags tfdiags.Diagnostics
// Validate the configuration itself
diags = diags.Append(c.module.Validate())
// This only needs to be done for the root module, since inter-module
// variables are validated in the module tree.
if config := c.module.Config(); config != nil {
// Validate the user variables
for _, err := range smcUserVariables(config, c.variables) {
diags = diags.Append(err)
}
// Validate input variables. We do this only for the values supplied
// by the root module, since child module calls are validated when we
// visit their graph nodes.
if c.config != nil {
varDiags := checkInputVariables(c.config.Module.Variables, c.variables)
diags = diags.Append(varDiags)
}
// If we have errors at this point, the graphing has no chance,
// so just bail early.
// If we have errors at this point then we probably won't be able to
// construct a graph without producing redundant errors, so we'll halt early.
if diags.HasErrors() {
return diags
}
@ -709,48 +691,41 @@ func (c *Context) Validate() tfdiags.Diagnostics {
// We also validate the graph generated here, but this graph doesn't
// necessarily match the graph that Plan will generate, so we'll validate the
// graph again later after Planning.
graph, err := c.Graph(GraphTypeValidate, nil)
if err != nil {
diags = diags.Append(err)
graph, graphDiags := c.Graph(GraphTypeValidate, nil)
diags = diags.Append(graphDiags)
if graphDiags.HasErrors() {
return diags
}
// Walk
walker, err := c.walk(graph, walkValidate)
if err != nil {
diags = diags.Append(err)
}
sort.Strings(walker.ValidationWarnings)
sort.Slice(walker.ValidationErrors, func(i, j int) bool {
return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error()
})
for _, warn := range walker.ValidationWarnings {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range walker.ValidationErrors {
diags = diags.Append(err)
walker, walkDiags := c.walk(graph, walkValidate)
diags = diags.Append(walker.NonFatalDiagnostics)
diags = diags.Append(walkDiags)
if walkDiags.HasErrors() {
return diags
}
return diags
}
// Module returns the module tree associated with this context.
func (c *Context) Module() *module.Tree {
return c.module
// Config returns the configuration tree associated with this context.
func (c *Context) Config() *configs.Config {
return c.config
}
// Variables will return the mapping of variables that were defined
// for this Context. If Input was called, this mapping may be different
// than what was given.
func (c *Context) Variables() map[string]interface{} {
func (c *Context) Variables() InputValues {
return c.variables
}
// SetVariable sets a variable after a context has already been built.
func (c *Context) SetVariable(k string, v interface{}) {
c.variables[k] = v
func (c *Context) SetVariable(k string, v cty.Value) {
c.variables[k] = &InputValue{
Value: v,
SourceType: ValueFromCaller,
}
}
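(Illustrative usage, not part of this commit: SetVariable now takes a cty.Value directly; the variable names are hypothetical.)
c.SetVariable("instance_count", cty.NumberIntVal(3))
c.SetVariable("region", cty.StringVal("us-west-2"))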
func (c *Context) acquireRun(phase string) func() {
@ -807,7 +782,7 @@ func (c *Context) releaseRun() {
c.runContext = nil
}
func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, error) {
func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) {
// Keep track of the "real" context which is the context that does
// the real work: talking to real providers, modifying real state, etc.
realCtx := c
@ -824,13 +799,13 @@ func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalk
watchStop, watchWait := c.watchStop(walker)
// Walk the real graph, this will block until it completes
realErr := graph.Walk(walker)
diags := graph.Walk(walker)
// Close the channel so the watcher stops, and wait for it to return.
close(watchStop)
<-watchWait
return walker, realErr
return walker, diags
}
// watchStop immediately returns a `stop` and a `wait` chan after dispatching

View File

@ -1,7 +1,9 @@
package terraform
import (
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/tfdiags"
)
// ImportOpts are used as the configuration for Import.
@ -9,23 +11,23 @@ type ImportOpts struct {
// Targets are the targets to import
Targets []*ImportTarget
// Module is optional, and specifies a config module that is loaded
// into the graph and evaluated. The use case for this is to provide
// provider configuration.
Module *module.Tree
// Config is optional, and specifies a config tree that will be loaded
// into the graph and evaluated. This is the source for provider
// configurations.
Config *configs.Config
}
// ImportTarget is a single resource to import.
type ImportTarget struct {
// Addr is the full resource address of the resource to import.
// Example: "module.foo.aws_instance.bar"
Addr string
// Addr is the address for the resource instance that the new object should
// be imported into.
Addr addrs.AbsResourceInstance
// ID is the ID of the resource to import. This is resource-specific.
ID string
// Provider string
Provider string
// ProviderAddr is the address of the provider that should handle the import.
ProviderAddr addrs.AbsProviderConfig
}
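(Illustrative sketch, not part of this commit: constructing an ImportTarget with the new addrs types. The resource address helpers are assumed to behave as their use elsewhere in this diff suggests, and the resource name and ID are made up.)
target := &ImportTarget{
	Addr: addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "aws_instance",
		Name: "example",
	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
	ID: "i-0123456789abcdef0",
}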
// Import takes already-created external resources and brings them
@ -38,7 +40,9 @@ type ImportTarget struct {
// Further, this operation also gracefully handles partial state. If during
// an import there is a failure, all previously imported resources remain
// imported.
func (c *Context) Import(opts *ImportOpts) (*State, error) {
func (c *Context) Import(opts *ImportOpts) (*State, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
// Hold a lock since we can modify our own state here
defer c.acquireRun("import")()
@ -47,31 +51,34 @@ func (c *Context) Import(opts *ImportOpts) (*State, error) {
// If no module is given, default to the module configured with
// the Context.
module := opts.Module
if module == nil {
module = c.module
config := opts.Config
if config == nil {
config = c.config
}
// Initialize our graph builder
builder := &ImportGraphBuilder{
ImportTargets: opts.Targets,
Module: module,
Config: config,
Providers: c.components.ResourceProviders(),
}
// Build the graph!
graph, err := builder.Build(RootModulePath)
if err != nil {
return c.state, err
graph, graphDiags := builder.Build(addrs.RootModuleInstance)
diags = diags.Append(graphDiags)
if graphDiags.HasErrors() {
return c.state, diags
}
// Walk it
if _, err := c.walk(graph, walkImport); err != nil {
return c.state, err
_, walkDiags := c.walk(graph, walkImport)
diags = diags.Append(walkDiags)
if walkDiags.HasErrors() {
return c.state, diags
}
// Clean the state
c.state.prune()
return c.state, nil
return c.state, diags
}

View File

@ -10,6 +10,8 @@ import (
"strings"
"sync"
"github.com/hashicorp/terraform/addrs"
"github.com/mitchellh/copystructure"
)
@ -69,8 +71,24 @@ func (d *Diff) Prune() {
//
// This should be the preferred method to add module diffs since it
// allows us to optimize lookups later as well as control sorting.
func (d *Diff) AddModule(path []string) *ModuleDiff {
m := &ModuleDiff{Path: path}
func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff {
// Lower the new-style address into a legacy-style address.
// This requires that none of the steps have instance keys, which is
// true for all addresses at the time of implementing this because
// "count" and "for_each" are not yet implemented for modules.
legacyPath := make([]string, len(path))
for i, step := range path {
if step.InstanceKey != addrs.NoKey {
// FIXME: Once the rest of Terraform is ready to use count and
// for_each, remove all of this and just write the addrs.ModuleInstance
// value itself into the ModuleState.
panic("diff cannot represent modules with count or for_each keys")
}
legacyPath[i] = step.Name
}
m := &ModuleDiff{Path: legacyPath}
m.init()
d.Modules = append(d.Modules, m)
return m
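(Illustrative only, not part of this commit: what the legacy-path lowering above produces for a nested module instance without instance keys; the module names are hypothetical.)
path := addrs.RootModuleInstance.Child("network", addrs.NoKey).Child("subnets", addrs.NoKey)
legacyPath := make([]string, len(path))
for i, step := range path {
	legacyPath[i] = step.Name
}
// legacyPath is now []string{"network", "subnets"}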
@ -79,7 +97,7 @@ func (d *Diff) AddModule(path []string) *ModuleDiff {
// ModuleByPath is used to lookup the module diff for the given path.
// This should be the preferred lookup mechanism as it allows for future
// lookup optimizations.
func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff {
if d == nil {
return nil
}
@ -87,7 +105,8 @@ func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
if mod.Path == nil {
panic("missing module path")
}
if reflect.DeepEqual(mod.Path, path) {
modPath := normalizeModulePath(mod.Path)
if modPath.String() == path.String() {
return mod
}
}
@ -96,7 +115,7 @@ func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
// RootModule returns the ModuleState for the root module
func (d *Diff) RootModule() *ModuleDiff {
root := d.ModuleByPath(rootModulePath)
root := d.ModuleByPath(addrs.RootModuleInstance)
if root == nil {
panic("missing root module")
}

View File

@ -2,7 +2,6 @@ package terraform
import (
"log"
"strings"
)
// EvalNode is the interface that must be implemented by graph nodes to
@ -46,7 +45,10 @@ func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
path := "unknown"
if ctx != nil {
path = strings.Join(ctx.Path(), ".")
path = ctx.Path().String()
}
if path == "" {
path = "<root>"
}
log.Printf("[TRACE] %s: eval: %T", path, n)

View File

@ -3,16 +3,20 @@ package terraform
import (
"fmt"
"log"
"strconv"
"github.com/hashicorp/go-multierror"
"github.com/zclconf/go-cty/cty/gocty"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/tfdiags"
)
// EvalApply is an EvalNode implementation that writes the diff to
// the full diff.
type EvalApply struct {
Info *InstanceInfo
Addr addrs.ResourceInstance
State **InstanceState
Diff **InstanceDiff
Provider *ResourceProvider
@ -27,10 +31,11 @@ func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
provider := *n.Provider
state := *n.State
// If we have no diff, we have nothing to do!
// The provider API still expects our legacy InstanceInfo type, so we must shim it.
legacyInfo := NewInstanceInfo(n.Addr.Absolute(ctx.Path()).ContainingResource())
if diff.Empty() {
log.Printf(
"[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id)
log.Printf("[DEBUG] apply %s: diff is empty, so skipping.", n.Addr)
return nil, nil
}
@ -53,8 +58,8 @@ func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
}
// With the completed diff, apply!
log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id)
state, err := provider.Apply(n.Info, state, diff)
log.Printf("[DEBUG] apply %s: executing Apply", n.Addr)
state, err := provider.Apply(legacyInfo, state, diff)
if state == nil {
state = new(InstanceState)
}
@ -84,7 +89,7 @@ func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
// if we have one, otherwise we just output it.
if err != nil {
if n.Error != nil {
helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error())
helpfulErr := fmt.Errorf("%s: %s", n.Addr, err.Error())
*n.Error = multierror.Append(*n.Error, helpfulErr)
} else {
return nil, err
@ -96,7 +101,7 @@ func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
// EvalApplyPre is an EvalNode implementation that does the pre-Apply work
type EvalApplyPre struct {
Info *InstanceInfo
Addr addrs.ResourceInstance
State **InstanceState
Diff **InstanceDiff
}
@ -106,16 +111,20 @@ func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
state := *n.State
diff := *n.Diff
// The hook API still uses our legacy InstanceInfo type, so we must
// shim it.
legacyInfo := NewInstanceInfo(n.Addr.ContainingResource().Absolute(ctx.Path()))
// If the state is nil, make it non-nil
if state == nil {
state = new(InstanceState)
}
state.init()
if resourceHasUserVisibleApply(n.Info) {
if resourceHasUserVisibleApply(legacyInfo) {
// Call pre-apply hook
err := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PreApply(n.Info, state, diff)
return h.PreApply(legacyInfo, state, diff)
})
if err != nil {
return nil, err
@ -127,7 +136,7 @@ func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
// EvalApplyPost is an EvalNode implementation that does the post-Apply work
type EvalApplyPost struct {
Info *InstanceInfo
Addr addrs.ResourceInstance
State **InstanceState
Error *error
}
@ -136,10 +145,14 @@ type EvalApplyPost struct {
func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
state := *n.State
if resourceHasUserVisibleApply(n.Info) {
// The hook API still uses our legacy InstanceInfo type, so we must
// shim it.
legacyInfo := NewInstanceInfo(n.Addr.ContainingResource().Absolute(ctx.Path()))
if resourceHasUserVisibleApply(legacyInfo) {
// Call post-apply hook
err := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostApply(n.Info, state, *n.Error)
return h.PostApply(legacyInfo, state, *n.Error)
})
if err != nil {
return nil, err
@ -171,21 +184,23 @@ func resourceHasUserVisibleApply(info *InstanceInfo) bool {
// TODO(mitchellh): This should probably be split up into a more fine-grained
// ApplyProvisioner (single) that is looped over.
type EvalApplyProvisioners struct {
Info *InstanceInfo
Addr addrs.ResourceInstance
State **InstanceState
Resource *config.Resource
InterpResource *Resource
ResourceConfig *configs.Resource
CreateNew *bool
Error *error
// When is the type of provisioner to run at this point
When config.ProvisionerWhen
When configs.ProvisionerWhen
}
// TODO: test
func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
state := *n.State
// The hook API still uses the legacy InstanceInfo type, so we need to shim it.
legacyInfo := NewInstanceInfo(n.Addr.Resource.Absolute(ctx.Path()))
if n.CreateNew != nil && !*n.CreateNew {
// If we're not creating a new resource, then don't run provisioners
return nil, nil
@ -198,7 +213,7 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
}
// taint tells us whether to enable tainting.
taint := n.When == config.ProvisionerWhenCreate
taint := n.When == configs.ProvisionerWhenCreate
if n.Error != nil && *n.Error != nil {
if taint {
@ -212,7 +227,7 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
{
// Call pre hook
err := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PreProvisionResource(n.Info, state)
return h.PreProvisionResource(legacyInfo, state)
})
if err != nil {
return nil, err
@ -234,7 +249,7 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
{
// Call post hook
err := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostProvisionResource(n.Info, state)
return h.PostProvisionResource(legacyInfo, state)
})
if err != nil {
return nil, err
@ -246,18 +261,18 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
// filterProvisioners filters the provisioners on the resource to only
// the provisioners specified by the "when" option.
func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
func (n *EvalApplyProvisioners) filterProvisioners() []*configs.Provisioner {
// Fast path the zero case
if n.Resource == nil {
if n.ResourceConfig == nil || n.ResourceConfig.Managed == nil {
return nil
}
if len(n.Resource.Provisioners) == 0 {
if len(n.ResourceConfig.Managed.Provisioners) == 0 {
return nil
}
result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners))
for _, p := range n.Resource.Provisioners {
result := make([]*configs.Provisioner, 0, len(n.ResourceConfig.Managed.Provisioners))
for _, p := range n.ResourceConfig.Managed.Provisioners {
if p.When == n.When {
result = append(result, p)
}
@ -266,64 +281,68 @@ func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
return result
}
func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error {
func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error {
instanceAddr := n.Addr
state := *n.State
// The hook API still uses the legacy InstanceInfo type, so we need to shim it.
legacyInfo := NewInstanceInfo(n.Addr.Resource.Absolute(ctx.Path()))
// Store the original connection info, restore later
origConnInfo := state.Ephemeral.ConnInfo
defer func() {
state.Ephemeral.ConnInfo = origConnInfo
}()
var diags tfdiags.Diagnostics
for _, prov := range provs {
// Get the provisioner
provisioner := ctx.Provisioner(prov.Type)
schema := ctx.ProvisionerSchema(prov.Type)
// Interpolate the provisioner config
provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource)
if err != nil {
return err
// Evaluate the main provisioner configuration.
config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr)
diags = diags.Append(configDiags)
connInfo, _, connInfoDiags := ctx.EvaluateBlock(prov.Config, connectionBlockSupersetSchema, instanceAddr)
diags = diags.Append(connInfoDiags)
if configDiags.HasErrors() || connInfoDiags.HasErrors() {
continue
}
// Interpolate the conn info, since it may contain variables
connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource)
if err != nil {
return err
}
// Merge the connection information
// Merge the connection information, and also lower everything to strings
// for compatibility with the communicator API.
overlay := make(map[string]string)
if origConnInfo != nil {
for k, v := range origConnInfo {
overlay[k] = v
}
}
for k, v := range connInfo.Config {
switch vt := v.(type) {
case string:
overlay[k] = vt
case int64:
overlay[k] = strconv.FormatInt(vt, 10)
case int32:
overlay[k] = strconv.FormatInt(int64(vt), 10)
case int:
overlay[k] = strconv.FormatInt(int64(vt), 10)
case float32:
overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32)
case float64:
overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64)
case bool:
overlay[k] = strconv.FormatBool(vt)
default:
overlay[k] = fmt.Sprintf("%v", vt)
for it := connInfo.ElementIterator(); it.Next(); {
kv, vv := it.Element()
var k, v string
err := gocty.FromCtyValue(kv, &k)
if err != nil {
// Should never happen, because connectionBlockSupersetSchema requires all primitives
panic(err)
}
err = gocty.FromCtyValue(vv, &v)
if err != nil {
// Should never happen, because connectionBlockSupersetSchema requires all primitives
panic(err)
}
overlay[k] = v
}
state.Ephemeral.ConnInfo = overlay
{
// Call pre hook
err := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PreProvision(n.Info, prov.Type)
return h.PreProvision(legacyInfo, prov.Type)
})
if err != nil {
return err
@ -333,30 +352,31 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provision
// The output function
outputFn := func(msg string) {
ctx.Hook(func(h Hook) (HookAction, error) {
h.ProvisionOutput(n.Info, prov.Type, msg)
h.ProvisionOutput(legacyInfo, prov.Type, msg)
return HookActionContinue, nil
})
}
// The provisioner API still uses our legacy ResourceConfig type, so
// we need to shim it.
legacyRC := NewResourceConfigShimmed(config, schema)
// Invoke the Provisioner
output := CallbackUIOutput{OutputFn: outputFn}
applyErr := provisioner.Apply(&output, state, provConfig)
applyErr := provisioner.Apply(&output, state, legacyRC)
// Call post hook
hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostProvision(n.Info, prov.Type, applyErr)
return h.PostProvision(legacyInfo, prov.Type, applyErr)
})
// Handle the error before we deal with the hook
if applyErr != nil {
// Determine failure behavior
switch prov.OnFailure {
case config.ProvisionerOnFailureContinue:
log.Printf(
"[INFO] apply: %s [%s]: error during provision, continue requested",
n.Info.Id, prov.Type)
case config.ProvisionerOnFailureFail:
case configs.ProvisionerOnFailureContinue:
log.Printf("[INFO] apply %s [%s]: error during provision, but continuing as requested in configuration", n.Addr, prov.Type)
case configs.ProvisionerOnFailureFail:
return applyErr
}
}
@ -367,6 +387,5 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provision
}
}
return nil
return diags.ErrWithWarnings()
}
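(Aside, not part of this commit: the loop above lowers connection values to strings with gocty.FromCtyValue; a minimal standalone illustration of that call.)
var port string
if err := gocty.FromCtyValue(cty.StringVal("22"), &port); err == nil {
	// port == "22"
}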

View File

@ -3,33 +3,42 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/tfdiags"
)
// EvalPreventDestroy is an EvalNode implementation that returns an
// error if a resource has PreventDestroy configured and the diff
// would destroy the resource.
type EvalCheckPreventDestroy struct {
Resource *config.Resource
ResourceId string
Diff **InstanceDiff
Addr addrs.ResourceInstance
Config *configs.Resource
Diff **InstanceDiff
}
func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
if n.Diff == nil || *n.Diff == nil || n.Resource == nil {
if n.Diff == nil || *n.Diff == nil || n.Config == nil || n.Config.Managed == nil {
return nil, nil
}
diff := *n.Diff
preventDestroy := n.Resource.Lifecycle.PreventDestroy
preventDestroy := n.Config.Managed.PreventDestroy
if diff.GetDestroy() && preventDestroy {
resourceId := n.ResourceId
if resourceId == "" {
resourceId = n.Resource.Id()
}
return nil, fmt.Errorf(preventDestroyErrStr, resourceId)
var diags tfdiags.Diagnostics
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Instance cannot be destroyed",
Detail: fmt.Sprintf(
"Resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the scope of the plan using the -target flag.",
n.Addr.Absolute(ctx.Path()).String(),
),
Subject: &n.Config.DeclRange,
})
return nil, diags.Err()
}
return nil, nil

View File

@ -3,7 +3,11 @@ package terraform
import (
"sync"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
)
// EvalContext is the interface that is given to eval nodes to execute.
@ -13,7 +17,7 @@ type EvalContext interface {
Stopped() <-chan struct{}
// Path is the current module path.
Path() []string
Path() addrs.ModuleInstance
// Hook is used to call hook methods. The callback is called for each
// hook and should return the hook action to take and the error.
@ -22,33 +26,41 @@ type EvalContext interface {
// Input is the UIInput object for interacting with the UI.
Input() UIInput
// InitProvider initializes the provider with the given type and name, and
// InitProvider initializes the provider with the given type and address, and
// returns the implementation of the resource provider or an error.
//
// It is an error to initialize the same provider more than once.
InitProvider(typ string, name string) (ResourceProvider, error)
InitProvider(typ string, addr addrs.ProviderConfig) (ResourceProvider, error)
// Provider gets the provider instance with the given name (already
// Provider gets the provider instance with the given address (already
// initialized) or returns nil if the provider isn't initialized.
Provider(string) ResourceProvider
//
// This method expects an _absolute_ provider configuration address, since
// resources in one module are able to use providers from other modules.
// InitProvider must've been called on the EvalContext of the module
// that owns the given provider before calling this method.
Provider(addrs.AbsProviderConfig) ResourceProvider
// ProviderSchema retrieves the schema for a particular provider, which
// must have already be initialized with InitProvider.
ProviderSchema(string) *ProviderSchema
// must have already been initialized with InitProvider.
//
// This method expects an _absolute_ provider configuration address, since
// resources in one module are able to use providers from other modules.
ProviderSchema(addrs.AbsProviderConfig) *ProviderSchema
// CloseProvider closes provider connections that aren't needed anymore.
CloseProvider(string) error
CloseProvider(addrs.ProviderConfig) error
// ConfigureProvider configures the provider with the given
// configuration. This is a separate context call because this call
// is used to store the provider configuration for inheritance lookups
// with ParentProviderConfig().
ConfigureProvider(string, *ResourceConfig) error
ConfigureProvider(addrs.ProviderConfig, cty.Value) tfdiags.Diagnostics
// ProviderInput and SetProviderInput are used to configure providers
// from user input.
ProviderInput(string) map[string]interface{}
SetProviderInput(string, map[string]interface{})
ProviderInput(addrs.ProviderConfig) map[string]cty.Value
SetProviderInput(addrs.ProviderConfig, map[string]cty.Value)
// InitProvisioner initializes the provisioner with the given name and
// returns the implementation of the resource provisioner or an error.
@ -60,28 +72,43 @@ type EvalContext interface {
// initialized) or returns nil if the provisioner isn't initialized.
Provisioner(string) ResourceProvisioner
// ProvisionerSchema retrieves the main configuration schema for a
// particular provisioner, which must have already been initialized with
// InitProvisioner.
ProvisionerSchema(string) *configschema.Block
// CloseProvisioner closes provisioner connections that aren't needed
// anymore.
CloseProvisioner(string) error
// Interpolate takes the given raw configuration and completes
// the interpolations, returning the processed ResourceConfig.
// EvaluateBlock takes the given raw configuration block and associated
// schema and evaluates it to produce a value of an object type that
// conforms to the implied type of the schema.
//
// The resource argument is optional. If given, it is the resource
// that is currently being acted upon.
Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error)
// The "self" argument is optional. If given, it is the referenceable
// address that the name "self" should behave as an alias for when
// evaluating. Set this to nil if the "self" object should not be available.
//
// The returned body is an expanded version of the given body, with any
// "dynamic" blocks replaced with zero or more static blocks. This can be
// used to extract correct source location information about attributes of
// the returned object value.
EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable) (cty.Value, hcl.Body, tfdiags.Diagnostics)
// InterpolateProvider takes a ProviderConfig and interpolates it with the
// stored interpolation scope. Since provider configurations can be
// inherited, the interpolation scope may be different from the current
// context path. Interpolation is otherwise executed the same as in the
// Interpolation method.
InterpolateProvider(*config.ProviderConfig, *Resource) (*ResourceConfig, error)
// EvaluateExpr takes the given HCL expression and evaluates it to produce
// a value.
//
// The "self" argument is optional. If given, it is the referenceable
// address that the name "self" should behave as an alias for when
// evaluating. Set this to nil if the "self" object should not be available.
EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics)
// SetVariables sets the variables for the module within
// this context with the name n. This function call is additive:
// the second parameter is merged with any previous call.
SetVariables(string, map[string]interface{})
// SetModuleCallArguments defines values for the variables of a particular
// child module call.
//
// Calling this function multiple times has merging behavior, keeping any
// previously-set keys that are not present in the new map.
SetModuleCallArguments(addrs.ModuleCallInstance, map[string]cty.Value)
// Diff returns the global diff as well as the lock that should
// be used to modify that diff.
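(Illustrative only, not part of this commit: how an EvalNode implementation might call the new EvaluateBlock method; body and schema are assumed to come from the node's configuration block and its provider schema respectively.)
configVal, expandedBody, diags := ctx.EvaluateBlock(body, schema, nil) // nil: no "self" address
if diags.HasErrors() {
	return nil, diags.Err()
}
// expandedBody has any "dynamic" blocks already expanded, so its source
// ranges can be used for follow-up error reporting.
_ = configVal
_ = expandedBody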

View File

@ -6,7 +6,12 @@ import (
"log"
"sync"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/terraform/addrs"
"github.com/zclconf/go-cty/cty"
)
// BuiltinEvalContext is an EvalContext implementation that is used by
@ -16,27 +21,24 @@ type BuiltinEvalContext struct {
StopContext context.Context
// PathValue is the Path that this context is operating within.
PathValue []string
PathValue addrs.ModuleInstance
// Interpolater setting below affect the interpolation of variables.
//
// The InterpolaterVars are the exact value for ${var.foo} values.
// The map is shared between all contexts and is a mapping of
// PATH to KEY to VALUE. Because it is shared by all contexts as well
// as the Interpolater itself, it is protected by InterpolaterVarLock
// which must be locked during any access to the map.
Interpolater *Interpolater
InterpolaterVars map[string]map[string]interface{}
InterpolaterVarLock *sync.Mutex
// Evaluator is used for evaluating expressions within the scope of this
// eval context.
Evaluator *Evaluator
ChildModuleCallArgs map[string]map[string]cty.Value
ChildModuleCallsLock *sync.Mutex
Components contextComponentFactory
Hooks []Hook
InputValue UIInput
ProviderCache map[string]ResourceProvider
ProviderSchemas map[string]*ProviderSchema
ProviderInputConfig map[string]map[string]interface{}
ProviderInputConfig map[string]map[string]cty.Value
ProviderLock *sync.Mutex
ProvisionerCache map[string]ResourceProvisioner
ProvisionerSchemas map[string]*configschema.Block
ProvisionerLock *sync.Mutex
DiffValue *Diff
DiffLock *sync.RWMutex
@ -46,6 +48,9 @@ type BuiltinEvalContext struct {
once sync.Once
}
// BuiltinEvalContext implements EvalContext
var _ EvalContext = (*BuiltinEvalContext)(nil)
func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} {
// This can happen during tests. During tests, we just block forever.
if ctx.StopContext == nil {
@ -79,12 +84,13 @@ func (ctx *BuiltinEvalContext) Input() UIInput {
return ctx.InputValue
}
func (ctx *BuiltinEvalContext) InitProvider(typeName, name string) (ResourceProvider, error) {
func (ctx *BuiltinEvalContext) InitProvider(typeName string, addr addrs.ProviderConfig) (ResourceProvider, error) {
ctx.once.Do(ctx.init)
absAddr := addr.Absolute(ctx.Path())
// If we already initialized, it is an error
if p := ctx.Provider(name); p != nil {
return nil, fmt.Errorf("Provider '%s' already initialized", name)
if p := ctx.Provider(absAddr); p != nil {
return nil, fmt.Errorf("%s is already initialized", addr)
}
// Warning: make sure to acquire these locks AFTER the call to Provider
@ -92,12 +98,14 @@ func (ctx *BuiltinEvalContext) InitProvider(typeName, name string) (ResourceProv
ctx.ProviderLock.Lock()
defer ctx.ProviderLock.Unlock()
p, err := ctx.Components.ResourceProvider(typeName, name)
key := addr.String()
p, err := ctx.Components.ResourceProvider(typeName, key)
if err != nil {
return nil, err
}
ctx.ProviderCache[name] = p
ctx.ProviderCache[key] = p
// Also fetch and cache the provider's schema.
// FIXME: This is using a non-ideal provider API that requires us to
@ -120,45 +128,46 @@ func (ctx *BuiltinEvalContext) InitProvider(typeName, name string) (ResourceProv
ResourceTypes: resourceTypeNames,
})
if err != nil {
return nil, fmt.Errorf("error fetching schema for %s: %s", name, err)
return nil, fmt.Errorf("error fetching schema for %s: %s", key, err)
}
if ctx.ProviderSchemas == nil {
ctx.ProviderSchemas = make(map[string]*ProviderSchema)
}
ctx.ProviderSchemas[name] = schema
ctx.ProviderSchemas[key] = schema
return p, nil
}
func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider {
func (ctx *BuiltinEvalContext) Provider(addr addrs.AbsProviderConfig) ResourceProvider {
ctx.once.Do(ctx.init)
ctx.ProviderLock.Lock()
defer ctx.ProviderLock.Unlock()
return ctx.ProviderCache[n]
return ctx.ProviderCache[addr.String()]
}
func (ctx *BuiltinEvalContext) ProviderSchema(n string) *ProviderSchema {
func (ctx *BuiltinEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema {
ctx.once.Do(ctx.init)
ctx.ProviderLock.Lock()
defer ctx.ProviderLock.Unlock()
return ctx.ProviderSchemas[n]
return ctx.ProviderSchemas[addr.String()]
}
func (ctx *BuiltinEvalContext) CloseProvider(n string) error {
func (ctx *BuiltinEvalContext) CloseProvider(addr addrs.ProviderConfig) error {
ctx.once.Do(ctx.init)
ctx.ProviderLock.Lock()
defer ctx.ProviderLock.Unlock()
key := addr.String()
var provider interface{}
provider = ctx.ProviderCache[n]
provider = ctx.ProviderCache[key]
if provider != nil {
if p, ok := provider.(ResourceProviderCloser); ok {
delete(ctx.ProviderCache, n)
delete(ctx.ProviderCache, key)
return p.Close()
}
}
@ -166,28 +175,32 @@ func (ctx *BuiltinEvalContext) CloseProvider(n string) error {
return nil
}
func (ctx *BuiltinEvalContext) ConfigureProvider(
n string, cfg *ResourceConfig) error {
p := ctx.Provider(n)
func (ctx *BuiltinEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
absAddr := addr.Absolute(ctx.Path())
p := ctx.Provider(absAddr)
if p == nil {
return fmt.Errorf("Provider '%s' not initialized", n)
diags = diags.Append(fmt.Errorf("%s not initialized", addr))
return diags
}
return p.Configure(cfg)
// FIXME: The provider API isn't yet updated to take a cty.Value directly.
rc := NewResourceConfigShimmed(cfg, ctx.ProviderSchema(absAddr).Provider)
err := p.Configure(rc)
if err != nil {
diags = diags.Append(err)
}
return diags
}
func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} {
func (ctx *BuiltinEvalContext) ProviderInput(pc addrs.ProviderConfig) map[string]cty.Value {
ctx.ProviderLock.Lock()
defer ctx.ProviderLock.Unlock()
// Make a copy of the path so we can safely edit it
// Go up the module tree, looking for input results for the given provider
// configuration.
path := ctx.Path()
pathCopy := make([]string, len(path)+1)
copy(pathCopy, path)
// Go up the tree.
for i := len(path) - 1; i >= 0; i-- {
pathCopy[i+1] = n
k := PathCacheKey(pathCopy[:i+2])
for i := len(path); i >= 0; i-- {
k := pc.Absolute(path[:i]).String()
if v, ok := ctx.ProviderInputConfig[k]; ok {
return v
}
@ -196,19 +209,16 @@ func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} {
return nil
}
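(Illustrative only, not part of this commit: the lookup order the loop above produces for a provider configured in a child module. The module and provider names are hypothetical, and the exact address string format is assumed from addrs.AbsProviderConfig.String.)
pc := addrs.ProviderConfig{Type: "aws"}
path := addrs.RootModuleInstance.Child("network", addrs.NoKey)
for i := len(path); i >= 0; i-- {
	// Tries module.network.provider.aws first, then provider.aws at the root.
	log.Printf("[TRACE] provider input lookup key: %s", pc.Absolute(path[:i]).String())
}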
func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) {
providerPath := make([]string, len(ctx.Path())+1)
copy(providerPath, ctx.Path())
providerPath[len(providerPath)-1] = n
func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.ProviderConfig, c map[string]cty.Value) {
absProvider := pc.Absolute(ctx.Path())
// Save the configuration
ctx.ProviderLock.Lock()
ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c
ctx.ProviderInputConfig[absProvider.String()] = c
ctx.ProviderLock.Unlock()
}
func (ctx *BuiltinEvalContext) InitProvisioner(
n string) (ResourceProvisioner, error) {
func (ctx *BuiltinEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) {
ctx.once.Do(ctx.init)
// If we already initialized, it is an error
@ -221,10 +231,7 @@ func (ctx *BuiltinEvalContext) InitProvisioner(
ctx.ProvisionerLock.Lock()
defer ctx.ProvisionerLock.Unlock()
provPath := make([]string, len(ctx.Path())+1)
copy(provPath, ctx.Path())
provPath[len(provPath)-1] = n
key := PathCacheKey(provPath)
key := PathObjectCacheKey(ctx.Path(), n)
p, err := ctx.Components.ResourceProvisioner(n, key)
if err != nil {
@ -232,6 +239,17 @@ func (ctx *BuiltinEvalContext) InitProvisioner(
}
ctx.ProvisionerCache[key] = p
// Also fetch the provisioner's schema
schema, err := p.GetConfigSchema()
if err != nil {
return nil, fmt.Errorf("error getting schema for provisioner %q: %s", n, err)
}
if ctx.ProvisionerSchemas == nil {
ctx.ProvisionerSchemas = make(map[string]*configschema.Block)
}
ctx.ProvisionerSchemas[n] = schema
return p, nil
}
@ -241,11 +259,17 @@ func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner {
ctx.ProvisionerLock.Lock()
defer ctx.ProvisionerLock.Unlock()
provPath := make([]string, len(ctx.Path())+1)
copy(provPath, ctx.Path())
provPath[len(provPath)-1] = n
key := PathObjectCacheKey(ctx.Path(), n)
return ctx.ProvisionerCache[key]
}
return ctx.ProvisionerCache[PathCacheKey(provPath)]
func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) *configschema.Block {
ctx.once.Do(ctx.init)
ctx.ProviderLock.Lock()
defer ctx.ProviderLock.Unlock()
return ctx.ProvisionerSchemas[n]
}
func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
@ -254,15 +278,13 @@ func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
ctx.ProvisionerLock.Lock()
defer ctx.ProvisionerLock.Unlock()
provPath := make([]string, len(ctx.Path())+1)
copy(provPath, ctx.Path())
provPath[len(provPath)-1] = n
key := PathObjectCacheKey(ctx.Path(), n)
var prov interface{}
prov = ctx.ProvisionerCache[PathCacheKey(provPath)]
prov = ctx.ProvisionerCache[key]
if prov != nil {
if p, ok := prov.(ResourceProvisionerCloser); ok {
delete(ctx.ProvisionerCache, PathCacheKey(provPath))
delete(ctx.ProvisionerCache, key)
return p.Close()
}
}
@ -270,81 +292,40 @@ func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
return nil
}
func (ctx *BuiltinEvalContext) Interpolate(
cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) {
if cfg != nil {
scope := &InterpolationScope{
Path: ctx.Path(),
Resource: r,
}
vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
if err != nil {
return nil, err
}
// Do the interpolation
if err := cfg.Interpolate(vs); err != nil {
return nil, err
}
}
result := NewResourceConfig(cfg)
result.interpolateForce()
return result, nil
func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable) (cty.Value, hcl.Body, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
scope := ctx.Evaluator.Scope(ctx.PathValue, self)
body, evalDiags := scope.ExpandBlock(body, schema)
diags = diags.Append(evalDiags)
val, evalDiags := scope.EvalBlock(body, schema)
diags = diags.Append(evalDiags)
return val, body, diags
}
func (ctx *BuiltinEvalContext) InterpolateProvider(
pc *config.ProviderConfig, r *Resource) (*ResourceConfig, error) {
var cfg *config.RawConfig
if pc != nil && pc.RawConfig != nil {
scope := &InterpolationScope{
Path: ctx.Path(),
Resource: r,
}
cfg = pc.RawConfig
vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
if err != nil {
return nil, err
}
// Do the interpolation
if err := cfg.Interpolate(vs); err != nil {
return nil, err
}
}
result := NewResourceConfig(cfg)
result.interpolateForce()
return result, nil
func (ctx *BuiltinEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) {
scope := ctx.Evaluator.Scope(ctx.PathValue, self)
return scope.EvalExpr(expr, wantType)
}
func (ctx *BuiltinEvalContext) Path() []string {
func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance {
return ctx.PathValue
}
func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) {
ctx.InterpolaterVarLock.Lock()
defer ctx.InterpolaterVarLock.Unlock()
func (ctx *BuiltinEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, vals map[string]cty.Value) {
ctx.ChildModuleCallsLock.Lock()
defer ctx.ChildModuleCallsLock.Unlock()
path := make([]string, len(ctx.Path())+1)
copy(path, ctx.Path())
path[len(path)-1] = n
key := PathCacheKey(path)
childPath := ctx.Path().Child(n.Call.Name, n.Key)
key := childPath.String()
vars := ctx.InterpolaterVars[key]
if vars == nil {
vars = make(map[string]interface{})
ctx.InterpolaterVars[key] = vars
args := ctx.ChildModuleCallArgs[key]
if args == nil {
args = make(map[string]cty.Value)
ctx.ChildModuleCallArgs[key] = args
}
for k, v := range vs {
vars[k] = v
for k, v := range vals {
args[k] = v
}
}

View File

@ -3,6 +3,13 @@ package terraform
import (
"sync"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/config"
)
@ -20,34 +27,35 @@ type MockEvalContext struct {
InputInput UIInput
InitProviderCalled bool
InitProviderName string
InitProviderType string
InitProviderAddr addrs.ProviderConfig
InitProviderProvider ResourceProvider
InitProviderError error
ProviderCalled bool
ProviderName string
ProviderAddr addrs.AbsProviderConfig
ProviderProvider ResourceProvider
ProviderSchemaCalled bool
ProviderSchemaName string
ProviderSchemaAddr addrs.AbsProviderConfig
ProviderSchemaSchema *ProviderSchema
CloseProviderCalled bool
CloseProviderName string
CloseProviderAddr addrs.ProviderConfig
CloseProviderProvider ResourceProvider
ProviderInputCalled bool
ProviderInputName string
ProviderInputConfig map[string]interface{}
ProviderInputAddr addrs.ProviderConfig
ProviderInputValues map[string]cty.Value
SetProviderInputCalled bool
SetProviderInputName string
SetProviderInputConfig map[string]interface{}
SetProviderInputAddr addrs.ProviderConfig
SetProviderInputValues map[string]cty.Value
ConfigureProviderCalled bool
ConfigureProviderName string
ConfigureProviderConfig *ResourceConfig
ConfigureProviderError error
ConfigureProviderAddr addrs.ProviderConfig
ConfigureProviderConfig cty.Value
ConfigureProviderDiags tfdiags.Diagnostics
InitProvisionerCalled bool
InitProvisionerName string
@ -58,10 +66,29 @@ type MockEvalContext struct {
ProvisionerName string
ProvisionerProvisioner ResourceProvisioner
ProvisionerSchemaCalled bool
ProvisionerSchemaName string
ProvisionerSchemaSchema *configschema.Block
CloseProvisionerCalled bool
CloseProvisionerName string
CloseProvisionerProvisioner ResourceProvisioner
EvaluateBlockCalled bool
EvaluateBlockBody hcl.Body
EvaluateBlockSchema *configschema.Block
EvaluateBlockSelf addrs.Referenceable
EvaluateBlockResult cty.Value
EvaluateBlockExpandedBody hcl.Body
EvaluateBlockDiags tfdiags.Diagnostics
EvaluateExprCalled bool
EvaluateExprExpr hcl.Expression
EvaluateExprWantType cty.Type
EvaluateExprSelf addrs.Referenceable
EvaluateExprResult cty.Value
EvaluateExprDiags tfdiags.Diagnostics
InterpolateCalled bool
InterpolateConfig *config.RawConfig
InterpolateResource *Resource
@ -75,11 +102,11 @@ type MockEvalContext struct {
InterpolateProviderError error
PathCalled bool
PathPath []string
PathPath addrs.ModuleInstance
SetVariablesCalled bool
SetVariablesModule string
SetVariablesVariables map[string]interface{}
SetModuleCallArgumentsCalled bool
SetModuleCallArgumentsModule addrs.ModuleCallInstance
SetModuleCallArgumentsValues map[string]cty.Value
DiffCalled bool
DiffDiff *Diff
@ -90,6 +117,9 @@ type MockEvalContext struct {
StateLock *sync.RWMutex
}
// MockEvalContext implements EvalContext
var _ EvalContext = (*MockEvalContext)(nil)
func (c *MockEvalContext) Stopped() <-chan struct{} {
c.StoppedCalled = true
return c.StoppedValue
@ -111,47 +141,48 @@ func (c *MockEvalContext) Input() UIInput {
return c.InputInput
}
func (c *MockEvalContext) InitProvider(t, n string) (ResourceProvider, error) {
func (c *MockEvalContext) InitProvider(t string, addr addrs.ProviderConfig) (ResourceProvider, error) {
c.InitProviderCalled = true
c.InitProviderName = n
c.InitProviderType = t
c.InitProviderAddr = addr
return c.InitProviderProvider, c.InitProviderError
}
func (c *MockEvalContext) Provider(n string) ResourceProvider {
func (c *MockEvalContext) Provider(addr addrs.AbsProviderConfig) ResourceProvider {
c.ProviderCalled = true
c.ProviderName = n
c.ProviderAddr = addr
return c.ProviderProvider
}
func (c *MockEvalContext) ProviderSchema(n string) *ProviderSchema {
func (c *MockEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema {
c.ProviderSchemaCalled = true
c.ProviderSchemaName = n
c.ProviderSchemaAddr = addr
return c.ProviderSchemaSchema
}
func (c *MockEvalContext) CloseProvider(n string) error {
func (c *MockEvalContext) CloseProvider(addr addrs.ProviderConfig) error {
c.CloseProviderCalled = true
c.CloseProviderName = n
c.CloseProviderAddr = addr
return nil
}
func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error {
func (c *MockEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics {
c.ConfigureProviderCalled = true
c.ConfigureProviderName = n
c.ConfigureProviderAddr = addr
c.ConfigureProviderConfig = cfg
return c.ConfigureProviderError
return c.ConfigureProviderDiags
}
func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} {
func (c *MockEvalContext) ProviderInput(addr addrs.ProviderConfig) map[string]cty.Value {
c.ProviderInputCalled = true
c.ProviderInputName = n
return c.ProviderInputConfig
c.ProviderInputAddr = addr
return c.ProviderInputValues
}
func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) {
func (c *MockEvalContext) SetProviderInput(addr addrs.ProviderConfig, vals map[string]cty.Value) {
c.SetProviderInputCalled = true
c.SetProviderInputName = n
c.SetProviderInputConfig = cfg
c.SetProviderInputAddr = addr
c.SetProviderInputValues = vals
}
func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) {
@ -166,12 +197,34 @@ func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner {
return c.ProvisionerProvisioner
}
func (c *MockEvalContext) ProvisionerSchema(n string) *configschema.Block {
c.ProvisionerSchemaCalled = true
c.ProvisionerSchemaName = n
return c.ProvisionerSchemaSchema
}
func (c *MockEvalContext) CloseProvisioner(n string) error {
c.CloseProvisionerCalled = true
c.CloseProvisionerName = n
return nil
}
func (c *MockEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable) (cty.Value, hcl.Body, tfdiags.Diagnostics) {
c.EvaluateBlockCalled = true
c.EvaluateBlockBody = body
c.EvaluateBlockSchema = schema
c.EvaluateBlockSelf = self
return c.EvaluateBlockResult, c.EvaluateBlockExpandedBody, c.EvaluateBlockDiags
}
func (c *MockEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) {
c.EvaluateExprCalled = true
c.EvaluateExprExpr = expr
c.EvaluateExprWantType = wantType
c.EvaluateExprSelf = self
return c.EvaluateExprResult, c.EvaluateExprDiags
}
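The new mock fields above follow the existing *Called/*Result convention, so a test can pre-seed results and then assert on what the node recorded. A hedged sketch; the node under test and the chosen values are hypothetical and not part of this commit:
// Sketch only: pre-load results for the new expression-evaluation hooks and
// assert that the EvalNode under test actually evaluated its configuration.
func exampleMockEvalContextUsage(t *testing.T) {
    ctx := &MockEvalContext{
        EvaluateBlockResult: cty.EmptyObjectVal,
        EvaluateExprResult:  cty.StringVal("example"),
    }
    // ... construct and run the EvalNode under test against ctx here ...
    if !ctx.EvaluateBlockCalled && !ctx.EvaluateExprCalled {
        t.Fatal("expected the node to evaluate its configuration")
    }
}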
func (c *MockEvalContext) Interpolate(
config *config.RawConfig, resource *Resource) (*ResourceConfig, error) {
c.InterpolateCalled = true
@ -188,15 +241,15 @@ func (c *MockEvalContext) InterpolateProvider(
return c.InterpolateProviderConfigResult, c.InterpolateError
}
func (c *MockEvalContext) Path() []string {
func (c *MockEvalContext) Path() addrs.ModuleInstance {
c.PathCalled = true
return c.PathPath
}
func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) {
c.SetVariablesCalled = true
c.SetVariablesModule = n
c.SetVariablesVariables = vs
func (c *MockEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, values map[string]cty.Value) {
c.SetModuleCallArgumentsCalled = true
c.SetModuleCallArgumentsModule = n
c.SetModuleCallArgumentsValues = values
}
func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) {

View File

@ -1,58 +1,142 @@
package terraform
import (
"github.com/hashicorp/terraform/config"
"fmt"
"log"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/gocty"
)
// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state
// when there is a resource count with zero/one boundary, i.e. fixing
// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
type EvalCountFixZeroOneBoundary struct {
Resource *config.Resource
// evaluateResourceCountExpression is our standard mechanism for interpreting an
// expression given for a "count" argument on a resource. This should be called
// from the DynamicExpand of a node representing a resource in order to
// determine the final count value.
//
// If the result is zero or positive and no error diagnostics are returned, then
// the result is the literal count value to use.
//
// If the result is -1, this indicates that the given expression is nil and so
// the "count" behavior should not be enabled for this resource at all.
//
// If error diagnostics are returned then the result is undefined and must
// not be used.
func evaluateResourceCountExpression(expr hcl.Expression, ctx EvalContext) (int, tfdiags.Diagnostics) {
if expr == nil {
return -1, nil
}
var diags tfdiags.Diagnostics
var count int
countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil)
diags = diags.Append(countDiags)
if diags.HasErrors() {
return -1, diags
}
switch {
case countVal.IsNull():
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid count argument",
Detail: `The given "count" argument value is null. An integer is required.`,
Subject: expr.Range().Ptr(),
})
return -1, diags
case !countVal.IsKnown():
// Currently this is a rather bad outcome from a UX standpoint, since we have
// no real mechanism to deal with this situation and all we can do is produce
// an error message.
// FIXME: In future, implement a built-in mechanism for deferring changes that
// can't yet be predicted, and use it to guide the user through several
// plan/apply steps until the desired configuration is eventually reached.
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid count argument",
Detail: `The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the count depends on.`,
Subject: expr.Range().Ptr(),
})
return -1, diags
}
err := gocty.FromCtyValue(countVal, &count)
if err != nil {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid count argument",
Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
Subject: expr.Range().Ptr(),
})
return -1, diags
}
if count < 0 {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid count argument",
Detail: `The given "count" argument value is unsuitable: negative numbers are not supported.`,
Subject: expr.Range().Ptr(),
})
return -1, diags
}
return count, diags
}
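As a usage illustration, a resource node's DynamicExpand might consume this helper roughly as follows; the wrapper function and its countExpr parameter are assumptions for illustration only, not part of this commit:
// Sketch only: interpret the "count" argument for a resource during
// DynamicExpand, following the contract documented above.
func exampleInterpretCount(ctx EvalContext, countExpr hcl.Expression) (int, bool, tfdiags.Diagnostics) {
    count, diags := evaluateResourceCountExpression(countExpr, ctx)
    if diags.HasErrors() {
        return 0, false, diags
    }
    if count < 0 {
        // count == -1 means no "count" argument was set at all, so the
        // resource has a single instance with no index key.
        return 1, false, diags
    }
    return count, true, diags
}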
// TODO: test
func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) {
// Get the count, important for knowing whether we're supposed to
// be adding the zero, or trimming it.
count, err := n.Resource.Count()
if err != nil {
return nil, err
// fixResourceCountSetTransition is a helper function to fix up the state when a
// resource transitions its "count" from being set to unset or vice-versa,
// treating a 0-key and a no-key instance as aliases for one another across
// the transition.
//
// The correct time to call this function is in the DynamicExpand method for
// a node representing a resource, just after evaluating the count with
// evaluateResourceCountExpression, and before any other analysis of the
// state such as orphan detection.
//
// This function calls methods on the given EvalContext to update the current
// state in-place, if necessary. It is a no-op if there is no count transition
// taking place.
//
// Since the state is modified in-place, this function must take a writer lock
// on the state. The caller must therefore not also be holding a state lock,
// or this function will block forever awaiting the lock.
func fixResourceCountSetTransition(ctx EvalContext, addr addrs.Resource, countEnabled bool) {
huntAddr := addr.Instance(addrs.NoKey)
replaceAddr := addr.Instance(addrs.IntKey(0))
if !countEnabled {
huntAddr, replaceAddr = replaceAddr, huntAddr
}
// Figure what to look for and what to replace it with
hunt := n.Resource.Id()
replace := hunt + ".0"
if count < 2 {
hunt, replace = replace, hunt
}
path := ctx.Path()
// The state still uses our legacy internal address string format, so we
// need to shim here.
huntKey := NewLegacyResourceInstanceAddress(huntAddr.Absolute(path)).stateId()
replaceKey := NewLegacyResourceInstanceAddress(replaceAddr.Absolute(path)).stateId()
state, lock := ctx.State()
// Get a lock so we can access this instance and potentially make
// changes to it.
lock.Lock()
defer lock.Unlock()
// Look for the module state. If we don't have one, then it doesn't matter.
mod := state.ModuleByPath(ctx.Path())
mod := state.ModuleByPath(path)
if mod == nil {
return nil, nil
return
}
// Look for the resource state. If we don't have one, then it is okay.
rs, ok := mod.Resources[hunt]
rs, ok := mod.Resources[huntKey]
if !ok {
return nil, nil
return
}
// If the replacement key exists, we just keep both
if _, ok := mod.Resources[replace]; ok {
return nil, nil
// If the replacement key also exists then we do nothing and keep both.
if _, ok := mod.Resources[replaceKey]; ok {
return
}
mod.Resources[replace] = rs
delete(mod.Resources, hunt)
return nil, nil
mod.Resources[replaceKey] = rs
delete(mod.Resources, huntKey)
log.Printf("[TRACE] renamed %s to %s in transient state due to count argument change", huntKey, replaceKey)
}
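For concreteness, the legacy state keys that huntKey/replaceKey resolve to for a hypothetical root-module resource aws_instance.foo look like this; the address literal is illustrative, not taken from the commit:
// Illustration only: the shimmed legacy keys involved in the rename when
// count is being enabled for aws_instance.foo in the root module.
addr := addrs.Resource{
    Mode: addrs.ManagedResourceMode,
    Type: "aws_instance",
    Name: "foo",
}
noKey := NewLegacyResourceInstanceAddress(addr.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)).stateId()
zeroKey := NewLegacyResourceInstanceAddress(addr.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance)).stateId()
// noKey should be "aws_instance.foo" and zeroKey "aws_instance.foo.0", so the
// rename above moves the state entry between those two keys in mod.Resources.
_, _ = noKey, zeroKey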

View File

@ -1,18 +1,24 @@
package terraform
import (
"bytes"
"fmt"
"log"
"strings"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/terraform/version"
)
// EvalCompareDiff is an EvalNode implementation that compares two diffs
// and errors if the diffs are not equal.
type EvalCompareDiff struct {
Info *InstanceInfo
Addr addrs.ResourceInstance
One, Two **InstanceDiff
}
@ -43,10 +49,10 @@ func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
}()
if same, reason := one.Same(two); !same {
log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id)
log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason)
log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one)
log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two)
log.Printf("[ERROR] %s: diffs didn't match", n.Addr)
log.Printf("[ERROR] %s: reason: %s", n.Addr, reason)
log.Printf("[ERROR] %s: diff one: %#v", n.Addr, one)
log.Printf("[ERROR] %s: diff two: %#v", n.Addr, two)
return nil, fmt.Errorf(
"%s: diffs didn't match during apply. This is a bug with "+
"Terraform and should be reported as a GitHub Issue.\n"+
@ -61,7 +67,7 @@ func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
"\n"+
"Also include as much context as you can about your config, state, "+
"and the steps you performed to trigger this error.\n",
n.Info.Id, version.Version, n.Info.Id, reason, one, two)
n.Addr, version.Version, n.Addr, reason, one, two)
}
return nil, nil
@ -70,23 +76,17 @@ func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
// EvalDiff is an EvalNode implementation that does a refresh for
// a resource.
type EvalDiff struct {
Name string
Info *InstanceInfo
Config **ResourceConfig
Provider *ResourceProvider
Diff **InstanceDiff
State **InstanceState
Addr addrs.ResourceInstance
Config *configs.Resource
Provider *ResourceProvider
ProviderSchema **ProviderSchema
State **InstanceState
PreviousDiff **InstanceDiff
OutputDiff **InstanceDiff
OutputValue *cty.Value
OutputState **InstanceState
// Resource is needed to fetch the ignore_changes list so we can
// filter user-requested ignored attributes from the diff.
Resource *config.Resource
// Stub is used to flag the generated InstanceDiff as a stub. This is used to
// ensure that the node exists to perform interpolations and generate
// computed paths off of, but not as an actual diff where resources should be

// counted, and not as a diff that should be acted on.
Stub bool
}
@ -95,11 +95,21 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
state := *n.State
config := *n.Config
provider := *n.Provider
providerSchema := *n.ProviderSchema
var diags tfdiags.Diagnostics
// The provider and hook APIs still expect our legacy InstanceInfo type.
legacyInfo := NewInstanceInfo(n.Addr.Absolute(ctx.Path()).ContainingResource())
// State still uses legacy-style internal ids, so we need to shim to get
// a suitable key to use.
stateId := NewLegacyResourceInstanceAddress(n.Addr.Absolute(ctx.Path())).stateId()
// Call pre-diff hook
if !n.Stub {
err := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PreDiff(n.Info, state)
return h.PreDiff(legacyInfo, state)
})
if err != nil {
return nil, err
@ -113,8 +123,23 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
}
diffState.init()
// Evaluate the configuration
schema := providerSchema.ResourceTypes[n.Addr.Resource.Type]
if schema == nil {
// Should be caught during validation, so we don't bother with a pretty error here
return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
}
configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil)
diags = diags.Append(configDiags)
if configDiags.HasErrors() {
return nil, diags.Err()
}
// The provider API still expects our legacy ResourceConfig type.
legacyRC := NewResourceConfigShimmed(configVal, schema)
// Diff!
diff, err := provider.Diff(n.Info, diffState, config)
diff, err := provider.Diff(legacyInfo, diffState, legacyRC)
if err != nil {
return nil, err
}
@ -123,7 +148,7 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
}
// Set DestroyDeposed if we have deposed instances
_, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) {
_, err = readInstanceFromState(ctx, stateId, nil, func(rs *ResourceState) (*InstanceState, error) {
if len(rs.Deposed) > 0 {
diff.DestroyDeposed = true
}
@ -135,8 +160,8 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
}
// Preserve the DestroyTainted flag
if n.Diff != nil {
diff.SetTainted((*n.Diff).GetDestroyTainted())
if n.PreviousDiff != nil {
diff.SetTainted((*n.PreviousDiff).GetDestroyTainted())
}
// Require a destroy if there is an ID and it requires new.
@ -161,7 +186,7 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
})
}
// filter out ignored resources
// filter out ignored attributes
if err := n.processIgnoreChanges(diff); err != nil {
return nil, err
}
@ -169,7 +194,7 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
// Call post-refresh hook
if !n.Stub {
err = ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostDiff(n.Info, diff)
return h.PostDiff(legacyInfo, diff)
})
if err != nil {
return nil, err
@ -181,6 +206,10 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
*n.OutputDiff = diff
}
if n.OutputValue != nil {
*n.OutputValue = configVal
}
// Update the state if we care
if n.OutputState != nil {
*n.OutputState = state
@ -195,12 +224,13 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
}
func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
if diff == nil || n.Resource == nil || n.Resource.Id() == "" {
if diff == nil || n.Config == nil || n.Config.Managed == nil {
return nil
}
ignoreChanges := n.Resource.Lifecycle.IgnoreChanges
ignoreChanges := n.Config.Managed.IgnoreChanges
ignoreAll := n.Config.Managed.IgnoreAllChanges
if len(ignoreChanges) == 0 {
if len(ignoreChanges) == 0 && !ignoreAll {
return nil
}
@ -220,9 +250,10 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
// get the complete set of keys we want to ignore
ignorableAttrKeys := make(map[string]bool)
for _, ignoredKey := range ignoreChanges {
for _, ignoredTraversal := range ignoreChanges {
ignoredKey := legacyFlatmapKeyForTraversal(ignoredTraversal)
for k := range attrs {
if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) {
if ignoreAll || strings.HasPrefix(k, ignoredKey) {
ignorableAttrKeys[k] = true
}
}
@ -285,14 +316,56 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
// If we didn't hit any of our early exit conditions, we can filter the diff.
for k := range ignorableAttrKeys {
log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s",
n.Resource.Id(), k)
log.Printf("[DEBUG] [EvalIgnoreChanges] %s: Ignoring diff attribute: %s", n.Addr.String(), k)
diff.DelAttribute(k)
}
return nil
}
// legacyFlatmapKeyForTraversal constructs a key string compatible with what
// the flatmap package would generate for an attribute addressable by the given
// traversal.
//
// This is used only to shim references to attributes within the diff and
// state structures, which have not (at the time of writing) yet been updated
// to use the newer HCL-based representations.
func legacyFlatmapKeyForTraversal(traversal hcl.Traversal) string {
var buf bytes.Buffer
first := true
for _, step := range traversal {
if !first {
buf.WriteByte('.')
}
switch ts := step.(type) {
case hcl.TraverseRoot:
buf.WriteString(ts.Name)
case hcl.TraverseAttr:
buf.WriteString(ts.Name)
case hcl.TraverseIndex:
val := ts.Key
switch val.Type() {
case cty.Number:
bf := val.AsBigFloat()
buf.WriteString(bf.String())
case cty.String:
s := val.AsString()
buf.WriteString(s)
default:
// should never happen, since no other types appear in
// traversals in practice.
buf.WriteByte('?')
}
default:
// should never happen, since we've covered all of the types
// that show up in parsed traversals in practice.
buf.WriteByte('?')
}
first = false
}
return buf.String()
}
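As an example of the shim's output, a hand-built traversal equivalent to tags["Name"] flattens to the legacy key "tags.Name"; the traversal literal below is hypothetical:
// Example only: shim a traversal of the kind ignore_changes can carry into a
// flatmap-style attribute key.
traversal := hcl.Traversal{
    hcl.TraverseRoot{Name: "tags"},
    hcl.TraverseIndex{Key: cty.StringVal("Name")},
}
key := legacyFlatmapKeyForTraversal(traversal) // "tags.Name"
_ = key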
// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
type flatAttrDiff map[string]*ResourceAttrDiff
@ -343,7 +416,7 @@ func groupContainers(d *InstanceDiff) map[string]flatAttrDiff {
// EvalDiffDestroy is an EvalNode implementation that returns a plain
// destroy diff.
type EvalDiffDestroy struct {
Info *InstanceInfo
Addr addrs.ResourceInstance
State **InstanceState
Output **InstanceDiff
}
@ -357,9 +430,12 @@ func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
return nil, nil
}
// The provider and hook APIs still expect our legacy InstanceInfo type.
legacyInfo := NewInstanceInfo(n.Addr.Absolute(ctx.Path()).ContainingResource())
// Call pre-diff hook
err := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PreDiff(n.Info, state)
return h.PreDiff(legacyInfo, state)
})
if err != nil {
return nil, err
@ -370,7 +446,7 @@ func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
// Call post-diff hook
err = ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostDiff(n.Info, diff)
return h.PostDiff(legacyInfo, diff)
})
if err != nil {
return nil, err
@ -385,7 +461,7 @@ func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
// EvalDiffDestroyModule is an EvalNode implementation that writes the diff to
// the full diff.
type EvalDiffDestroyModule struct {
Path []string
Path addrs.ModuleInstance
}
// TODO: test

View File

@ -2,6 +2,8 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/addrs"
)
// EvalImportState is an EvalNode implementation that performs an
@ -55,7 +57,7 @@ func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) {
// EvalImportStateVerify verifies the state after ImportState and
// after the refresh to make sure it is non-nil and valid.
type EvalImportStateVerify struct {
Info *InstanceInfo
Addr addrs.ResourceInstance
Id string
State **InstanceState
}
@ -68,7 +70,7 @@ func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) {
"import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+
"exist. Please verify the ID is correct. You cannot import non-existent\n"+
"resources using Terraform import.",
n.Info.HumanId(),
n.Addr.String(),
n.Id)
}

View File

@ -1,56 +0,0 @@
package terraform
import (
"log"
"github.com/hashicorp/terraform/config"
)
// EvalInterpolate is an EvalNode implementation that takes a raw
// configuration and interpolates it.
type EvalInterpolate struct {
Config *config.RawConfig
Resource *Resource
Output **ResourceConfig
ContinueOnErr bool
}
func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) {
rc, err := ctx.Interpolate(n.Config, n.Resource)
if err != nil {
if n.ContinueOnErr {
log.Printf("[WARN] Interpolation %q failed: %s", n.Config.Key, err)
return nil, EvalEarlyExitError{}
}
return nil, err
}
if n.Output != nil {
*n.Output = rc
}
return nil, nil
}
// EvalInterpolateProvider is an EvalNode implementation that takes a
// ProviderConfig and interpolates it. Provider configurations are the only
// "inherited" type of configuration we have, and the original raw config may
// have a different interpolation scope.
type EvalInterpolateProvider struct {
Config *config.ProviderConfig
Resource *Resource
Output **ResourceConfig
}
func (n *EvalInterpolateProvider) Eval(ctx EvalContext) (interface{}, error) {
rc, err := ctx.InterpolateProvider(n.Config, n.Resource)
if err != nil {
return nil, err
}
if n.Output != nil {
*n.Output = rc
}
return nil, nil
}

View File

@ -1,37 +0,0 @@
package terraform
import (
"reflect"
"testing"
"github.com/hashicorp/terraform/config"
)
func TestEvalInterpolate_impl(t *testing.T) {
var _ EvalNode = new(EvalInterpolate)
}
func TestEvalInterpolate(t *testing.T) {
config, err := config.NewRawConfig(map[string]interface{}{})
if err != nil {
t.Fatalf("err: %s", err)
}
var actual *ResourceConfig
n := &EvalInterpolate{Config: config, Output: &actual}
result := testResourceConfig(t, map[string]interface{}{})
ctx := &MockEvalContext{InterpolateConfigResult: result}
if _, err := n.Eval(ctx); err != nil {
t.Fatalf("err: %s", err)
}
if actual != result {
t.Fatalf("bad: %#v", actual)
}
if !ctx.InterpolateCalled {
t.Fatal("should be called")
}
if !reflect.DeepEqual(ctx.InterpolateConfig, config) {
t.Fatalf("bad: %#v", ctx.InterpolateConfig)
}
}

61
terraform/eval_lang.go Normal file
View File

@ -0,0 +1,61 @@
package terraform
import (
"log"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/config/configschema"
"github.com/zclconf/go-cty/cty"
)
// EvalConfigBlock is an EvalNode implementation that takes a raw
// configuration block and evaluates any expressions within it.
//
// ExpandedConfig is populated with the result of expanding any "dynamic"
// blocks in the given body, which can be useful for extracting correct source
// location information for specific attributes in the result.
type EvalConfigBlock struct {
Config *hcl.Body
Schema *configschema.Block
SelfAddr addrs.Referenceable
Output *cty.Value
ExpandedConfig *hcl.Body
ContinueOnErr bool
}
func (n *EvalConfigBlock) Eval(ctx EvalContext) (interface{}, error) {
val, body, diags := ctx.EvaluateBlock(*n.Config, n.Schema, n.SelfAddr)
if diags.HasErrors() && n.ContinueOnErr {
log.Printf("[WARN] Block evaluation failed: %s", diags.Err())
return nil, EvalEarlyExitError{}
}
if n.Output != nil {
*n.Output = val
}
if n.ExpandedConfig != nil {
*n.ExpandedConfig = body
}
return nil, diags.ErrWithWarnings()
}
// EvalConfigExpr is an EvalNode implementation that takes a raw configuration
// expression and evaluates it.
type EvalConfigExpr struct {
Expr hcl.Expression
SelfAddr addrs.Referenceable
Output *cty.Value
}
func (n *EvalConfigExpr) Eval(ctx EvalContext) (interface{}, error) {
val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, n.SelfAddr)
if n.Output != nil {
*n.Output = val
}
return nil, diags.ErrWithWarnings()
}
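A hedged sketch of driving the new expression node directly; the wrapper function is hypothetical and not part of this commit:
// Sketch only: evaluate a single configuration expression through the new
// node type and collect its result.
func exampleEvalConfigExpr(ctx EvalContext, expr hcl.Expression) (cty.Value, error) {
    var result cty.Value
    node := &EvalConfigExpr{Expr: expr, Output: &result}
    if _, err := node.Eval(ctx); err != nil {
        return cty.NilVal, err
    }
    return result, nil
}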

View File

@ -3,21 +3,24 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config/hcl2shim"
"github.com/zclconf/go-cty/cty"
)
// EvalLocal is an EvalNode implementation that evaluates the
// expression for a local value and writes it into a transient part of
// the state.
type EvalLocal struct {
Name string
Value *config.RawConfig
Addr addrs.LocalValue
Expr hcl.Expression
}
func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) {
cfg, err := ctx.Interpolate(n.Value, nil)
if err != nil {
return nil, fmt.Errorf("local.%s: %s", n.Name, err)
val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil)
if diags.HasErrors() {
return nil, diags.Err()
}
state, lock := ctx.State()
@ -35,24 +38,15 @@ func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) {
mod = state.AddModule(ctx.Path())
}
// Get the value from the config
var valueRaw interface{} = config.UnknownVariableValue
if cfg != nil {
var ok bool
valueRaw, ok = cfg.Get("value")
if !ok {
valueRaw = ""
}
if cfg.IsComputed("value") {
valueRaw = config.UnknownVariableValue
}
}
// Lower the value to the legacy form that our state structures still expect.
// FIXME: Update mod.Locals to be a map[string]cty.Value .
legacyVal := hcl2shim.ConfigValueFromHCL2(val)
if mod.Locals == nil {
// initialize
mod.Locals = map[string]interface{}{}
}
mod.Locals[n.Name] = valueRaw
mod.Locals[n.Addr.Name] = legacyVal
return nil, nil
}
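For illustration, the lowering performed by hcl2shim.ConfigValueFromHCL2 turns a cty value into plain Go values for the legacy state structures; the example value and the exact output shape shown in the comment are assumptions here:
// Illustration only: a local value such as this object...
val := cty.ObjectVal(map[string]cty.Value{
    "name": cty.StringVal("example"),
    "tags": cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
})
// ...is expected to lower to something along the lines of
//   map[string]interface{}{"name": "example", "tags": []interface{}{"a", "b"}}
legacy := hcl2shim.ConfigValueFromHCL2(val)
_ = legacy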
@ -61,7 +55,7 @@ func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) {
// from the state. Locals aren't persisted, but we don't need to evaluate them
// during destroy.
type EvalDeleteLocal struct {
Name string
Addr addrs.LocalValue
}
func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) {
@ -80,7 +74,7 @@ func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) {
return nil, nil
}
delete(mod.Locals, n.Name)
delete(mod.Locals, n.Addr.Name)
return nil, nil
}

View File

@ -4,13 +4,18 @@ import (
"fmt"
"log"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/hcl2shim"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/gocty"
)
// EvalDeleteOutput is an EvalNode implementation that deletes an output
// from the state.
type EvalDeleteOutput struct {
Name string
Addr addrs.OutputValue
}
// TODO: test
@ -30,7 +35,7 @@ func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) {
return nil, nil
}
delete(mod.Outputs, n.Name)
delete(mod.Outputs, n.Addr.Name)
return nil, nil
}
@ -38,19 +43,19 @@ func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) {
// EvalWriteOutput is an EvalNode implementation that writes the output
// for the given name to the current state.
type EvalWriteOutput struct {
Name string
Addr addrs.OutputValue
Sensitive bool
Value *config.RawConfig
Expr hcl.Expression
// ContinueOnErr allows evaluation to fail during Input
ContinueOnErr bool
}
// TODO: test
func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
// This has to run before we have a state lock, since interpolation also
// This has to run before we have a state lock, since evaluation also
// reads the state
cfg, err := ctx.Interpolate(n.Value, nil)
// handle the error after we have the module from the state
val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil)
// We'll handle errors below, after we have loaded the module.
state, lock := ctx.State()
if state == nil {
@ -67,67 +72,66 @@ func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
}
// handling the interpolation error
if err != nil {
if diags.HasErrors() {
if n.ContinueOnErr || flagWarnOutputErrors {
log.Printf("[ERROR] Output interpolation %q failed: %s", n.Name, err)
log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr.Name, diags.Err())
// if we're continuing, make sure the output is included, and
// marked as unknown
mod.Outputs[n.Name] = &OutputState{
mod.Outputs[n.Addr.Name] = &OutputState{
Type: "string",
Value: config.UnknownVariableValue,
}
return nil, EvalEarlyExitError{}
}
return nil, err
return nil, diags.Err()
}
// Get the value from the config
var valueRaw interface{} = config.UnknownVariableValue
if cfg != nil {
var ok bool
valueRaw, ok = cfg.Get("value")
if !ok {
valueRaw = ""
ty := val.Type()
switch {
case ty.IsPrimitiveType():
// For now we record all primitive types as strings, for compatibility
// with our existing state formats.
// FIXME: Revise the state format to support any type.
var valueTyped string
switch {
case !val.IsKnown():
// Legacy handling of unknown values as a special string.
valueTyped = config.UnknownVariableValue
case val.IsNull():
// State doesn't currently support null, so we'll save as empty string.
valueTyped = ""
default:
err := gocty.FromCtyValue(val, &valueTyped)
if err != nil {
// Should never happen, because all primitives can convert to string.
return nil, fmt.Errorf("cannot marshal %#v for storage in state: %s", err)
}
}
if cfg.IsComputed("value") {
valueRaw = config.UnknownVariableValue
}
}
switch valueTyped := valueRaw.(type) {
case string:
mod.Outputs[n.Name] = &OutputState{
mod.Outputs[n.Addr.Name] = &OutputState{
Type: "string",
Sensitive: n.Sensitive,
Value: valueTyped,
}
case []interface{}:
mod.Outputs[n.Name] = &OutputState{
case ty.IsListType() || ty.IsTupleType() || ty.IsSetType():
// For now we'll use our legacy storage forms for list-like types.
// This produces a []interface{}.
valueTyped := hcl2shim.ConfigValueFromHCL2(val)
mod.Outputs[n.Addr.Name] = &OutputState{
Type: "list",
Sensitive: n.Sensitive,
Value: valueTyped,
}
case map[string]interface{}:
mod.Outputs[n.Name] = &OutputState{
case ty.IsMapType() || ty.IsObjectType():
// For now we'll use our legacy storage forms for map-like types.
// This produces a map[string]interface{}.
valueTyped := hcl2shim.ConfigValueFromHCL2(val)
mod.Outputs[n.Addr.Name] = &OutputState{
Type: "map",
Sensitive: n.Sensitive,
Value: valueTyped,
}
case []map[string]interface{}:
// an HCL map is multi-valued, so if this was read out of a config the
// map may still be in a slice.
if len(valueTyped) == 1 {
mod.Outputs[n.Name] = &OutputState{
Type: "map",
Sensitive: n.Sensitive,
Value: valueTyped[0],
}
break
}
return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map",
n.Name, valueTyped, len(valueTyped))
default:
return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped)
return nil, fmt.Errorf("output %s is not a valid type (%s)", n.Addr.Name, ty.FriendlyName())
}
return nil, nil
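As a small illustration of the "list" branch above, the legacy record ends up looking roughly like this; the output value is hypothetical:
// Sketch only: how a hypothetical list-typed output lands in the legacy
// OutputState via the list/tuple/set branch.
val := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
legacyOut := &OutputState{
    Type:      "list",
    Sensitive: false,
    Value:     hcl2shim.ConfigValueFromHCL2(val), // roughly []interface{}{"a", "b"}
}
_ = legacyOut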

View File

@ -3,49 +3,68 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/tfdiags"
)
// EvalBuildProviderConfig outputs a *ResourceConfig that is properly
// merged with parents and inputs on top of what is configured in the file.
type EvalBuildProviderConfig struct {
Provider string
Config **ResourceConfig
Output **ResourceConfig
}
func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
cfg := *n.Config
func buildProviderConfig(ctx EvalContext, addr addrs.ProviderConfig, body hcl.Body) hcl.Body {
// If we have an Input configuration set, then merge that in
if input := ctx.ProviderInput(n.Provider); input != nil {
if input := ctx.ProviderInput(addr); input != nil {
// "input" is a map of the subset of config values that were known
// during the input walk, set by EvalInputProvider. Note that
// in particular it does *not* include attributes that had
// computed values at input time; those appear *only* in
// "cfg" here.
rc, err := config.NewRawConfig(input)
if err != nil {
return nil, err
}
merged := rc.Merge(cfg.raw)
cfg = NewResourceConfig(merged)
inputBody := configs.SynthBody("<input prompt>", input)
body = configs.MergeBodies(body, inputBody)
}
*n.Output = cfg
return nil, nil
return body
}
// EvalConfigProvider is an EvalNode implementation that configures
// a provider that is already initialized and retrieved.
type EvalConfigProvider struct {
Provider string
Config **ResourceConfig
Addr addrs.ProviderConfig
Provider *ResourceProvider
Config *configs.Provider
}
func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
return nil, ctx.ConfigureProvider(n.Provider, *n.Config)
var diags tfdiags.Diagnostics
provider := *n.Provider
config := n.Config
if config == nil {
// If we have no explicit configuration, just write an empty
// configuration into the provider.
configDiags := ctx.ConfigureProvider(n.Addr, cty.EmptyObjectVal)
return nil, configDiags.ErrWithWarnings()
}
schema, err := provider.GetSchema(&ProviderSchemaRequest{})
if err != nil {
diags = diags.Append(err)
return nil, diags.NonFatalErr()
}
configSchema := schema.Provider
configBody := buildProviderConfig(ctx, n.Addr, config.Config)
configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil)
diags = diags.Append(evalDiags)
if evalDiags.HasErrors() {
return nil, diags.NonFatalErr()
}
configDiags := ctx.ConfigureProvider(n.Addr, configVal)
configDiags = configDiags.InConfigBody(configBody)
return nil, configDiags.ErrWithWarnings()
}
// EvalInitProvider is an EvalNode implementation that initializes a provider
@ -53,85 +72,73 @@ func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
// EvalGetProvider node.
type EvalInitProvider struct {
TypeName string
Name string
Addr addrs.ProviderConfig
}
func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) {
return ctx.InitProvider(n.TypeName, n.Name)
return ctx.InitProvider(n.TypeName, n.Addr)
}
// EvalCloseProvider is an EvalNode implementation that closes provider
// connections that aren't needed anymore.
type EvalCloseProvider struct {
Name string
Addr addrs.ProviderConfig
}
func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) {
ctx.CloseProvider(n.Name)
ctx.CloseProvider(n.Addr)
return nil, nil
}
// EvalGetProvider is an EvalNode implementation that retrieves an already
// initialized provider instance for the given name.
//
// Unlike most eval nodes, this takes an _absolute_ provider configuration,
// because providers can be passed into and inherited between modules.
// Resource nodes must therefore know the absolute path of the provider they
// will use, which is usually accomplished by implementing
// interface GraphNodeProviderConsumer.
type EvalGetProvider struct {
Name string
Addr addrs.AbsProviderConfig
Output *ResourceProvider
// If non-nil, Schema will be updated after eval to refer to the
// schema of the provider.
Schema **ProviderSchema
}
func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) {
result := ctx.Provider(n.Name)
result := ctx.Provider(n.Addr)
if result == nil {
return nil, fmt.Errorf("provider %s not initialized", n.Name)
return nil, fmt.Errorf("provider %s not initialized", n.Addr)
}
if n.Output != nil {
*n.Output = result
}
if n.Schema != nil {
*n.Schema = ctx.ProviderSchema(n.Addr)
}
return nil, nil
}
// EvalInputProvider is an EvalNode implementation that asks for input
// for the given provider configurations.
type EvalInputProvider struct {
Name string
Addr addrs.ProviderConfig
Provider *ResourceProvider
Config **ResourceConfig
Config *configs.Provider
}
func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
rc := *n.Config
orig := rc.DeepCopy()
// Wrap the input into a namespace
input := &PrefixUIInput{
IdPrefix: fmt.Sprintf("provider.%s", n.Name),
QueryPrefix: fmt.Sprintf("provider.%s.", n.Name),
UIInput: ctx.Input(),
}
// Go through each provider and capture the input necessary
// to satisfy it.
config, err := (*n.Provider).Input(input, rc)
if err != nil {
return nil, fmt.Errorf(
"Error configuring %s: %s", n.Name, err)
}
// We only store values that have changed through Input.
// The goal is to cache cache input responses, not to provide a complete
// config for other providers.
confMap := make(map[string]interface{})
if config != nil && len(config.Config) > 0 {
// any values that weren't in the original ResourceConfig will be cached
for k, v := range config.Config {
if _, ok := orig.Config[k]; !ok {
confMap[k] = v
}
}
}
ctx.SetProviderInput(n.Name, confMap)
return nil, nil
// This is currently disabled. It used to interact with a provider method
// called Input, allowing the provider to capture input interactively
// itself, but once re-implemented we'll have this instead use the
// provider's configuration schema to automatically infer what we need
// to prompt for.
var diags tfdiags.Diagnostics
diags = diags.Append(tfdiags.SimpleWarning(fmt.Sprintf("%s: provider input is temporarily disabled", n.Addr)))
return nil, diags.ErrWithWarnings()
}

View File

@ -2,6 +2,8 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/config/configschema"
)
// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner
@ -31,6 +33,7 @@ func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) {
type EvalGetProvisioner struct {
Name string
Output *ResourceProvisioner
Schema **configschema.Block
}
func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
@ -43,5 +46,9 @@ func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
*n.Output = result
}
if n.Schema != nil {
*n.Schema = ctx.ProvisionerSchema(n.Name)
}
return result, nil
}

View File

@ -2,16 +2,25 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/zclconf/go-cty/cty"
)
// EvalReadDataDiff is an EvalNode implementation that executes a data
// resource's ReadDataDiff method to discover what attributes it exports.
type EvalReadDataDiff struct {
Provider *ResourceProvider
Addr addrs.ResourceInstance
Config *configs.Resource
Provider *ResourceProvider
ProviderSchema **ProviderSchema
Output **InstanceDiff
OutputValue *cty.Value
OutputState **InstanceState
Config **ResourceConfig
Info *InstanceInfo
// Set Previous when re-evaluating diff during apply, to ensure that
// the "Destroy" flag is preserved.
@ -21,14 +30,20 @@ type EvalReadDataDiff struct {
func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) {
// TODO: test
var diags tfdiags.Diagnostics
// The provider and hook APIs still expect our legacy InstanceInfo type.
legacyInfo := NewInstanceInfo(n.Addr.Absolute(ctx.Path()).ContainingResource())
err := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PreDiff(n.Info, nil)
return h.PreDiff(legacyInfo, nil)
})
if err != nil {
return nil, err
}
var diff *InstanceDiff
var configVal cty.Value
if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() {
// If we're re-diffing for a diff that was already planning to
@ -37,11 +52,28 @@ func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) {
} else {
provider := *n.Provider
config := *n.Config
providerSchema := *n.ProviderSchema
schema := providerSchema.DataSources[n.Addr.Resource.Type]
if schema == nil {
// Should be caught during validation, so we don't bother with a pretty error here
return nil, fmt.Errorf("provider does not support data source %q", n.Addr.Resource.Type)
}
var configDiags tfdiags.Diagnostics
configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil)
diags = diags.Append(configDiags)
if configDiags.HasErrors() {
return nil, diags.Err()
}
// The provider API still expects our legacy ResourceConfig type.
legacyRC := NewResourceConfigShimmed(configVal, schema)
var err error
diff, err = provider.ReadDataDiff(n.Info, config)
diff, err = provider.ReadDataDiff(legacyInfo, legacyRC)
if err != nil {
return nil, err
diags = diags.Append(err)
return nil, diags.Err()
}
if diff == nil {
diff = new(InstanceDiff)
@ -61,7 +93,7 @@ func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) {
}
err = ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostDiff(n.Info, diff)
return h.PostDiff(legacyInfo, diff)
})
if err != nil {
return nil, err
@ -69,6 +101,10 @@ func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) {
*n.Output = diff
if n.OutputValue != nil {
*n.OutputValue = configVal
}
if n.OutputState != nil {
state := &InstanceState{}
*n.OutputState = state
@ -80,16 +116,16 @@ func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) {
}
}
return nil, nil
return nil, diags.ErrWithWarnings()
}
// EvalReadDataApply is an EvalNode implementation that executes a data
// resource's ReadDataApply method to read data from the data source.
type EvalReadDataApply struct {
Addr addrs.ResourceInstance
Provider *ResourceProvider
Output **InstanceState
Diff **InstanceDiff
Info *InstanceInfo
}
func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
@ -97,6 +133,9 @@ func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
provider := *n.Provider
diff := *n.Diff
// The provider and hook APIs still expect our legacy InstanceInfo type.
legacyInfo := NewInstanceInfo(n.Addr.Absolute(ctx.Path()).ContainingResource())
// If the diff is for *destroying* this resource then we'll
// just drop its state and move on, since data resources don't
// support an actual "destroy" action.
@ -113,19 +152,19 @@ func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
err := ctx.Hook(func(h Hook) (HookAction, error) {
// We don't have a state yet, so we'll just give the hook an
// empty one to work with.
return h.PreRefresh(n.Info, &InstanceState{})
return h.PreRefresh(legacyInfo, &InstanceState{})
})
if err != nil {
return nil, err
}
state, err := provider.ReadDataApply(n.Info, diff)
state, err := provider.ReadDataApply(legacyInfo, diff)
if err != nil {
return nil, fmt.Errorf("%s: %s", n.Info.Id, err)
return nil, fmt.Errorf("%s: %s", n.Addr.Absolute(ctx.Path()).String(), err)
}
err = ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostRefresh(n.Info, state)
return h.PostRefresh(legacyInfo, state)
})
if err != nil {
return nil, err

View File

@ -3,14 +3,16 @@ package terraform
import (
"fmt"
"log"
"github.com/hashicorp/terraform/addrs"
)
// EvalRefresh is an EvalNode implementation that does a refresh for
// a resource.
type EvalRefresh struct {
Addr addrs.ResourceInstance
Provider *ResourceProvider
State **InstanceState
Info *InstanceInfo
Output **InstanceState
}
@ -19,29 +21,32 @@ func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
provider := *n.Provider
state := *n.State
// The provider and hook APIs still expect our legacy InstanceInfo type.
legacyInfo := NewInstanceInfo(n.Addr.Absolute(ctx.Path()).ContainingResource())
// If we have no state, we don't do any refreshing
if state == nil {
log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id)
log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", n.Addr.Absolute(ctx.Path()))
return nil, nil
}
// Call pre-refresh hook
err := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PreRefresh(n.Info, state)
return h.PreRefresh(legacyInfo, state)
})
if err != nil {
return nil, err
}
// Refresh!
state, err = provider.Refresh(n.Info, state)
state, err = provider.Refresh(legacyInfo, state)
if err != nil {
return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error())
return nil, fmt.Errorf("%s: %s", n.Addr.Absolute(ctx.Path()), err.Error())
}
// Call post-refresh hook
err = ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostRefresh(n.Info, state)
return h.PostRefresh(legacyInfo, state)
})
if err != nil {
return nil, err

View File

@ -1,13 +0,0 @@
package terraform
// EvalInstanceInfo is an EvalNode implementation that fills in the
// InstanceInfo as much as it can.
type EvalInstanceInfo struct {
Info *InstanceInfo
}
// TODO: test
func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) {
n.Info.ModulePath = ctx.Path()
return nil, nil
}

View File

@ -2,6 +2,8 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/addrs"
)
// EvalReadState is an EvalNode implementation that reads the
@ -130,13 +132,13 @@ func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
type EvalWriteState struct {
Name string
ResourceType string
Provider string
Provider addrs.AbsProviderConfig
Dependencies []string
State **InstanceState
}
func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider.String(), n.Dependencies,
func(rs *ResourceState) error {
rs.Primary = *n.State
return nil

View File

@ -3,125 +3,175 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/config"
"github.com/mitchellh/mapstructure"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/gocty"
)
// EvalValidateError is the error structure returned if there were
// validation errors.
type EvalValidateError struct {
Warnings []string
Errors []error
}
func (e *EvalValidateError) Error() string {
return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors)
}
// EvalValidateCount is an EvalNode implementation that validates
// the count of a resource.
type EvalValidateCount struct {
Resource *config.Resource
Resource *configs.Resource
}
// TODO: test
func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
var diags tfdiags.Diagnostics
var count int
var errs []error
var err error
if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil {
errs = append(errs, fmt.Errorf(
"Failed to interpolate count: %s", err))
val, valDiags := ctx.EvaluateExpr(n.Resource.Count, cty.Number, nil)
diags = diags.Append(valDiags)
if valDiags.HasErrors() {
goto RETURN
}
if val.IsNull() || !val.IsKnown() {
goto RETURN
}
count, err = n.Resource.Count()
err = gocty.FromCtyValue(val, &count)
if err != nil {
// If we can't get the count during validation, then
// just replace it with the number 1.
c := n.Resource.RawCount.Config()
c[n.Resource.RawCount.Key] = "1"
count = 1
}
err = nil
if count < 0 {
errs = append(errs, fmt.Errorf(
"Count is less than zero: %d", count))
// The EvaluateExpr call above already guaranteed us a number value,
// so if we end up here then we have something that is out of range
// for an int, and the error message will include a description of
// the valid range.
rawVal := val.AsBigFloat()
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid count value",
Detail: fmt.Sprintf("The number %s is not a valid count value: %s.", rawVal, err),
Subject: n.Resource.Count.Range().Ptr(),
})
} else if count < 0 {
rawVal := val.AsBigFloat()
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid count value",
Detail: fmt.Sprintf("The number %s is not a valid count value: count must not be negative.", rawVal),
Subject: n.Resource.Count.Range().Ptr(),
})
}
RETURN:
if len(errs) != 0 {
err = &EvalValidateError{
Errors: errs,
}
}
return nil, err
return nil, diags.NonFatalErr()
}
// EvalValidateProvider is an EvalNode implementation that validates
// the configuration of a resource.
// a provider configuration.
type EvalValidateProvider struct {
Addr addrs.ProviderConfig
Provider *ResourceProvider
Config **ResourceConfig
Config *configs.Provider
}
func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
var diags tfdiags.Diagnostics
provider := *n.Provider
config := *n.Config
config := n.Config
warns, errs := provider.Validate(config)
if n.Config == nil {
// Nothing to validate, then.
return nil, nil
}
schema, err := provider.GetSchema(&ProviderSchemaRequest{})
if err != nil {
diags = diags.Append(err)
return nil, diags.NonFatalErr()
}
configSchema := schema.Provider
configBody := buildProviderConfig(ctx, n.Addr, config.Config)
configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil)
diags = diags.Append(evalDiags)
if evalDiags.HasErrors() {
return nil, diags.NonFatalErr()
}
// The provider API expects our legacy ResourceConfig type, so we'll need
// to shim here.
rc := NewResourceConfigShimmed(configVal, configSchema)
warns, errs := provider.Validate(rc)
if len(warns) == 0 && len(errs) == 0 {
return nil, nil
}
return nil, &EvalValidateError{
Warnings: warns,
Errors: errs,
// FIXME: Once provider.Validate itself returns diagnostics, just
// return diags.NonFatalErr() immediately here.
for _, warn := range warns {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range errs {
diags = diags.Append(err)
}
return nil, diags.NonFatalErr()
}
// EvalValidateProvisioner is an EvalNode implementation that validates
// the configuration of a resource.
// the configuration of a provisioner belonging to a resource.
type EvalValidateProvisioner struct {
Provisioner *ResourceProvisioner
Config **ResourceConfig
ConnConfig **ResourceConfig
ResourceAddr addrs.ResourceInstance
Provisioner *ResourceProvisioner
Schema **configschema.Block
Config *configs.Provisioner
ConnConfig *configs.Connection
}
func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
provisioner := *n.Provisioner
config := *n.Config
schema := *n.Schema
var warns []string
var errs []error
var diags tfdiags.Diagnostics
{
// Validate the provisioner's own config first
w, e := provisioner.Validate(config)
configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, n.ResourceAddr)
diags = diags.Append(configDiags)
if configDiags.HasErrors() {
return nil, diags.Err()
}
// The provisioner API still uses our legacy ResourceConfig type, so
// we need to shim it.
legacyRC := NewResourceConfigShimmed(configVal, schema)
w, e := provisioner.Validate(legacyRC)
warns = append(warns, w...)
errs = append(errs, e...)
// FIXME: Once the provisioner API itself returns diagnostics, just
// return diags.NonFatalErr() here.
for _, warn := range warns {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range errs {
diags = diags.Append(err)
}
}
{
// Now validate the connection config, which might either be from
// the provisioner block itself or inherited from the resource's
// shared connection info.
w, e := n.validateConnConfig(*n.ConnConfig)
warns = append(warns, w...)
errs = append(errs, e...)
connDiags := n.validateConnConfig(ctx, n.ConnConfig, n.ResourceAddr)
diags = diags.Append(connDiags)
}
if len(warns) == 0 && len(errs) == 0 {
return nil, nil
}
return nil, &EvalValidateError{
Warnings: warns,
Errors: errs,
}
return nil, diags.NonFatalErr()
}
func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) {
func (n *EvalValidateProvisioner) validateConnConfig(ctx EvalContext, config *configs.Connection, self addrs.Referenceable) tfdiags.Diagnostics {
// We can't comprehensively validate the connection config since its
// final structure is decided by the communicator and we can't instantiate
// that until we have a complete instance state. However, we *can* catch
@ -129,103 +179,228 @@ func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig)
// typos early rather than waiting until we actually try to run one of
// the resource's provisioners.
type connConfigSuperset struct {
// All attribute types are interface{} here because at this point we
// may still have unresolved interpolation expressions, which will
// appear as strings regardless of the final goal type.
var diags tfdiags.Diagnostics
Type interface{} `mapstructure:"type"`
User interface{} `mapstructure:"user"`
Password interface{} `mapstructure:"password"`
Host interface{} `mapstructure:"host"`
Port interface{} `mapstructure:"port"`
Timeout interface{} `mapstructure:"timeout"`
ScriptPath interface{} `mapstructure:"script_path"`
// We evaluate here just by evaluating the block and returning any
// diagnostics we get, since evaluation alone is enough to check for
// extraneous arguments and incorrectly-typed arguments.
_, _, configDiags := ctx.EvaluateBlock(config.Config, connectionBlockSupersetSchema, self)
diags = diags.Append(configDiags)
return diags
}
// connectionBlockSupersetSchema is a schema representing the superset of all
// possible arguments for "connection" blocks across all supported connection
// types.
//
// This currently lives here because we've not yet updated our communicator
// subsystem to be aware of schema itself. Once that is done, we can remove
// this and use a type-specific schema from the communicator to validate
// exactly what is expected for a given connection type.
var connectionBlockSupersetSchema = &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"type": {
Type: cty.String,
Required: true,
},
// Common attributes for both connection types
"user": {
Type: cty.String,
Required: false,
},
"password": {
Type: cty.String,
Required: false,
},
"host": {
Type: cty.String,
Required: false,
},
"port": {
Type: cty.Number,
Required: false,
},
"timeout": {
Type: cty.String,
Required: false,
},
"script_path": {
Type: cty.String,
Required: false,
},
// For type=ssh only (enforced in ssh communicator)
PrivateKey interface{} `mapstructure:"private_key"`
HostKey interface{} `mapstructure:"host_key"`
Agent interface{} `mapstructure:"agent"`
BastionHost interface{} `mapstructure:"bastion_host"`
BastionHostKey interface{} `mapstructure:"bastion_host_key"`
BastionPort interface{} `mapstructure:"bastion_port"`
BastionUser interface{} `mapstructure:"bastion_user"`
BastionPassword interface{} `mapstructure:"bastion_password"`
BastionPrivateKey interface{} `mapstructure:"bastion_private_key"`
AgentIdentity interface{} `mapstructure:"agent_identity"`
"private_key": {
Type: cty.String,
Required: false,
},
"host_key": {
Type: cty.String,
Required: false,
},
"agent": {
Type: cty.Bool,
Required: false,
},
"agent_identity": {
Type: cty.String,
Required: false,
},
"bastion_host": {
Type: cty.String,
Required: false,
},
"bastion_host_key": {
Type: cty.String,
Required: false,
},
"bastion_port": {
Type: cty.Number,
Required: false,
},
"bastion_user": {
Type: cty.String,
Required: false,
},
"bastion_password": {
Type: cty.String,
Required: false,
},
"bastion_private_key": {
Type: cty.String,
Required: false,
},
// For type=winrm only (enforced in winrm communicator)
HTTPS interface{} `mapstructure:"https"`
Insecure interface{} `mapstructure:"insecure"`
NTLM interface{} `mapstructure:"use_ntlm"`
CACert interface{} `mapstructure:"cacert"`
}
var metadata mapstructure.Metadata
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
Metadata: &metadata,
Result: &connConfigSuperset{}, // result is disregarded; we only care about unused keys
})
if err != nil {
// should never happen
errs = append(errs, err)
return
}
if err := decoder.Decode(connConfig.Config); err != nil {
errs = append(errs, err)
return
}
for _, attrName := range metadata.Unused {
errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName))
}
return
"https": {
Type: cty.Bool,
Required: false,
},
"insecure": {
Type: cty.Bool,
Required: false,
},
"cacert": {
Type: cty.String,
Required: false,
},
"use_ntlm": {
Type: cty.Bool,
Required: false,
},
},
}
// EvalValidateResource is an EvalNode implementation that validates
// the configuration of a resource.
type EvalValidateResource struct {
Provider *ResourceProvider
Config **ResourceConfig
ResourceName string
ResourceType string
ResourceMode config.ResourceMode
Addr addrs.ResourceInstance
Provider *ResourceProvider
ProviderSchema **ProviderSchema
Config *configs.Resource
// IgnoreWarnings means that warnings will not be passed through. This allows
// "just-in-time" passes of validation to continue execution through warnings.
IgnoreWarnings bool
// ConfigVal, if non-nil, will be updated with the value resulting from
// evaluating the given configuration body. Since validation is performed
// very early, this value is likely to contain lots of unknown values,
// but its type will conform to the schema of the resource type associated
// with the resource instance being validated.
ConfigVal *cty.Value
}
func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
var diags tfdiags.Diagnostics
provider := *n.Provider
cfg := *n.Config
schema := *n.ProviderSchema
mode := cfg.Mode
var warns []string
var errs []error
// Provider entry point varies depending on resource mode, because
// managed resources and data resources are two distinct concepts
// in the provider abstraction.
switch n.ResourceMode {
case config.ManagedResourceMode:
warns, errs = provider.ValidateResource(n.ResourceType, cfg)
case config.DataResourceMode:
warns, errs = provider.ValidateDataSource(n.ResourceType, cfg)
switch mode {
case addrs.ManagedResourceMode:
schema, exists := schema.ResourceTypes[cfg.Type]
if !exists {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid resource type",
Detail: fmt.Sprintf("The provider %s does not support resource type %q.", cfg.ProviderConfigAddr(), cfg.Type),
Subject: &cfg.TypeRange,
})
return nil, diags.Err()
}
configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil)
diags = diags.Append(valDiags)
if valDiags.HasErrors() {
return nil, diags.Err()
}
// The provider API still expects our legacy types, so we must do some
// shimming here.
legacyCfg := NewResourceConfigShimmed(configVal, schema)
warns, errs = provider.ValidateResource(cfg.Type, legacyCfg)
if n.ConfigVal != nil {
*n.ConfigVal = configVal
}
case addrs.DataResourceMode:
schema, exists := schema.DataSources[cfg.Type]
if !exists {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid data source",
Detail: fmt.Sprintf("The provider %s does not support data source %q.", cfg.ProviderConfigAddr(), cfg.Type),
Subject: &cfg.TypeRange,
})
return nil, diags.Err()
}
configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil)
diags = diags.Append(valDiags)
if valDiags.HasErrors() {
return nil, diags.Err()
}
// The provider API still expects our legacy types, so we must do some
// shimming here.
legacyCfg := NewResourceConfigShimmed(configVal, schema)
warns, errs = provider.ValidateDataSource(cfg.Type, legacyCfg)
if n.ConfigVal != nil {
*n.ConfigVal = configVal
}
}
// If the resource name doesn't match the name regular
// expression, show an error.
if !config.NameRegexp.Match([]byte(n.ResourceName)) {
errs = append(errs, fmt.Errorf(
"%s: resource name can only contain letters, numbers, "+
"dashes, and underscores.", n.ResourceName))
// FIXME: Update the provider API to actually return diagnostics here,
// and then we can remove all this shimming and use its diagnostics
// directly.
for _, warn := range warns {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range errs {
diags = diags.Append(err)
}
if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 {
if n.IgnoreWarnings {
// If we _only_ have warnings then we'll return nil.
if diags.HasErrors() {
return nil, diags.NonFatalErr()
}
return nil, nil
}
return nil, &EvalValidateError{
Warnings: warns,
Errors: errs,
} else {
// We'll return an error if there are any diagnostics at all, even if
// some of them are warnings.
return nil, diags.NonFatalErr()
}
}

View File

@ -3,72 +3,41 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/terraform/lang"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config/configschema"
)
// EvalValidateResourceSelfRef is an EvalNode implementation that validates that
// a configuration doesn't contain a reference to the resource itself.
//
// This must be done prior to interpolating configuration in order to avoid
// any infinite loop scenarios.
type EvalValidateResourceSelfRef struct {
Addr **ResourceAddress
Config **config.RawConfig
// EvalValidateSelfRef is an EvalNode implementation that checks to ensure that
// expressions within a particular referenceable block do not reference that
// same block.
type EvalValidateSelfRef struct {
Addr addrs.Referenceable
Config hcl.Body
Schema *configschema.Block
}
func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) {
addr := *n.Addr
conf := *n.Config
func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) {
var diags tfdiags.Diagnostics
addr := n.Addr
// Go through the variables and find self references
var errs []error
for k, raw := range conf.Variables {
rv, ok := raw.(*config.ResourceVariable)
if !ok {
continue
}
addrStr := addr.String()
// Build an address from the variable
varAddr := &ResourceAddress{
Path: addr.Path,
Mode: rv.Mode,
Type: rv.Type,
Name: rv.Name,
Index: rv.Index,
InstanceType: TypePrimary,
}
// If the variable access is a multi-access (*), then we just
// match the index so that we'll match our own addr if everything
// else matches.
if rv.Multi && rv.Index == -1 {
varAddr.Index = addr.Index
}
// This is a quirk where ResourceAddress has index "-1" when the
// index isn't set at all, which means index "0" for resource access.
// So, if we're in that scenario, just set our varAddr index to -1 so
// it matches.
if addr.Index == -1 && varAddr.Index == 0 {
varAddr.Index = -1
}
// If the addresses match, then this is a self reference
if varAddr.Equals(addr) && varAddr.Index == addr.Index {
errs = append(errs, fmt.Errorf(
"%s: self reference not allowed: %q",
addr, k))
refs, _ := lang.ReferencesInBlock(n.Config, n.Schema)
for _, ref := range refs {
if ref.Subject.String() == addrStr {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Self-referential block",
Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addrStr),
Subject: ref.SourceRange.ToHCL().Ptr(),
})
}
}
// If no errors, no errors!
if len(errs) == 0 {
return nil, nil
}
// Wrap the errors in the proper wrapper so we can handle validation
// formatting properly upstream.
return nil, &EvalValidateError{
Errors: errs,
}
return nil, diags.NonFatalErr()
}
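The core of the check above is just an address comparison between each reference found in the block and the block's own address. A toy sketch of that comparison, independent of the lang and addrs packages (containsSelfRef is an illustrative helper, not code from this change):

package main

import "fmt"

// containsSelfRef is an illustrative helper showing the check performed by
// EvalValidateSelfRef: a block's expressions may not refer back to the
// block's own address.
func containsSelfRef(self string, refs []string) bool {
    for _, ref := range refs {
        if ref == self {
            return true
        }
    }
    return false
}

func main() {
    refs := []string{"var.region", "aws_instance.example"}
    fmt.Println(containsSelfRef("aws_instance.example", refs)) // true
    fmt.Println(containsSelfRef("aws_instance.other", refs))   // false
}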

View File

@ -4,12 +4,17 @@ import (
"fmt"
"log"
"reflect"
"strconv"
"strings"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/helper/hilmapstructure"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
)
// EvalTypeCheckVariable is an EvalNode which ensures that the variable
@ -93,166 +98,88 @@ func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) {
return nil, nil
}
// EvalSetVariables is an EvalNode implementation that sets the variables
// explicitly for interpolation later.
type EvalSetVariables struct {
Module *string
Variables map[string]interface{}
// EvalSetModuleCallArguments is an EvalNode implementation that sets values
// for arguments of a child module call, for later retrieval during
// expression evaluation.
type EvalSetModuleCallArguments struct {
Module addrs.ModuleCallInstance
Values map[string]cty.Value
}
// TODO: test
func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) {
ctx.SetVariables(*n.Module, n.Variables)
func (n *EvalSetModuleCallArguments) Eval(ctx EvalContext) (interface{}, error) {
ctx.SetModuleCallArguments(n.Module, n.Values)
return nil, nil
}
// EvalVariableBlock is an EvalNode implementation that evaluates the
// given configuration, and uses the final values as a way to set the
// mapping.
type EvalVariableBlock struct {
Config **ResourceConfig
VariableValues map[string]interface{}
}
func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) {
// Clear out the existing mapping
for k, _ := range n.VariableValues {
delete(n.VariableValues, k)
}
// Get our configuration
rc := *n.Config
for k, v := range rc.Config {
vKind := reflect.ValueOf(v).Type().Kind()
switch vKind {
case reflect.Slice:
var vSlice []interface{}
if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil {
n.VariableValues[k] = vSlice
continue
}
case reflect.Map:
var vMap map[string]interface{}
if err := hilmapstructure.WeakDecode(v, &vMap); err == nil {
n.VariableValues[k] = vMap
continue
}
default:
var vString string
if err := hilmapstructure.WeakDecode(v, &vString); err == nil {
n.VariableValues[k] = vString
continue
}
}
return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k)
}
for _, path := range rc.ComputedKeys {
log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path)
err := n.setUnknownVariableValueForPath(path)
if err != nil {
return nil, err
}
}
return nil, nil
}
func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error {
pathComponents := strings.Split(path, ".")
if len(pathComponents) < 1 {
return fmt.Errorf("No path comoponents in %s", path)
}
if len(pathComponents) == 1 {
// Special case the "top level" since we know the type
if _, ok := n.VariableValues[pathComponents[0]]; !ok {
n.VariableValues[pathComponents[0]] = config.UnknownVariableValue
}
return nil
}
// Otherwise find the correct point in the tree and then set to unknown
var current interface{} = n.VariableValues[pathComponents[0]]
for i := 1; i < len(pathComponents); i++ {
switch tCurrent := current.(type) {
case []interface{}:
index, err := strconv.Atoi(pathComponents[i])
if err != nil {
return fmt.Errorf("Cannot convert %s to slice index in path %s",
pathComponents[i], path)
}
current = tCurrent[index]
case []map[string]interface{}:
index, err := strconv.Atoi(pathComponents[i])
if err != nil {
return fmt.Errorf("Cannot convert %s to slice index in path %s",
pathComponents[i], path)
}
current = tCurrent[index]
case map[string]interface{}:
if val, hasVal := tCurrent[pathComponents[i]]; hasVal {
current = val
continue
}
tCurrent[pathComponents[i]] = config.UnknownVariableValue
break
}
}
return nil
}
// EvalCoerceMapVariable is an EvalNode implementation that recognizes a
// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a
// bare map literal is indistinguishable from a list of maps w/ one element.
// EvalModuleCallArgument is an EvalNode implementation that produces the value
// for a particular variable that will be used by a child module instance.
//
// We take all the same inputs as EvalTypeCheckVariable above, since we need
// both the target type and the proposed value in order to properly coerce.
type EvalCoerceMapVariable struct {
Variables map[string]interface{}
ModulePath []string
ModuleTree *module.Tree
// The result is written into the map given in Values, with its key
// set to the local name of the variable, disregarding the module instance
// address. Any existing values in that map are deleted first. This weird
// interface is a result of trying to be convenient for use with
// EvalContext.SetModuleCallArguments, which expects a map to merge in with
// any existing arguments.
type EvalModuleCallArgument struct {
Addr addrs.InputVariable
Config *configs.Variable
Expr hcl.Expression
// If this flag is set, any diagnostics are discarded and this operation
// will always succeed, though it may produce an unknown value in the
// event of an error.
IgnoreDiagnostics bool
Values map[string]cty.Value
}
// Eval implements the EvalNode interface. See EvalCoerceMapVariable for
// details.
func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) {
currentTree := n.ModuleTree
for _, pathComponent := range n.ModulePath[1:] {
currentTree = currentTree.Children()[pathComponent]
}
targetConfig := currentTree.Config()
prototypes := make(map[string]config.VariableType)
for _, variable := range targetConfig.Variables {
prototypes[variable.Name] = variable.Type()
func (n *EvalModuleCallArgument) Eval(ctx EvalContext) (interface{}, error) {
// Clear out the existing mapping
for k := range n.Values {
delete(n.Values, k)
}
for name, declaredType := range prototypes {
if declaredType != config.VariableTypeMap {
continue
}
wantType := n.Config.Type
name := n.Addr.Name
expr := n.Expr
proposedValue, ok := n.Variables[name]
if !ok {
continue
}
if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 {
if m, ok := list[0].(map[string]interface{}); ok {
log.Printf("[DEBUG] EvalCoerceMapVariable: "+
"Coercing single element list into map: %#v", m)
n.Variables[name] = m
}
}
if expr == nil {
// Should never happen, but we'll bail out early here rather than
// crash in case it does. We set no value at all in this case,
// making a subsequent call to EvalContext.SetModuleCallArguments
// a no-op.
log.Printf("[ERROR] attempt to evaluate %s with nil expression", n.Addr.String())
return nil, nil
}
return nil, nil
val, diags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil)
// We intentionally passed DynamicPseudoType to EvaluateExpr above because
// now we can do our own local type conversion and produce an error message
// with better context if it fails.
var convErr error
val, convErr = convert.Convert(val, wantType)
if convErr != nil {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid value for module argument",
Detail: fmt.Sprintf(
"The given value is not suitable for child module variable %q defined at %s: %s.",
name, n.Config.DeclRange.String(), convErr,
),
Subject: expr.Range().Ptr(),
})
// We'll return a placeholder unknown value to avoid producing
// redundant downstream errors.
val = cty.UnknownVal(wantType)
}
n.Values[name] = val
if n.IgnoreDiagnostics {
return nil, nil
}
return nil, diags.ErrWithWarnings()
}
// hclTypeName returns the name of the type that would represent this value in
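The local conversion step above relies on go-cty's convert package: evaluate loosely with DynamicPseudoType, then convert to the declared variable type and fall back to an unknown placeholder on failure. A small stand-alone example of that pattern (the value and target type here are made up for illustration):

package main

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"
    "github.com/zclconf/go-cty/cty/convert"
)

func main() {
    // A value as it might come back from loose evaluation: a tuple of
    // mixed element types. The target type is made up for illustration.
    given := cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.NumberIntVal(2)})
    want := cty.List(cty.String)

    got, err := convert.Convert(given, want)
    if err != nil {
        // On failure, record a diagnostic and fall back to an unknown
        // placeholder, as EvalModuleCallArgument does above.
        got = cty.UnknownVal(want)
    }
    fmt.Println(got.GoString(), err)
}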

View File

@ -1,23 +1,22 @@
package terraform
import (
"strings"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
)
// ProviderEvalTree returns the evaluation tree for initializing and
// configuring providers.
func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) EvalNode {
func ProviderEvalTree(n *NodeApplyableProvider, config *configs.Provider) EvalNode {
var provider ResourceProvider
var resourceConfig *ResourceConfig
typeName := strings.SplitN(n.NameValue, ".", 2)[0]
addr := n.Addr
relAddr := addr.ProviderConfig
seq := make([]EvalNode, 0, 5)
seq = append(seq, &EvalInitProvider{
TypeName: typeName,
Name: n.Name(),
TypeName: relAddr.Type,
Addr: addr.ProviderConfig,
})
// Input stuff
@ -26,22 +25,13 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E
Node: &EvalSequence{
Nodes: []EvalNode{
&EvalGetProvider{
Name: n.Name(),
Addr: addr,
Output: &provider,
},
&EvalInterpolateProvider{
Config: config,
Output: &resourceConfig,
},
&EvalBuildProviderConfig{
Provider: n.NameValue,
Config: &resourceConfig,
Output: &resourceConfig,
},
&EvalInputProvider{
Name: n.NameValue,
Addr: relAddr,
Provider: &provider,
Config: &resourceConfig,
Config: config,
},
},
},
@ -52,21 +42,13 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E
Node: &EvalSequence{
Nodes: []EvalNode{
&EvalGetProvider{
Name: n.Name(),
Addr: addr,
Output: &provider,
},
&EvalInterpolateProvider{
Config: config,
Output: &resourceConfig,
},
&EvalBuildProviderConfig{
Provider: n.NameValue,
Config: &resourceConfig,
Output: &resourceConfig,
},
&EvalValidateProvider{
Addr: relAddr,
Provider: &provider,
Config: &resourceConfig,
Config: config,
},
},
},
@ -78,18 +60,9 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E
Node: &EvalSequence{
Nodes: []EvalNode{
&EvalGetProvider{
Name: n.Name(),
Addr: addr,
Output: &provider,
},
&EvalInterpolateProvider{
Config: config,
Output: &resourceConfig,
},
&EvalBuildProviderConfig{
Provider: n.NameValue,
Config: &resourceConfig,
Output: &resourceConfig,
},
},
},
})
@ -101,8 +74,9 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E
Node: &EvalSequence{
Nodes: []EvalNode{
&EvalConfigProvider{
Provider: n.Name(),
Config: &resourceConfig,
Addr: relAddr,
Provider: &provider,
Config: config,
},
},
},
@ -113,6 +87,6 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E
// CloseProviderEvalTree returns the evaluation tree for closing
// provider connections that aren't needed anymore.
func CloseProviderEvalTree(n string) EvalNode {
return &EvalCloseProvider{Name: n}
func CloseProviderEvalTree(addr addrs.AbsProviderConfig) EvalNode {
return &EvalCloseProvider{Addr: addr.ProviderConfig}
}
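Eval trees like the one above are just nested EvalNode values run in order by an EvalSequence. A rough stand-alone sketch of that composition pattern, using illustrative stand-in types rather than the real terraform ones:

package main

import "fmt"

// evalNode and evalSequence are illustrative stand-ins for terraform's
// EvalNode and EvalSequence: a sequence simply runs its children in order,
// stopping at the first error.
type evalNode interface {
    Eval() error
}

type evalSequence struct{ nodes []evalNode }

func (s evalSequence) Eval() error {
    for _, n := range s.nodes {
        if n == nil {
            continue
        }
        if err := n.Eval(); err != nil {
            return err
        }
    }
    return nil
}

type evalFunc func() error

func (f evalFunc) Eval() error { return f() }

func main() {
    tree := evalSequence{nodes: []evalNode{
        evalFunc(func() error { fmt.Println("init provider"); return nil }),
        evalFunc(func() error { fmt.Println("configure provider"); return nil }),
    }}
    fmt.Println(tree.Eval())
}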

terraform/evaluate.go (new file, 100 additions)
View File

@ -0,0 +1,100 @@
package terraform
import (
"sync"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/lang"
)
// Evaluator provides the necessary contextual data for evaluating expressions
// for a particular walk operation.
type Evaluator struct {
// Operation defines what type of operation this evaluator is being used
// for.
Operation walkOperation
// Meta is contextual metadata about the current operation.
Meta *ContextMeta
// Config is the root node in the configuration tree.
Config *configs.Config
// RootVariableValues is a map of values for variables defined in the
// root module, passed in from external sources. This must not be
// modified during evaluation.
RootVariableValues map[string]*InputValue
// State is the current state. During some operations this structure
// is mutated concurrently, and so it must be accessed only while holding
// StateLock.
State *State
StateLock *sync.RWMutex
}
// Scope creates an evaluation scope for the given module path and optional
// resource.
//
// If the "self" argument is nil then the "self" object is not available
// in evaluated expressions. Otherwise, it behaves as an alias for the given
// address.
func (e *Evaluator) Scope(modulePath addrs.ModuleInstance, self addrs.Referenceable) *lang.Scope {
return &lang.Scope{
Data: &evaluationStateData{
Evaluator: e,
ModulePath: modulePath,
},
SelfAddr: self,
PureOnly: e.Operation != walkApply && e.Operation != walkDestroy,
BaseDir: ".", // Always current working directory for now.
}
}
// evaluationStateData is an implementation of lang.Data that resolves
// references primarily (but not exclusively) using information from a State.
type evaluationStateData struct {
Evaluator *Evaluator
// ModulePath is the path through the dynamic module tree to the module
// that references will be resolved relative to.
ModulePath addrs.ModuleInstance
}
// evaluationStateData must implement lang.Data
var _ lang.Data = (*evaluationStateData)(nil)
func (d *evaluationStateData) GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
panic("not yet implemented")
}
func (d *evaluationStateData) GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
panic("not yet implemented")
}
func (d *evaluationStateData) GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
panic("not yet implemented")
}
func (d *evaluationStateData) GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
panic("not yet implemented")
}
func (d *evaluationStateData) GetModuleInstanceOutput(addrs.ModuleCallOutput, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
panic("not yet implemented")
}
func (d *evaluationStateData) GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
panic("not yet implemented")
}
func (d *evaluationStateData) GetResourceInstance(addrs.ResourceInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
panic("not yet implemented")
}
func (d *evaluationStateData) GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
panic("not yet implemented")
}
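The Scope/Data split above ultimately feeds values into HCL expression evaluation. As a loose analogy (plain hcl2 API, not the terraform-internal lang package), evaluating an expression against externally supplied values looks roughly like this:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl2/hcl"
    "github.com/hashicorp/hcl2/hcl/hclsyntax"
    "github.com/zclconf/go-cty/cty"
)

func main() {
    // Parse a standalone expression, similar to what the lang package does
    // internally for configuration expressions.
    expr, diags := hclsyntax.ParseExpression(
        []byte(`"hello, ${var.name}"`), "example.tf", hcl.Pos{Line: 1, Column: 1},
    )
    if diags.HasErrors() {
        panic(diags.Error())
    }

    // Supply values for the references; in Terraform these would come from a
    // lang.Data implementation such as evaluationStateData above.
    ctx := &hcl.EvalContext{
        Variables: map[string]cty.Value{
            "var": cty.ObjectVal(map[string]cty.Value{
                "name": cty.StringVal("world"),
            }),
        },
    }

    val, diags := expr.Value(ctx)
    if diags.HasErrors() {
        panic(diags.Error())
    }
    fmt.Println(val.AsString()) // hello, world
}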

View File

@ -4,17 +4,14 @@ import (
"fmt"
"log"
"runtime/debug"
"strings"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/dag"
)
// RootModuleName is the name given to the root module implicitly.
const RootModuleName = "root"
// RootModulePath is the path for the root module.
var RootModulePath = []string{RootModuleName}
// Graph represents the graph that Terraform uses to represent resources
// and their dependencies.
type Graph struct {
@ -23,9 +20,7 @@ type Graph struct {
dag.AcyclicGraph
// Path is the path in the module tree that this Graph represents.
// The root is represented by a single element list containing
// RootModuleName
Path []string
Path addrs.ModuleInstance
// debugName is a name for reference in the debug output. This is usually
// to indicate what the topmost builder was, and whether this graph is a shadow or
@ -40,17 +35,17 @@ func (g *Graph) DirectedGraph() dag.Grapher {
// Walk walks the graph with the given walker for callbacks. The graph
// will be walked with full parallelism, so the walker should expect
// to be called concurrently.
func (g *Graph) Walk(walker GraphWalker) error {
func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics {
return g.walk(walker)
}
func (g *Graph) walk(walker GraphWalker) error {
func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics {
// The callbacks for enter/exiting a graph
ctx := walker.EnterPath(g.Path)
defer walker.ExitPath(g.Path)
// Get the path for logs
path := strings.Join(ctx.Path(), ".")
path := ctx.Path().String()
// Determine if our walker is a panic wrapper
panicwrap, ok := walker.(GraphWalkerPanicwrapper)
@ -69,14 +64,16 @@ func (g *Graph) walk(walker GraphWalker) error {
// Walk the graph.
var walkFn dag.WalkFunc
walkFn = func(v dag.Vertex) (rerr error) {
log.Printf("[TRACE] vertex '%s.%s': walking", path, dag.VertexName(v))
walkFn = func(v dag.Vertex) (diags tfdiags.Diagnostics) {
log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v)
g.DebugVisitInfo(v, g.debugName)
// If we have a panic wrap GraphWalker and a panic occurs, recover
// and call that. We ensure the return value is an error, however,
// so that future nodes are not called.
defer func() {
log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v))
// If no panicwrap, do nothing
if panicwrap == nil {
return
@ -89,22 +86,21 @@ func (g *Graph) walk(walker GraphWalker) error {
}
// Modify the return value to show the error
rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
dag.VertexName(v), err, debug.Stack())
diags = diags.Append(fmt.Errorf("vertex %q captured panic: %s\n\n%s", dag.VertexName(v), err, debug.Stack()))
// Call the panic wrapper
panicwrap.Panic(v, err)
}()
walker.EnterVertex(v)
defer walker.ExitVertex(v, rerr)
defer walker.ExitVertex(v, diags)
// vertexCtx is the context that we use when evaluating. This
// is normally the context of our graph but can be overridden
// with a GraphNodeSubPath impl.
vertexCtx := ctx
if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path()))
vertexCtx = walker.EnterPath(pn.Path())
defer walker.ExitPath(pn.Path())
}
@ -112,60 +108,64 @@ func (g *Graph) walk(walker GraphWalker) error {
if ev, ok := v.(GraphNodeEvalable); ok {
tree := ev.EvalTree()
if tree == nil {
panic(fmt.Sprintf(
"%s.%s (%T): nil eval tree", path, dag.VertexName(v), v))
panic(fmt.Sprintf("%q (%T): nil eval tree", dag.VertexName(v), v))
}
// Allow the walker to change our tree if needed. Eval,
// then callback with the output.
log.Printf("[TRACE] vertex '%s.%s': evaluating", path, dag.VertexName(v))
log.Printf("[TRACE] vertex %q: evaluating", dag.VertexName(v))
g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path))
tree = walker.EnterEvalTree(v, tree)
output, err := Eval(tree, vertexCtx)
if rerr = walker.ExitEvalTree(v, output, err); rerr != nil {
diags = diags.Append(walker.ExitEvalTree(v, output, err))
if diags.HasErrors() {
return
}
}
// If the node is dynamically expanded, then expand it
if ev, ok := v.(GraphNodeDynamicExpandable); ok {
log.Printf(
"[TRACE] vertex '%s.%s': expanding/walking dynamic subgraph",
path,
dag.VertexName(v))
log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v))
g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path))
g, err := ev.DynamicExpand(vertexCtx)
if err != nil {
rerr = err
diags = diags.Append(err)
return
}
if g != nil {
// Walk the subgraph
if rerr = g.walk(walker); rerr != nil {
log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v))
subDiags := g.walk(walker)
diags = diags.Append(subDiags)
if subDiags.HasErrors() {
log.Printf("[TRACE] vertex %q: dynamic subgraph encountered errors", dag.VertexName(v))
return
}
log.Printf("[TRACE] vertex %q: dynamic subgraph completed successfully", dag.VertexName(v))
} else {
log.Printf("[TRACE] vertex %q: produced no dynamic subgraph", dag.VertexName(v))
}
}
// If the node has a subgraph, then walk the subgraph
if sn, ok := v.(GraphNodeSubgraph); ok {
log.Printf(
"[TRACE] vertex '%s.%s': walking subgraph",
path,
dag.VertexName(v))
log.Printf("[TRACE] vertex %q: entering static subgraph", dag.VertexName(v))
g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path))
if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil {
subDiags := sn.Subgraph().(*Graph).walk(walker)
if subDiags.HasErrors() {
log.Printf("[TRACE] vertex %q: static subgraph encountered errors", dag.VertexName(v))
return
}
log.Printf("[TRACE] vertex %q: static subgraph completed successfully", dag.VertexName(v))
}
return nil
return
}
return g.AcyclicGraph.Walk(walkFn)

View File

@ -4,6 +4,10 @@ import (
"fmt"
"log"
"strings"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/terraform/addrs"
)
// GraphBuilder is an interface that can be implemented and used with
@ -12,7 +16,7 @@ type GraphBuilder interface {
// Build builds the graph for the given module path. It is up to
// the interface implementation whether this build should expand
// the graph or not.
Build(path []string) (*Graph, error)
Build(addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics)
}
// BasicGraphBuilder is a GraphBuilder that builds a graph out of a
@ -25,7 +29,8 @@ type BasicGraphBuilder struct {
Name string
}
func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
g := &Graph{Path: path}
debugName := "graph.json"
@ -36,10 +41,12 @@ func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
g.SetDebugWriter(debugBuf)
defer debugBuf.Close()
var lastStepStr string
for _, step := range b.Steps {
if step == nil {
continue
}
log.Printf("[TRACE] Executing graph transform %T", step)
stepName := fmt.Sprintf("%T", step)
dot := strings.LastIndex(stepName, ".")
@ -56,12 +63,20 @@ func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
}
debugOp.End(errMsg)
log.Printf(
"[TRACE] Graph after step %T:\n\n%s",
step, g.StringWithNodeTypes())
if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr {
log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s------", step, thisStepStr)
lastStepStr = thisStepStr
} else {
log.Printf("[TRACE] Completed graph transform %T (no changes)", step)
}
if err != nil {
return g, err
if nf, isNF := err.(tfdiags.NonFatalError); isNF {
diags = diags.Append(nf.Diagnostics)
} else {
diags = diags.Append(err)
return g, diags
}
}
}
@ -69,9 +84,10 @@ func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
if b.Validate {
if err := g.Validate(); err != nil {
log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String())
return nil, err
diags = diags.Append(err)
return nil, diags
}
}
return g, nil
return g, diags
}
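A graph builder is essentially a list of transform steps applied to a graph in sequence, each able to fail with diagnostics. A stripped-down sketch of that loop with illustrative stand-in types (not the real terraform Graph or GraphTransformer):

package main

import "fmt"

// graph and transformer are illustrative stand-ins for terraform.Graph and
// terraform.GraphTransformer.
type graph struct{ nodes []string }

type transformer interface {
    Transform(*graph) error
}

type addNode struct{ name string }

func (t addNode) Transform(g *graph) error {
    g.nodes = append(g.nodes, t.name)
    return nil
}

// build applies each non-nil step in order, as BasicGraphBuilder.Build does.
func build(steps []transformer) (*graph, error) {
    g := &graph{}
    for _, step := range steps {
        if step == nil {
            continue
        }
        if err := step.Transform(g); err != nil {
            return g, err
        }
    }
    return g, nil
}

func main() {
    g, err := build([]transformer{addNode{"root"}, nil, addNode{"provider.aws"}})
    fmt.Println(g.nodes, err) // [root provider.aws] <nil>
}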

View File

@ -1,8 +1,10 @@
package terraform
import (
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/tfdiags"
)
// ApplyGraphBuilder implements GraphBuilder and is responsible for building
@ -13,8 +15,8 @@ import (
// that aren't explicitly in the diff. There are other scenarios where the
// diff can be deviated, so this is just one layer of protection.
type ApplyGraphBuilder struct {
// Module is the root module for the graph to build.
Module *module.Tree
// Config is the configuration tree that the diff was built from.
Config *configs.Config
// Diff is the diff to apply.
Diff *Diff
@ -32,7 +34,7 @@ type ApplyGraphBuilder struct {
// unnecessary outputs aren't included in the apply graph. The plan
// builder successfully handles targeting resources. In the future,
// outputs should go into the diff so that this is unnecessary.
Targets []string
Targets []addrs.Targetable
// DisableReduce, if true, will not reduce the graph. Great for testing.
DisableReduce bool
@ -45,7 +47,7 @@ type ApplyGraphBuilder struct {
}
// See GraphBuilder
func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) {
func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
return (&BasicGraphBuilder{
Steps: b.Steps(),
Validate: b.Validate,
@ -62,9 +64,9 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
}
}
concreteResource := func(a *NodeAbstractResource) dag.Vertex {
return &NodeApplyableResource{
NodeAbstractResource: a,
concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
return &NodeApplyableResourceInstance{
NodeAbstractResourceInstance: a,
}
}
@ -72,29 +74,26 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
// Creates all the nodes represented in the diff.
&DiffTransformer{
Concrete: concreteResource,
Diff: b.Diff,
Module: b.Module,
State: b.State,
Diff: b.Diff,
},
// Create orphan output nodes
&OrphanOutputTransformer{Module: b.Module, State: b.State},
&OrphanOutputTransformer{Config: b.Config, State: b.State},
// Attach the configuration to any resources
&AttachResourceConfigTransformer{Module: b.Module},
&AttachResourceConfigTransformer{Config: b.Config},
// Attach the state
&AttachStateTransformer{State: b.State},
// add providers
TransformProviders(b.Providers, concreteProvider, b.Module),
TransformProviders(b.Providers, concreteProvider, b.Config),
// Destruction ordering
&DestroyEdgeTransformer{Module: b.Module, State: b.State},
&DestroyEdgeTransformer{Config: b.Config, State: b.State},
GraphTransformIf(
func() bool { return !b.Destroy },
&CBDEdgeTransformer{Module: b.Module, State: b.State},
&CBDEdgeTransformer{Config: b.Config, State: b.State},
),
// Provisioner-related transformations
@ -102,19 +101,19 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
&ProvisionerTransformer{},
// Add root variables
&RootVariableTransformer{Module: b.Module},
&RootVariableTransformer{Config: b.Config},
// Add the local values
&LocalTransformer{Module: b.Module},
&LocalTransformer{Config: b.Config},
// Add the outputs
&OutputTransformer{Module: b.Module},
&OutputTransformer{Config: b.Config},
// Add module variables
&ModuleVariableTransformer{Module: b.Module},
&ModuleVariableTransformer{Config: b.Config},
// Remove modules no longer present in the config
&RemovedModuleTransformer{Module: b.Module, State: b.State},
&RemovedModuleTransformer{Config: b.Config, State: b.State},
// Connect references so ordering is correct
&ReferenceTransformer{},

View File

@ -1,8 +1,10 @@
package terraform
import (
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/tfdiags"
)
// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for
@ -11,21 +13,21 @@ import (
// Planning a pure destroy operation is simple because we can ignore most
// ordering configuration and simply reverse the state.
type DestroyPlanGraphBuilder struct {
// Module is the root module for the graph to build.
Module *module.Tree
// Config is the configuration tree to build the plan from.
Config *configs.Config
// State is the current state
State *State
// Targets are resources to target
Targets []string
Targets []addrs.Targetable
// Validate will do structural validation of the graph.
Validate bool
}
// See GraphBuilder
func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) {
func (b *DestroyPlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
return (&BasicGraphBuilder{
Steps: b.Steps(),
Validate: b.Validate,
@ -35,25 +37,25 @@ func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) {
// See GraphBuilder
func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
concreteResource := func(a *NodeAbstractResource) dag.Vertex {
return &NodePlanDestroyableResource{
NodeAbstractResource: a,
concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
return &NodePlanDestroyableResourceInstance{
NodeAbstractResourceInstance: a,
}
}
steps := []GraphTransformer{
// Creates all the nodes represented in the state.
&StateTransformer{
Concrete: concreteResource,
Concrete: concreteResourceInstance,
State: b.State,
},
// Attach the configuration to any resources
&AttachResourceConfigTransformer{Module: b.Module},
&AttachResourceConfigTransformer{Config: b.Config},
// Destruction ordering. We require this only so that
// targeting below will prune the correct things.
&DestroyEdgeTransformer{Module: b.Module, State: b.State},
&DestroyEdgeTransformer{Config: b.Config, State: b.State},
// Target. Note we don't set "Destroy: true" here since we already
// created proper destroy ordering.

View File

@ -1,8 +1,10 @@
package terraform
import (
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/tfdiags"
)
// ImportGraphBuilder implements GraphBuilder and is responsible for building
@ -12,15 +14,15 @@ type ImportGraphBuilder struct {
// ImportTargets are the list of resources to import.
ImportTargets []*ImportTarget
// Module is the module to add to the graph. See ImportOpts.Module.
Module *module.Tree
// Config is the configuration tree to build the graph from. See ImportOpts.Config.
Config *configs.Config
// Providers is the list of providers supported.
Providers []string
}
// Build builds the graph according to the steps returned by Steps.
func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) {
func (b *ImportGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
return (&BasicGraphBuilder{
Steps: b.Steps(),
Validate: true,
@ -33,9 +35,9 @@ func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) {
func (b *ImportGraphBuilder) Steps() []GraphTransformer {
// Get the configuration. If we don't have one, we just use an empty config
// so that the transform still works but does nothing.
mod := b.Module
if mod == nil {
mod = module.NewEmptyTree()
config := b.Config
if config == nil {
config = configs.NewEmptyConfig()
}
// Custom factory for creating providers.
@ -47,12 +49,12 @@ func (b *ImportGraphBuilder) Steps() []GraphTransformer {
steps := []GraphTransformer{
// Create all our resources from the configuration and state
&ConfigTransformer{Module: mod},
&ConfigTransformer{Config: config},
// Add the import steps
&ImportStateTransformer{Targets: b.ImportTargets},
TransformProviders(b.Providers, concreteProvider, mod),
TransformProviders(b.Providers, concreteProvider, config),
// This validates that the providers only depend on variables
&ImportProviderValidateTransformer{},

View File

@ -3,7 +3,11 @@ package terraform
import (
"sync"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/dag"
)
@ -19,8 +23,8 @@ import (
// create-before-destroy can be completely ignored.
//
type PlanGraphBuilder struct {
// Module is the root module for the graph to build.
Module *module.Tree
// Config is the configuration tree to build a plan from.
Config *configs.Config
// State is the current state
State *State
@ -32,7 +36,7 @@ type PlanGraphBuilder struct {
Provisioners []string
// Targets are resources to target
Targets []string
Targets []addrs.Targetable
// DisableReduce, if true, will not reduce the graph. Great for testing.
DisableReduce bool
@ -46,13 +50,13 @@ type PlanGraphBuilder struct {
CustomConcrete bool
ConcreteProvider ConcreteProviderNodeFunc
ConcreteResource ConcreteResourceNodeFunc
ConcreteResourceOrphan ConcreteResourceNodeFunc
ConcreteResourceOrphan ConcreteResourceInstanceNodeFunc
once sync.Once
}
// See GraphBuilder
func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) {
func (b *PlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
return (&BasicGraphBuilder{
Steps: b.Steps(),
Validate: b.Validate,
@ -68,38 +72,38 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
// Creates all the resources represented in the config
&ConfigTransformer{
Concrete: b.ConcreteResource,
Module: b.Module,
Config: b.Config,
},
// Add the local values
&LocalTransformer{Module: b.Module},
&LocalTransformer{Config: b.Config},
// Add the outputs
&OutputTransformer{Module: b.Module},
&OutputTransformer{Config: b.Config},
// Add orphan resources
&OrphanResourceTransformer{
Concrete: b.ConcreteResourceOrphan,
State: b.State,
Module: b.Module,
Config: b.Config,
},
// Create orphan output nodes
&OrphanOutputTransformer{
Module: b.Module,
Config: b.Config,
State: b.State,
},
// Attach the configuration to any resources
&AttachResourceConfigTransformer{Module: b.Module},
&AttachResourceConfigTransformer{Config: b.Config},
// Attach the state
&AttachStateTransformer{State: b.State},
// Add root variables
&RootVariableTransformer{Module: b.Module},
&RootVariableTransformer{Config: b.Config},
TransformProviders(b.Providers, b.ConcreteProvider, b.Module),
TransformProviders(b.Providers, b.ConcreteProvider, b.Config),
// Provisioner-related transformations. Only add these if requested.
GraphTransformIf(
@ -112,11 +116,11 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
// Add module variables
&ModuleVariableTransformer{
Module: b.Module,
Config: b.Config,
},
// Remove modules no longer present in the config
&RemovedModuleTransformer{Module: b.Module, State: b.State},
&RemovedModuleTransformer{Config: b.Config, State: b.State},
// Connect so that the references are ready for targeting. We'll
// have to connect again later for providers and so on.
@ -167,15 +171,13 @@ func (b *PlanGraphBuilder) init() {
b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
return &NodePlannableResource{
NodeAbstractCountResource: &NodeAbstractCountResource{
NodeAbstractResource: a,
},
}
}
b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex {
return &NodePlannableResourceOrphan{
NodeAbstractResource: a,
}
}
b.ConcreteResourceOrphan = func(a *NodeAbstractResourceInstance) dag.Vertex {
return &NodePlannableResourceInstanceOrphan{
NodeAbstractResourceInstance: a,
}
}
}

View File

@ -3,8 +3,10 @@ package terraform
import (
"log"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/dag"
)
@ -21,8 +23,8 @@ import (
// create-before-destroy can be completely ignored.
//
type RefreshGraphBuilder struct {
// Module is the root module for the graph to build.
Module *module.Tree
// Config is the configuration tree.
Config *configs.Config
// State is the current state
State *State
@ -31,7 +33,7 @@ type RefreshGraphBuilder struct {
Providers []string
// Targets are resources to target
Targets []string
Targets []addrs.Targetable
// DisableReduce, if true, will not reduce the graph. Great for testing.
DisableReduce bool
@ -41,7 +43,7 @@ type RefreshGraphBuilder struct {
}
// See GraphBuilder
func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) {
func (b *RefreshGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
return (&BasicGraphBuilder{
Steps: b.Steps(),
Validate: b.Validate,
@ -60,23 +62,19 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex {
return &NodeRefreshableManagedResource{
NodeAbstractCountResource: &NodeAbstractCountResource{
NodeAbstractResource: a,
},
NodeAbstractResource: a,
}
}
concreteManagedResourceInstance := func(a *NodeAbstractResource) dag.Vertex {
concreteManagedResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
return &NodeRefreshableManagedResourceInstance{
NodeAbstractResource: a,
NodeAbstractResourceInstance: a,
}
}
concreteDataResource := func(a *NodeAbstractResource) dag.Vertex {
return &NodeRefreshableDataResource{
NodeAbstractCountResource: &NodeAbstractCountResource{
NodeAbstractResource: a,
},
NodeAbstractResource: a,
}
}
@ -88,13 +86,13 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
if b.State.HasResources() {
return &ConfigTransformer{
Concrete: concreteManagedResource,
Module: b.Module,
Config: b.Config,
Unique: true,
ModeFilter: true,
Mode: config.ManagedResourceMode,
Mode: addrs.ManagedResourceMode,
}
}
log.Println("[TRACE] No managed resources in state during refresh, skipping managed resource transformer")
log.Println("[TRACE] No managed resources in state during refresh; skipping managed resource transformer")
return nil
}(),
@ -102,10 +100,10 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
// add any orphans from scaling in as destroy nodes.
&ConfigTransformer{
Concrete: concreteDataResource,
Module: b.Module,
Config: b.Config,
Unique: true,
ModeFilter: true,
Mode: config.DataResourceMode,
Mode: addrs.DataResourceMode,
},
// Add any fully-orphaned resources from config (ones that have been
@ -114,28 +112,28 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
&OrphanResourceTransformer{
Concrete: concreteManagedResourceInstance,
State: b.State,
Module: b.Module,
Config: b.Config,
},
// Attach the state
&AttachStateTransformer{State: b.State},
// Attach the configuration to any resources
&AttachResourceConfigTransformer{Module: b.Module},
&AttachResourceConfigTransformer{Config: b.Config},
// Add root variables
&RootVariableTransformer{Module: b.Module},
&RootVariableTransformer{Config: b.Config},
TransformProviders(b.Providers, concreteProvider, b.Module),
TransformProviders(b.Providers, concreteProvider, b.Config),
// Add the local values
&LocalTransformer{Module: b.Module},
&LocalTransformer{Config: b.Config},
// Add the outputs
&OutputTransformer{Module: b.Module},
&OutputTransformer{Config: b.Config},
// Add module variables
&ModuleVariableTransformer{Module: b.Module},
&ModuleVariableTransformer{Config: b.Config},
// Connect so that the references are ready for targeting. We'll
// have to connect again later for providers and so on.

View File

@ -23,9 +23,7 @@ func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
return &NodeValidatableResource{
NodeAbstractCountResource: &NodeAbstractCountResource{
NodeAbstractResource: a,
},
NodeAbstractResource: a,
}
}

View File

@ -1,7 +1,11 @@
package terraform
import (
"github.com/hashicorp/terraform/addrs"
)
// GraphNodeSubPath says that a node is part of a graph with a
// different path, and the context should be adjusted accordingly.
type GraphNodeSubPath interface {
Path() []string
Path() addrs.ModuleInstance
}

View File

@ -1,18 +1,20 @@
package terraform
import (
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/tfdiags"
)
// GraphWalker is an interface that can be implemented that when used
// with Graph.Walk will invoke the given callbacks under certain events.
type GraphWalker interface {
EnterPath([]string) EvalContext
ExitPath([]string)
EnterPath(addrs.ModuleInstance) EvalContext
ExitPath(addrs.ModuleInstance)
EnterVertex(dag.Vertex)
ExitVertex(dag.Vertex, error)
ExitVertex(dag.Vertex, tfdiags.Diagnostics)
EnterEvalTree(dag.Vertex, EvalNode) EvalNode
ExitEvalTree(dag.Vertex, interface{}, error) error
ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics
}
// GraphWalkerPanicwrapper can be optionally implemented to catch panics
@ -50,11 +52,11 @@ func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
// implementing all the required functions.
type NullGraphWalker struct{}
func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) }
func (NullGraphWalker) ExitPath([]string) {}
func (NullGraphWalker) EnterPath(addrs.ModuleInstance) EvalContext { return new(MockEvalContext) }
func (NullGraphWalker) ExitPath(addrs.ModuleInstance) {}
func (NullGraphWalker) EnterVertex(dag.Vertex) {}
func (NullGraphWalker) ExitVertex(dag.Vertex, error) {}
func (NullGraphWalker) ExitVertex(dag.Vertex, tfdiags.Diagnostics) {}
func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n }
func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error {
func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics {
return nil
}

View File

@ -2,11 +2,14 @@ package terraform
import (
"context"
"fmt"
"log"
"sync"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/dag"
)
@ -20,10 +23,9 @@ type ContextGraphWalker struct {
Operation walkOperation
StopContext context.Context
// Outputs, do not set these. Do not read these while the graph
// is being walked.
ValidationWarnings []string
ValidationErrors []error
// This is an output. Do not set this, nor read it while a graph walk
// is in progress.
NonFatalDiagnostics tfdiags.Diagnostics
errorLock sync.Mutex
once sync.Once
@ -32,19 +34,21 @@ type ContextGraphWalker struct {
interpolaterVars map[string]map[string]interface{}
interpolaterVarLock sync.Mutex
providerCache map[string]ResourceProvider
providerSchemas map[string]*ProviderSchema
providerLock sync.Mutex
provisionerCache map[string]ResourceProvisioner
provisionerSchemas map[string]*configschema.Block
provisionerLock sync.Mutex
}
func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
func (w *ContextGraphWalker) EnterPath(path addrs.ModuleInstance) EvalContext {
w.once.Do(w.init)
w.contextLock.Lock()
defer w.contextLock.Unlock()
// If we already have a context for this path cached, use that
key := PathCacheKey(path)
key := path.String()
if ctx, ok := w.contexts[key]; ok {
return ctx
}
@ -65,6 +69,13 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
w.interpolaterVars[key] = variables
w.interpolaterVarLock.Unlock()
// Our evaluator shares some locks with the main context and the walker
// so that we can safely run multiple evaluations at once across
// different modules.
evaluator := &Evaluator{
StateLock: &w.Context.stateLock,
}
ctx := &BuiltinEvalContext{
StopContext: w.StopContext,
PathValue: path,
@ -80,17 +91,7 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
DiffLock: &w.Context.diffLock,
StateValue: w.Context.state,
StateLock: &w.Context.stateLock,
Interpolater: &Interpolater{
Operation: w.Operation,
Meta: w.Context.meta,
Module: w.Context.module,
State: w.Context.state,
StateLock: &w.Context.stateLock,
VariableValues: variables,
VariableValuesLock: &w.interpolaterVarLock,
},
InterpolaterVars: w.interpolaterVars,
InterpolaterVarLock: &w.interpolaterVarLock,
Evaluator: evaluator,
}
w.contexts[key] = ctx
@ -98,8 +99,7 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
}
func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
log.Printf("[TRACE] [%s] Entering eval tree: %s",
w.Operation, dag.VertexName(v))
log.Printf("[TRACE] [%s] Entering eval tree: %s", w.Operation, dag.VertexName(v))
// Acquire a lock on the semaphore
w.Context.parallelSem.Acquire()
@ -109,10 +109,8 @@ func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
return EvalFilter(n, EvalNodeFilterOp(w.Operation))
}
func (w *ContextGraphWalker) ExitEvalTree(
v dag.Vertex, output interface{}, err error) error {
log.Printf("[TRACE] [%s] Exiting eval tree: %s",
w.Operation, dag.VertexName(v))
func (w *ContextGraphWalker) ExitEvalTree(v dag.Vertex, output interface{}, err error) tfdiags.Diagnostics {
log.Printf("[TRACE] [%s] Exiting eval tree: %s", w.Operation, dag.VertexName(v))
// Release the semaphore
w.Context.parallelSem.Release()
@ -125,30 +123,28 @@ func (w *ContextGraphWalker) ExitEvalTree(
w.errorLock.Lock()
defer w.errorLock.Unlock()
// Try to get a validation error out of it. If its not a validation
// error, then just record the normal error.
verr, ok := err.(*EvalValidateError)
if !ok {
return err
// If the error is non-fatal then we'll accumulate its diagnostics in our
// non-fatal list, rather than returning it directly, so that the graph
// walk can continue.
if nferr, ok := err.(tfdiags.NonFatalError); ok {
w.NonFatalDiagnostics = w.NonFatalDiagnostics.Append(nferr.Diagnostics)
return nil
}
for _, msg := range verr.Warnings {
w.ValidationWarnings = append(
w.ValidationWarnings,
fmt.Sprintf("%s: %s", dag.VertexName(v), msg))
}
for _, e := range verr.Errors {
w.ValidationErrors = append(
w.ValidationErrors,
errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e))
}
return nil
// Otherwise, we'll let our usual diagnostics machinery figure out how to
// unpack this as one or more diagnostic messages and return that. If we
// get down here then the returned diagnostics will contain at least one
// error, causing the graph walk to halt.
var diags tfdiags.Diagnostics
diags = diags.Append(err)
return diags
}
func (w *ContextGraphWalker) init() {
w.contexts = make(map[string]*BuiltinEvalContext, 5)
w.providerCache = make(map[string]ResourceProvider, 5)
w.provisionerCache = make(map[string]ResourceProvisioner, 5)
w.interpolaterVars = make(map[string]map[string]interface{}, 5)
w.contexts = make(map[string]*BuiltinEvalContext)
w.providerCache = make(map[string]ResourceProvider)
w.providerSchemas = make(map[string]*ProviderSchema)
w.provisionerCache = make(map[string]ResourceProvisioner)
w.provisionerSchemas = make(map[string]*configschema.Block)
w.interpolaterVars = make(map[string]map[string]interface{})
}
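The non-fatal handling in ExitEvalTree boils down to classifying the returned error: unwrap and accumulate if it is non-fatal, halt otherwise. A stand-alone sketch of that decision (NonFatalError here is a stand-in for tfdiags.NonFatalError):

package main

import (
    "errors"
    "fmt"
)

// NonFatalError is a stand-in for tfdiags.NonFatalError: it wraps problems
// that should be reported but must not halt the graph walk.
type NonFatalError struct{ Problems []error }

func (e NonFatalError) Error() string {
    return fmt.Sprintf("%d non-fatal problems", len(e.Problems))
}

// classify returns the problems to accumulate and whether the walk must halt.
func classify(err error) (accumulate []error, halt bool) {
    if err == nil {
        return nil, false
    }
    var nf NonFatalError
    if errors.As(err, &nf) {
        return nf.Problems, false
    }
    return []error{err}, true
}

func main() {
    probs, halt := classify(NonFatalError{Problems: []error{errors.New("deprecated usage")}})
    fmt.Println(len(probs), halt) // 1 false

    probs, halt = classify(errors.New("provider crashed"))
    fmt.Println(len(probs), halt) // 1 true
}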

View File

@ -45,65 +45,7 @@ type InterpolationScope struct {
func (i *Interpolater) Values(
scope *InterpolationScope,
vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) {
if scope == nil {
scope = &InterpolationScope{}
}
result := make(map[string]ast.Variable, len(vars))
// Copy the default variables
if i.Module != nil && scope != nil {
mod := i.Module
if len(scope.Path) > 1 {
mod = i.Module.Child(scope.Path[1:])
}
for _, v := range mod.Config().Variables {
// Set default variables
if v.Default == nil {
continue
}
n := fmt.Sprintf("var.%s", v.Name)
variable, err := hil.InterfaceToVariable(v.Default)
if err != nil {
return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default)
}
result[n] = variable
}
}
for n, rawV := range vars {
var err error
switch v := rawV.(type) {
case *config.CountVariable:
err = i.valueCountVar(scope, n, v, result)
case *config.ModuleVariable:
err = i.valueModuleVar(scope, n, v, result)
case *config.PathVariable:
err = i.valuePathVar(scope, n, v, result)
case *config.ResourceVariable:
err = i.valueResourceVar(scope, n, v, result)
case *config.SelfVariable:
err = i.valueSelfVar(scope, n, v, result)
case *config.SimpleVariable:
err = i.valueSimpleVar(scope, n, v, result)
case *config.TerraformVariable:
err = i.valueTerraformVar(scope, n, v, result)
case *config.LocalVariable:
err = i.valueLocalVar(scope, n, v, result)
case *config.UserVariable:
err = i.valueUserVar(scope, n, v, result)
default:
err = fmt.Errorf("%s: unknown variable type: %T", n, rawV)
}
if err != nil {
return nil, err
}
}
return result, nil
return nil, fmt.Errorf("type Interpolator is no longer supported; use the evaluator API instead")
}
func (i *Interpolater) valueCountVar(
@ -153,7 +95,7 @@ func (i *Interpolater) valueModuleVar(
defer i.StateLock.RUnlock()
// Get the module where we're looking for the value
mod := i.State.ModuleByPath(path)
mod := i.State.ModuleByPath(normalizeModulePath(path))
if mod == nil {
// If the module doesn't exist, then we can return an empty string.
// This happens usually only in Refresh() when we haven't populated
@ -365,7 +307,7 @@ func (i *Interpolater) valueLocalVar(
}
// Get the relevant module
module := i.State.ModuleByPath(scope.Path)
module := i.State.ModuleByPath(normalizeModulePath(scope.Path))
if module == nil {
result[n] = unknownVariable()
return nil
@ -776,7 +718,7 @@ func (i *Interpolater) resourceVariableInfo(
}
// Get the relevant module
module := i.State.ModuleByPath(scope.Path)
module := i.State.ModuleByPath(normalizeModulePath(scope.Path))
return module, cr, nil
}

View File

@ -101,15 +101,15 @@ func configTreeConfigDependencies(root *configs.Config, inheritProviders map[str
// dependency, though we'll only record it if there isn't already
// an explicit dependency on the same provider.
for _, rc := range module.ManagedResources {
fullName := rc.ProviderConfigKey()
inst := moduledeps.ProviderInstance(fullName)
addr := rc.ProviderConfigAddr()
inst := moduledeps.ProviderInstance(addr.StringCompact())
if _, exists := providers[inst]; exists {
// Explicit dependency already present
continue
}
reason := moduledeps.ProviderDependencyImplicit
if _, inherited := inheritProviders[fullName]; inherited {
if _, inherited := inheritProviders[addr.String()]; inherited {
reason = moduledeps.ProviderDependencyInherited
}
@ -119,15 +119,15 @@ func configTreeConfigDependencies(root *configs.Config, inheritProviders map[str
}
}
for _, rc := range module.DataResources {
fullName := rc.ProviderConfigKey()
inst := moduledeps.ProviderInstance(fullName)
addr := rc.ProviderConfigAddr()
inst := moduledeps.ProviderInstance(addr.StringCompact())
if _, exists := providers[inst]; exists {
// Explicit dependency already present
continue
}
reason := moduledeps.ProviderDependencyImplicit
if _, inherited := inheritProviders[fullName]; inherited {
if _, inherited := inheritProviders[addr.String()]; inherited {
reason = moduledeps.ProviderDependencyInherited
}
@ -161,10 +161,11 @@ func configTreeMergeStateDependencies(root *moduledeps.Module, state *State) {
findModule := func(path []string) *moduledeps.Module {
module := root
for _, name := range path[1:] { // skip initial "root"
realPath := normalizeModulePath(path)
for _, step := range realPath { // skip initial "root"
var next *moduledeps.Module
for _, cm := range module.Children {
if cm.Name == name {
if cm.Name == step.Name {
next = cm
break
}
@ -173,7 +174,7 @@ func configTreeMergeStateDependencies(root *moduledeps.Module, state *State) {
if next == nil {
// If we didn't find a next node, we'll need to make one
next = &moduledeps.Module{
Name: name,
Name: step.Name,
}
module.Children = append(module.Children, next)
}

View File

@ -1,17 +1,17 @@
package terraform
// NodeDestroyableDataResource represents a resource that is "plannable":
// it is ready to be planned in order to create a diff.
// NodeDestroyableDataResource represents a resource that is "destroyable":
// it is ready to be destroyed.
type NodeDestroyableDataResource struct {
*NodeAbstractResource
*NodeAbstractResourceInstance
}
// GraphNodeEvalable
func (n *NodeDestroyableDataResource) EvalTree() EvalNode {
addr := n.NodeAbstractResource.Addr
addr := n.ResourceInstanceAddr()
// stateId is the ID to put into the state
stateId := addr.stateId()
stateId := NewLegacyResourceInstanceAddress(addr).stateId()
// Just destroy it.
var state *InstanceState

View File

@ -2,46 +2,62 @@ package terraform
import (
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
)
// NodeRefreshableDataResource represents a resource that is "plannable":
// it is ready to be planned in order to create a diff.
// NodeRefreshableDataResource represents a resource that is "refreshable".
type NodeRefreshableDataResource struct {
*NodeAbstractCountResource
*NodeAbstractResource
}
var (
_ GraphNodeSubPath = (*NodeRefreshableDataResource)(nil)
_ GraphNodeDynamicExpandable = (*NodeRefreshableDataResource)(nil)
_ GraphNodeReferenceable = (*NodeRefreshableDataResource)(nil)
_ GraphNodeReferencer = (*NodeRefreshableDataResource)(nil)
_ GraphNodeResource = (*NodeRefreshableDataResource)(nil)
_ GraphNodeAttachResourceConfig = (*NodeRefreshableDataResource)(nil)
)
// GraphNodeDynamicExpandable
func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
var diags tfdiags.Diagnostics
count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)
diags = diags.Append(countDiags)
if countDiags.HasErrors() {
return nil, diags.Err()
}
// Next we need to potentially rename an instance address in the state
// if we're transitioning whether "count" is set at all.
fixResourceCountSetTransition(ctx, n.ResourceAddr().Resource, count != -1)
// Grab the state which we read
state, lock := ctx.State()
lock.RLock()
defer lock.RUnlock()
// Expand the resource count which must be available by now from EvalTree
count, err := n.Config.Count()
if err != nil {
return nil, err
}
// The concrete resource factory we'll use
concreteResource := func(a *NodeAbstractResource) dag.Vertex {
concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
// Add the config and state since we don't do that via transforms
a.Config = n.Config
a.ResolvedProvider = n.ResolvedProvider
return &NodeRefreshableDataResourceInstance{
NodeAbstractResource: a,
NodeAbstractResourceInstance: a,
}
}
// We also need a destroyable resource for orphans that are a result of a
// scaled-in count.
concreteResourceDestroyable := func(a *NodeAbstractResource) dag.Vertex {
concreteResourceDestroyable := func(a *NodeAbstractResourceInstance) dag.Vertex {
// Add the config since we don't do that via transforms
a.Config = n.Config
return &NodeDestroyableDataResource{
NodeAbstractResource: a,
NodeAbstractResourceInstance: a,
}
}
@ -67,7 +83,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
&AttachStateTransformer{State: state},
// Targeting
&TargetsTransformer{ParsedTargets: n.Targets},
&TargetsTransformer{Targets: n.Targets},
// Connect references so ordering is correct
&ReferenceTransformer{},
@ -83,61 +99,49 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
Name: "NodeRefreshableDataResource",
}
return b.Build(ctx.Path())
graph, diags := b.Build(ctx.Path())
return graph, diags.ErrWithWarnings()
}
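Evaluating the count expression ultimately means converting whatever value it produced into a known integer (with -1 reserved for "count not set"). A rough stand-alone sketch of that conversion using go-cty, not the internal evaluateResourceCountExpression helper:

package main

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"
    "github.com/zclconf/go-cty/cty/convert"
    "github.com/zclconf/go-cty/cty/gocty"
)

// countFromValue converts an evaluated "count" value into an int. The caller
// is assumed to handle the "count not set at all" case separately (the -1
// sentinel used above).
func countFromValue(v cty.Value) (int, error) {
    v, err := convert.Convert(v, cty.Number)
    if err != nil {
        return 0, fmt.Errorf("invalid count value: %s", err)
    }
    if !v.IsKnown() {
        return 0, fmt.Errorf("count depends on values that are not yet known")
    }
    var n int
    if err := gocty.FromCtyValue(v, &n); err != nil {
        return 0, fmt.Errorf("invalid count value: %s", err)
    }
    return n, nil
}

func main() {
    n, err := countFromValue(cty.NumberIntVal(3))
    fmt.Println(n, err) // 3 <nil>
}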
// NodeRefreshableDataResourceInstance represents a _single_ resource instance
// NodeRefreshableDataResourceInstance represents a single resource instance
// that is refreshable.
type NodeRefreshableDataResourceInstance struct {
*NodeAbstractResource
*NodeAbstractResourceInstance
}
// GraphNodeEvalable
func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
addr := n.NodeAbstractResource.Addr
addr := n.ResourceInstanceAddr()
// stateId is the ID to put into the state
stateId := addr.stateId()
// State still uses legacy-style internal ids, so we need to shim to get
// a suitable key to use.
stateId := NewLegacyResourceInstanceAddress(addr).stateId()
// Build the instance info. More of this will be populated during eval
info := &InstanceInfo{
Id: stateId,
Type: addr.Type,
}
// Get the state if we have it, if not we build it
// Get the state if we have it. If not, we'll build it.
rs := n.ResourceState
if rs == nil {
rs = &ResourceState{
Provider: n.ResolvedProvider,
Type: addr.Resource.Resource.Type,
Provider: n.ResolvedProvider.String(),
}
}
// If the config isn't empty we update the state
// If we have a configuration then we'll build a fresh state.
if n.Config != nil {
rs = &ResourceState{
Type: n.Config.Type,
Provider: n.Config.Provider,
Type: addr.Resource.Resource.Type,
Provider: n.ResolvedProvider.String(),
Dependencies: n.StateReferences(),
}
}
// Build the resource for eval
resource := &Resource{
Name: addr.Name,
Type: addr.Type,
CountIndex: addr.Index,
}
if resource.CountIndex < 0 {
resource.CountIndex = 0
}
// Declare a bunch of variables that are used for state during
// evaluation. Most of these are written to by address below.
var config *ResourceConfig
var diff *InstanceDiff
// These variables are the state for the eval sequence below, and are
// updated through pointers.
var provider ResourceProvider
var providerSchema *ProviderSchema
var diff *InstanceDiff
var state *InstanceState
var configVal cty.Value
return &EvalSequence{
Nodes: []EvalNode{
@ -153,10 +157,19 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
State: &state, // state is nil here
},
&EvalInterpolate{
Config: n.Config.RawConfig.Copy(),
Resource: resource,
Output: &config,
&EvalGetProvider{
Addr: n.ResolvedProvider,
Output: &provider,
},
&EvalReadDataDiff{
Addr: addr.Resource,
Config: n.Config,
Provider: &provider,
ProviderSchema: &providerSchema,
Output: &diff,
OutputValue: &configVal,
OutputState: &state,
},
// The rest of this pass can proceed only if there are no
@ -165,7 +178,7 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
// apply phases.)
&EvalIf{
If: func(ctx EvalContext) (bool, error) {
if config.ComputedKeys != nil && len(config.ComputedKeys) > 0 {
if !configVal.IsWhollyKnown() {
return true, EvalEarlyExitError{}
}
@ -178,30 +191,11 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
return true, nil
},
Then: EvalNoop{},
},
// The remainder of this pass is the same as running
// a "plan" pass immediately followed by an "apply" pass,
// populating the state early so it'll be available to
// provider configurations that need this data during
// refresh/plan.
&EvalGetProvider{
Name: n.ResolvedProvider,
Output: &provider,
},
&EvalReadDataDiff{
Info: info,
Config: &config,
Provider: &provider,
Output: &diff,
OutputState: &state,
},
&EvalReadDataApply{
Info: info,
Addr: addr.Resource,
Diff: &diff,
Provider: &provider,
Output: &state,

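The early-exit check in the eval tree above now asks whether the whole configuration value is known (configVal.IsWhollyKnown()) instead of scanning the old ComputedKeys list. A minimal go-cty sketch of what that predicate reports, with made-up attribute names:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A data source configuration whose arguments are all known...
	known := cty.ObjectVal(map[string]cty.Value{
		"bucket": cty.StringVal("logs"),
	})
	// ...and one that still contains a computed (unknown) value.
	partial := cty.ObjectVal(map[string]cty.Value{
		"bucket": cty.StringVal("logs"),
		"owner":  cty.UnknownVal(cty.String),
	})

	fmt.Println(known.IsWhollyKnown())   // true: safe to read the data source now
	fmt.Println(partial.IsWhollyKnown()) // false: defer the read, as the EvalIf above does
}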

@ -1,10 +1,9 @@
package terraform
import (
"fmt"
"strings"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/lang"
)
// NodeLocal represents a named local value in a particular module.
@ -12,22 +11,25 @@ import (
// Local value nodes only have one operation, common to all walk types:
// evaluate the result and place it in state.
type NodeLocal struct {
PathValue []string
Config *config.Local
Addr addrs.AbsLocalValue
Config *configs.Local
}
func (n *NodeLocal) Name() string {
result := fmt.Sprintf("local.%s", n.Config.Name)
if len(n.PathValue) > 1 {
result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
}
var (
_ GraphNodeSubPath = (*NodeLocal)(nil)
_ RemovableIfNotTargeted = (*NodeLocal)(nil)
_ GraphNodeReferenceable = (*NodeLocal)(nil)
_ GraphNodeReferencer = (*NodeLocal)(nil)
_ GraphNodeEvalable = (*NodeLocal)(nil)
)
return result
func (n *NodeLocal) Name() string {
return n.Addr.String()
}
// GraphNodeSubPath
func (n *NodeLocal) Path() []string {
return n.PathValue
func (n *NodeLocal) Path() addrs.ModuleInstance {
return n.Addr.Module
}
// RemovableIfNotTargeted
@ -36,31 +38,20 @@ func (n *NodeLocal) RemoveIfNotTargeted() bool {
}
// GraphNodeReferenceable
func (n *NodeLocal) ReferenceableName() []string {
name := fmt.Sprintf("local.%s", n.Config.Name)
return []string{name}
func (n *NodeLocal) ReferenceableAddrs() []addrs.Referenceable {
return []addrs.Referenceable{n.Addr.LocalValue}
}
// GraphNodeReferencer
func (n *NodeLocal) References() []string {
var result []string
result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
for _, v := range result {
split := strings.Split(v, "/")
for i, s := range split {
split[i] = s + ".destroy"
}
result = append(result, strings.Join(split, "/"))
}
return result
func (n *NodeLocal) References() []*addrs.Reference {
refs, _ := lang.ReferencesInExpr(n.Config.Expr)
return refs
}
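NodeLocal now derives its dependencies directly from the value expression via lang.ReferencesInExpr. As an illustrative sketch using only the plain HCL API (not the real lang package), the underlying idea is to parse an expression and walk the variable traversals it mentions:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	src := `"${aws_instance.web.id}-${var.suffix}"`
	expr, diags := hclsyntax.ParseExpression([]byte(src), "example.tf", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	// Each traversal is a candidate reference; the real implementation goes on
	// to classify them into addrs.Reference values (resources, variables, ...).
	for _, traversal := range expr.Variables() {
		fmt.Println(traversal.RootName()) // prints "aws_instance", then "var"
	}
}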
// GraphNodeEvalable
func (n *NodeLocal) EvalTree() EvalNode {
return &EvalLocal{
Name: n.Config.Name,
Value: n.Config.RawConfig,
Addr: n.Addr.LocalValue,
Expr: n.Config.Expr,
}
}


@ -3,22 +3,30 @@ package terraform
import (
"fmt"
"log"
"reflect"
"github.com/hashicorp/terraform/addrs"
)
// NodeModuleRemoved represents a module that is no longer in the
// config.
type NodeModuleRemoved struct {
PathValue []string
Addr addrs.ModuleInstance
}
var (
_ GraphNodeSubPath = (*NodeModuleRemoved)(nil)
_ GraphNodeEvalable = (*NodeModuleRemoved)(nil)
_ GraphNodeReferencer = (*NodeModuleRemoved)(nil)
_ GraphNodeReferenceOutside = (*NodeModuleRemoved)(nil)
)
func (n *NodeModuleRemoved) Name() string {
return fmt.Sprintf("%s (removed)", modulePrefixStr(n.PathValue))
return fmt.Sprintf("%s (removed)", n.Addr.String())
}
// GraphNodeSubPath
func (n *NodeModuleRemoved) Path() []string {
return n.PathValue
func (n *NodeModuleRemoved) Path() addrs.ModuleInstance {
return n.Addr
}
// GraphNodeEvalable
@ -26,23 +34,40 @@ func (n *NodeModuleRemoved) EvalTree() EvalNode {
return &EvalOpFilter{
Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
Node: &EvalDeleteModule{
PathValue: n.PathValue,
Addr: n.Addr,
},
}
}
func (n *NodeModuleRemoved) ReferenceGlobal() bool {
return true
func (n *NodeModuleRemoved) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
// Our "References" implementation indicates that this node depends on
// the call to the module it represents, which implicitly depends on
// everything inside the module. That reference must therefore be
// interpreted in terms of our parent module.
return n.Addr, n.Addr.Parent()
}
func (n *NodeModuleRemoved) References() []string {
return []string{modulePrefixStr(n.PathValue)}
func (n *NodeModuleRemoved) References() []*addrs.Reference {
// We depend on the call to the module we represent, because that
// implicitly then depends on everything inside that module.
// Our ReferenceOutside implementation causes this to be interpreted
// within the parent module.
_, call := n.Addr.CallInstance()
return []*addrs.Reference{
{
Subject: call,
// No source range here, because there's nothing reasonable for
// us to return.
},
}
}
// EvalDeleteModule is an EvalNode implementation that removes an empty module
// entry from the state.
type EvalDeleteModule struct {
PathValue []string
Addr addrs.ModuleInstance
}
func (n *EvalDeleteModule) Eval(ctx EvalContext) (interface{}, error) {
@ -60,17 +85,33 @@ func (n *EvalDeleteModule) Eval(ctx EvalContext) (interface{}, error) {
state.prune()
// find the module and delete it
Modules:
for i, m := range state.Modules {
if reflect.DeepEqual(m.Path, n.PathValue) {
if !m.Empty() {
// a targeted apply may leave module resources even without a config,
// so just log this and return.
log.Printf("[DEBUG] cannot remove module %s, not empty", modulePrefixStr(n.PathValue))
break
// Since state is still using our old-style []string path representation,
// comparison is a little awkward. This can be simplified once state
// is updated to use addrs.ModuleInstance too.
if len(m.Path) != len(n.Addr) {
continue Modules
}
for i, step := range n.Addr {
if step.InstanceKey != addrs.NoKey {
// Old-style state path can't have keys anyway, so this can
// never match.
continue Modules
}
state.Modules = append(state.Modules[:i], state.Modules[i+1:]...)
if step.Name != m.Path[i] {
continue Modules
}
}
if !m.Empty() {
// a targeted apply may leave module resources even without a config,
// so just log this and return.
log.Printf("[DEBUG] not removing %s from state: not empty", n.Addr)
break
}
state.Modules = append(state.Modules[:i], state.Modules[i+1:]...)
break
}
return nil, nil
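The removal loop above compares a new-style module instance path against the old []string path still stored in state. A stdlib-only sketch of the same rule, with hypothetical stand-in types for addrs.ModuleInstance: lengths must match, no step may carry an instance key, and every step name must line up.

package main

import "fmt"

// moduleStep stands in for one step of an addrs.ModuleInstance.
type moduleStep struct {
	Name        string
	InstanceKey interface{} // nil plays the role of addrs.NoKey
}

// matchesLegacyPath reports whether a new-style module path refers to the same
// module as an old-style []string path from state.
func matchesLegacyPath(addr []moduleStep, legacy []string) bool {
	if len(addr) != len(legacy) {
		return false
	}
	for i, step := range addr {
		if step.InstanceKey != nil {
			return false // old-style paths can never carry instance keys
		}
		if step.Name != legacy[i] {
			return false
		}
	}
	return true
}

func main() {
	addr := []moduleStep{{Name: "root"}, {Name: "network"}}
	fmt.Println(matchesLegacyPath(addr, []string{"root", "network"})) // true
	fmt.Println(matchesLegacyPath(addr, []string{"root", "compute"})) // false
}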


@ -1,40 +1,41 @@
package terraform
import (
"fmt"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/lang"
"github.com/zclconf/go-cty/cty"
)
// NodeApplyableModuleVariable represents a module variable input during
// the apply step.
type NodeApplyableModuleVariable struct {
PathValue []string
Config *config.Variable // Config is the var in the config
Value *config.RawConfig // Value is the value that is set
Module *module.Tree // Antiquated, want to remove
Addr addrs.AbsInputVariableInstance
Config *configs.Variable // Config is the var in the config
Expr hcl.Expression // Expr is the value expression given in the call
}
func (n *NodeApplyableModuleVariable) Name() string {
result := fmt.Sprintf("var.%s", n.Config.Name)
if len(n.PathValue) > 1 {
result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
}
// Ensure that we are implementing all of the interfaces we think we are
// implementing.
var (
_ GraphNodeSubPath = (*NodeApplyableModuleVariable)(nil)
_ RemovableIfNotTargeted = (*NodeApplyableModuleVariable)(nil)
_ GraphNodeReferenceOutside = (*NodeApplyableModuleVariable)(nil)
_ GraphNodeReferenceable = (*NodeApplyableModuleVariable)(nil)
_ GraphNodeReferencer = (*NodeApplyableModuleVariable)(nil)
_ GraphNodeEvalable = (*NodeApplyableModuleVariable)(nil)
)
return result
func (n *NodeApplyableModuleVariable) Name() string {
return n.Addr.String()
}
// GraphNodeSubPath
func (n *NodeApplyableModuleVariable) Path() []string {
// We execute in the parent scope (above our own module) so that
// we can access the proper interpolations.
if len(n.PathValue) > 2 {
return n.PathValue[:len(n.PathValue)-1]
}
return rootModulePath
func (n *NodeApplyableModuleVariable) Path() addrs.ModuleInstance {
// We execute in the parent scope (above our own module) because
// expressions in our value are resolved in that context.
return n.Addr.Module.Parent()
}
// RemovableIfNotTargeted
@ -44,94 +45,95 @@ func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool {
return true
}
// GraphNodeReferenceGlobal
func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool {
// We have to create fully qualified references because we cross
// boundaries here: our ReferenceableName is in one path and our
// References are from another path.
return true
// GraphNodeReferenceOutside implementation
func (n *NodeApplyableModuleVariable) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
// Module input variables have their value expressions defined in the
// context of their calling (parent) module, and so references from
// a node of this type should be resolved in the parent module instance.
referencePath = n.Addr.Module.Parent()
// Input variables are _referenced_ from their own module, though.
selfPath = n.Addr.Module
return // uses named return values
}
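ReferenceOutside separates where a node lives from where its references resolve: a module input variable is declared inside the child module, but its value expression sits in the calling module's "module" block, so resolution happens one level up. A toy sketch of that Parent() step, with a hypothetical path type standing in for addrs.ModuleInstance:

package main

import (
	"fmt"
	"strings"
)

// modulePath stands in for addrs.ModuleInstance: a chain of module names.
type modulePath []string

func (p modulePath) Parent() modulePath {
	if len(p) == 0 {
		return p
	}
	return p[:len(p)-1]
}

func (p modulePath) String() string {
	if len(p) == 0 {
		return "<root>"
	}
	return "module." + strings.Join(p, ".module.")
}

func main() {
	self := modulePath{"network"}              // where var.region is declared
	fmt.Println("declared in:", self)          // module.network
	fmt.Println("resolved in:", self.Parent()) // <root>, where the value expression lives
}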
// GraphNodeReferenceable
func (n *NodeApplyableModuleVariable) ReferenceableName() []string {
return []string{n.Name()}
func (n *NodeApplyableModuleVariable) ReferenceableAddrs() []addrs.Referenceable {
return []addrs.Referenceable{n.Addr.Variable}
}
// GraphNodeReferencer
func (n *NodeApplyableModuleVariable) References() []string {
// If we have no value set, we depend on nothing
if n.Value == nil {
func (n *NodeApplyableModuleVariable) References() []*addrs.Reference {
// If we have no value expression, we cannot depend on anything.
if n.Expr == nil {
return nil
}
// Can't depend on anything if we're in the root
if len(n.PathValue) < 2 {
// Variables in the root don't depend on anything, because their values
// are gathered prior to the graph walk and recorded in the context.
if len(n.Addr.Module) == 0 {
return nil
}
// Otherwise, we depend on anything that is in our value, but
// specifically in the namespace of the parent path.
// Create the prefix based on the path
var prefix string
if p := n.Path(); len(p) > 0 {
prefix = modulePrefixStr(p)
}
result := ReferencesFromConfig(n.Value)
return modulePrefixList(result, prefix)
// Otherwise, we depend on anything referenced by our value expression.
// We ignore diagnostics here under the assumption that we'll re-eval
// all these things later and catch them then; for our purposes here,
// we only care about valid references.
//
// Due to our GraphNodeReferenceOutside implementation, the addresses
// returned by this function are interpreted in the _parent_ module from
// where our associated variable was declared, which is correct because
// our value expression is assigned within a "module" block in the parent
// module.
refs, _ := lang.ReferencesInExpr(n.Expr)
return refs
}
// GraphNodeEvalable
func (n *NodeApplyableModuleVariable) EvalTree() EvalNode {
// If we have no value, do nothing
if n.Value == nil {
if n.Expr == nil {
return &EvalNoop{}
}
// Otherwise, interpolate the value of this variable and set it
// within the variables mapping.
var config *ResourceConfig
variables := make(map[string]interface{})
vals := make(map[string]cty.Value)
_, call := n.Addr.Module.CallInstance()
return &EvalSequence{
Nodes: []EvalNode{
&EvalOpFilter{
Ops: []walkOperation{walkInput},
Node: &EvalInterpolate{
Config: n.Value,
Output: &config,
ContinueOnErr: true,
Ops: []walkOperation{walkRefresh, walkPlan, walkApply,
walkDestroy, walkValidate},
Node: &EvalModuleCallArgument{
Addr: n.Addr.Variable,
Config: n.Config,
Expr: n.Expr,
Values: vals,
IgnoreDiagnostics: false,
},
},
&EvalOpFilter{
Ops: []walkOperation{walkRefresh, walkPlan, walkApply,
walkDestroy, walkValidate},
Node: &EvalInterpolate{
Config: n.Value,
Output: &config,
Ops: []walkOperation{walkInput},
Node: &EvalModuleCallArgument{
Addr: n.Addr.Variable,
Config: n.Config,
Expr: n.Expr,
Values: vals,
IgnoreDiagnostics: true,
},
},
&EvalVariableBlock{
Config: &config,
VariableValues: variables,
},
&EvalCoerceMapVariable{
Variables: variables,
ModulePath: n.PathValue,
ModuleTree: n.Module,
},
&EvalTypeCheckVariable{
Variables: variables,
ModulePath: n.PathValue,
ModuleTree: n.Module,
},
&EvalSetVariables{
Module: &n.PathValue[len(n.PathValue)-1],
Variables: variables,
&EvalSetModuleCallArguments{
Module: call,
Values: vals,
},
},
}


@ -2,31 +2,37 @@ package terraform
import (
"fmt"
"strings"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/lang"
)
// NodeApplyableOutput represents an output that is "applyable":
// it is ready to be applied.
type NodeApplyableOutput struct {
PathValue []string
Config *config.Output // Config is the output in the config
Addr addrs.AbsOutputValue
Config *configs.Output // Config is the output in the config
}
func (n *NodeApplyableOutput) Name() string {
result := fmt.Sprintf("output.%s", n.Config.Name)
if len(n.PathValue) > 1 {
result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
}
var (
_ GraphNodeSubPath = (*NodeApplyableOutput)(nil)
_ RemovableIfNotTargeted = (*NodeApplyableOutput)(nil)
_ GraphNodeTargetDownstream = (*NodeApplyableOutput)(nil)
_ GraphNodeReferenceable = (*NodeApplyableOutput)(nil)
_ GraphNodeReferencer = (*NodeApplyableOutput)(nil)
_ GraphNodeReferenceOutside = (*NodeApplyableOutput)(nil)
_ GraphNodeEvalable = (*NodeApplyableOutput)(nil)
)
return result
func (n *NodeApplyableOutput) Name() string {
return n.Addr.String()
}
// GraphNodeSubPath
func (n *NodeApplyableOutput) Path() []string {
return n.PathValue
func (n *NodeApplyableOutput) Path() addrs.ModuleInstance {
return n.Addr.Module
}
// RemovableIfNotTargeted
@ -44,27 +50,64 @@ func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag
return true
}
func referenceOutsideForOutput(addr addrs.AbsOutputValue) (selfPath, referencePath addrs.ModuleInstance) {
// Output values have their expressions resolved in the context of the
// module where they are defined.
referencePath = addr.Module
// ...but they are referenced in the context of their calling module.
selfPath = addr.Module.Parent()
return // uses named return values
}
// GraphNodeReferenceOutside implementation
func (n *NodeApplyableOutput) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
return referenceOutsideForOutput(n.Addr)
}
func referenceableAddrsForOutput(addr addrs.AbsOutputValue) []addrs.Referenceable {
// An output in the root module can't be referenced at all.
if addr.Module.IsRoot() {
return nil
}
// Otherwise, we can be referenced via a reference to our output name
// on the parent module's call, or via a reference to the entire call.
// e.g. module.foo.bar or just module.foo .
// Note that our ReferenceOutside method causes these addresses to be
// relative to the calling module, not the module where the output
// was declared.
_, outp := addr.ModuleCallOutput()
_, call := addr.Module.CallInstance()
return []addrs.Referenceable{outp, call}
}
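As the comment above notes, a child module's output can be reached from its caller either through the specific output on the call (module.foo.bar) or through the call as a whole (module.foo). A quick sketch producing both string forms (hypothetical helper, not the addrs API):

package main

import "fmt"

// referenceableForOutput lists the address strings under which a child
// module's output can be referenced from its calling module.
func referenceableForOutput(callName, outputName string) []string {
	return []string{
		fmt.Sprintf("module.%s.%s", callName, outputName), // the specific output
		fmt.Sprintf("module.%s", callName),                // the whole call
	}
}

func main() {
	fmt.Println(referenceableForOutput("foo", "bar")) // [module.foo.bar module.foo]
}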
// GraphNodeReferenceable
func (n *NodeApplyableOutput) ReferenceableName() []string {
name := fmt.Sprintf("output.%s", n.Config.Name)
return []string{name}
func (n *NodeApplyableOutput) ReferenceableAddrs() []addrs.Referenceable {
return referenceableAddrsForOutput(n.Addr)
}
func referencesForOutput(c *configs.Output) []*addrs.Reference {
impRefs, _ := lang.ReferencesInExpr(c.Expr)
expRefs, _ := lang.References(c.DependsOn)
l := len(impRefs) + len(expRefs)
if l == 0 {
return nil
}
refs := make([]*addrs.Reference, 0, l)
refs = append(refs, impRefs...)
refs = append(refs, expRefs...)
return refs
}
// GraphNodeReferencer
func (n *NodeApplyableOutput) References() []string {
var result []string
result = append(result, n.Config.DependsOn...)
result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
for _, v := range result {
split := strings.Split(v, "/")
for i, s := range split {
split[i] = s + ".destroy"
}
result = append(result, strings.Join(split, "/"))
}
return result
func (n *NodeApplyableOutput) References() []*addrs.Reference {
return referencesForOutput(n.Config)
}
// GraphNodeEvalable
@ -76,18 +119,18 @@ func (n *NodeApplyableOutput) EvalTree() EvalNode {
// before Refresh.
Ops: []walkOperation{walkInput},
Node: &EvalWriteOutput{
Name: n.Config.Name,
Addr: n.Addr.OutputValue,
Sensitive: n.Config.Sensitive,
Value: n.Config.RawConfig,
Expr: n.Config.Expr,
ContinueOnErr: true,
},
},
&EvalOpFilter{
Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy},
Node: &EvalWriteOutput{
Name: n.Config.Name,
Addr: n.Addr.OutputValue,
Sensitive: n.Config.Sensitive,
Value: n.Config.RawConfig,
Expr: n.Config.Expr,
},
},
},
@ -97,22 +140,25 @@ func (n *NodeApplyableOutput) EvalTree() EvalNode {
// NodeDestroyableOutput represents an output that is "destroyable":
// its application will remove the output from the state.
type NodeDestroyableOutput struct {
PathValue []string
Config *config.Output // Config is the output in the config
Addr addrs.AbsOutputValue
Config *configs.Output // Config is the output in the config
}
func (n *NodeDestroyableOutput) Name() string {
result := fmt.Sprintf("output.%s (destroy)", n.Config.Name)
if len(n.PathValue) > 1 {
result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
}
var (
_ GraphNodeSubPath = (*NodeDestroyableOutput)(nil)
_ RemovableIfNotTargeted = (*NodeDestroyableOutput)(nil)
_ GraphNodeTargetDownstream = (*NodeDestroyableOutput)(nil)
_ GraphNodeReferencer = (*NodeDestroyableOutput)(nil)
_ GraphNodeEvalable = (*NodeDestroyableOutput)(nil)
)
return result
func (n *NodeDestroyableOutput) Name() string {
return fmt.Sprintf("%s (destroy)", n.Addr.String())
}
// GraphNodeSubPath
func (n *NodeDestroyableOutput) Path() []string {
return n.PathValue
func (n *NodeDestroyableOutput) Path() addrs.ModuleInstance {
return n.Addr.Module
}
// RemovableIfNotTargeted
@ -129,25 +175,13 @@ func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *d
}
// GraphNodeReferencer
func (n *NodeDestroyableOutput) References() []string {
var result []string
result = append(result, n.Config.DependsOn...)
result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
for _, v := range result {
split := strings.Split(v, "/")
for i, s := range split {
split[i] = s + ".destroy"
}
result = append(result, strings.Join(split, "/"))
}
return result
func (n *NodeDestroyableOutput) References() []*addrs.Reference {
return referencesForOutput(n.Config)
}
// GraphNodeEvalable
func (n *NodeDestroyableOutput) EvalTree() EvalNode {
return &EvalDeleteOutput{
Name: n.Config.Name,
Addr: n.Addr.OutputValue,
}
}


@ -2,31 +2,39 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/addrs"
)
// NodeOutputOrphan represents an output that is an orphan.
type NodeOutputOrphan struct {
OutputName string
PathValue []string
Addr addrs.AbsOutputValue
}
func (n *NodeOutputOrphan) Name() string {
result := fmt.Sprintf("output.%s (orphan)", n.OutputName)
if len(n.PathValue) > 1 {
result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
}
var (
_ GraphNodeSubPath = (*NodeOutputOrphan)(nil)
_ GraphNodeReferenceable = (*NodeOutputOrphan)(nil)
_ GraphNodeReferenceOutside = (*NodeOutputOrphan)(nil)
_ GraphNodeEvalable = (*NodeOutputOrphan)(nil)
)
return result
func (n *NodeOutputOrphan) Name() string {
return fmt.Sprintf("%s (orphan)", n.Addr.String())
}
// GraphNodeReferenceOutside implementation
func (n *NodeOutputOrphan) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
return referenceOutsideForOutput(n.Addr)
}
// GraphNodeReferenceable
func (n *NodeOutputOrphan) ReferenceableName() []string {
return []string{"output." + n.OutputName}
func (n *NodeOutputOrphan) ReferenceableAddrs() []addrs.Referenceable {
return referenceableAddrsForOutput(n.Addr)
}
// GraphNodeSubPath
func (n *NodeOutputOrphan) Path() []string {
return n.PathValue
func (n *NodeOutputOrphan) Path() addrs.ModuleInstance {
return n.Addr.Module
}
// GraphNodeEvalable
@ -34,7 +42,7 @@ func (n *NodeOutputOrphan) EvalTree() EvalNode {
return &EvalOpFilter{
Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
Node: &EvalDeleteOutput{
Name: n.OutputName,
Addr: n.Addr.OutputValue,
},
}
}


@ -1,10 +1,9 @@
package terraform
import (
"fmt"
"strings"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/dag"
)
@ -15,37 +14,32 @@ type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex
// NodeAbstractProvider represents a provider that has no associated operations.
// It registers all the common interfaces across operations for providers.
type NodeAbstractProvider struct {
NameValue string
PathValue []string
Addr addrs.AbsProviderConfig
// The fields below will be automatically set using the Attach
// interfaces if you're running those transforms, but also be explicitly
// set if you already have that information.
Config *config.ProviderConfig
Config *configs.Provider
Schema *ProviderSchema
}
func ResolveProviderName(name string, path []string) string {
if strings.Contains(name, "provider.") {
// already resolved
return name
}
name = fmt.Sprintf("provider.%s", name)
if len(path) >= 1 {
name = fmt.Sprintf("%s.%s", modulePrefixStr(path), name)
}
return name
}
var (
_ GraphNodeSubPath = (*NodeAbstractProvider)(nil)
_ RemovableIfNotTargeted = (*NodeAbstractProvider)(nil)
_ GraphNodeReferencer = (*NodeAbstractProvider)(nil)
_ GraphNodeProvider = (*NodeAbstractProvider)(nil)
_ GraphNodeAttachProvider = (*NodeAbstractProvider)(nil)
_ dag.GraphNodeDotter = (*NodeAbstractProvider)(nil)
)
func (n *NodeAbstractProvider) Name() string {
return ResolveProviderName(n.NameValue, n.PathValue)
return n.Addr.String()
}
// GraphNodeSubPath
func (n *NodeAbstractProvider) Path() []string {
return n.PathValue
func (n *NodeAbstractProvider) Path() addrs.ModuleInstance {
return n.Addr.Module
}
// RemovableIfNotTargeted
@ -56,21 +50,21 @@ func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool {
}
// GraphNodeReferencer
func (n *NodeAbstractProvider) References() []string {
if n.Config == nil {
func (n *NodeAbstractProvider) References() []*addrs.Reference {
if n.Config == nil || n.Schema == nil {
return nil
}
return ReferencesFromConfig(n.Config.RawConfig)
return ReferencesFromConfig(n.Config.Config, n.Schema.Provider)
}
// GraphNodeProvider
func (n *NodeAbstractProvider) ProviderName() string {
return n.NameValue
func (n *NodeAbstractProvider) ProviderAddr() addrs.AbsProviderConfig {
return n.Addr
}
// GraphNodeProvider
func (n *NodeAbstractProvider) ProviderConfig() *config.ProviderConfig {
func (n *NodeAbstractProvider) ProviderConfig() *configs.Provider {
if n.Config == nil {
return nil
}
@ -79,10 +73,15 @@ func (n *NodeAbstractProvider) ProviderConfig() *config.ProviderConfig {
}
// GraphNodeAttachProvider
func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) {
func (n *NodeAbstractProvider) AttachProvider(c *configs.Provider) {
n.Config = c
}
// GraphNodeAttachProvider
func (n *NodeAbstractProvider) AttachProviderSchema(s *ProviderSchema) {
n.Schema = s
}
// GraphNodeDotter impl.
func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
return &dag.DotNode{


@ -2,6 +2,8 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/dag"
)
// NodeDisabledProvider represents a provider that is disabled. A disabled
@ -11,24 +13,15 @@ type NodeDisabledProvider struct {
*NodeAbstractProvider
}
var (
_ GraphNodeSubPath = (*NodeDisabledProvider)(nil)
_ RemovableIfNotTargeted = (*NodeDisabledProvider)(nil)
_ GraphNodeReferencer = (*NodeDisabledProvider)(nil)
_ GraphNodeProvider = (*NodeDisabledProvider)(nil)
_ GraphNodeAttachProvider = (*NodeDisabledProvider)(nil)
_ dag.GraphNodeDotter = (*NodeDisabledProvider)(nil)
)
func (n *NodeDisabledProvider) Name() string {
return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name())
}
// GraphNodeEvalable
func (n *NodeDisabledProvider) EvalTree() EvalNode {
var resourceConfig *ResourceConfig
return &EvalSequence{
Nodes: []EvalNode{
&EvalInterpolateProvider{
Config: n.ProviderConfig(),
Output: &resourceConfig,
},
&EvalBuildProviderConfig{
Provider: n.ProviderName(),
Config: &resourceConfig,
Output: &resourceConfig,
},
},
}
}


@ -3,6 +3,7 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config"
)
@ -10,7 +11,7 @@ import (
// It registers all the common interfaces across operations for providers.
type NodeProvisioner struct {
NameValue string
PathValue []string
PathValue addrs.ModuleInstance
// The fields below will be automatically set using the Attach
// interfaces if you're running those transforms, but also be explicitly
@ -19,17 +20,23 @@ type NodeProvisioner struct {
Config *config.ProviderConfig
}
var (
_ GraphNodeSubPath = (*NodeProvisioner)(nil)
_ GraphNodeProvisioner = (*NodeProvisioner)(nil)
_ GraphNodeEvalable = (*NodeProvisioner)(nil)
)
func (n *NodeProvisioner) Name() string {
result := fmt.Sprintf("provisioner.%s", n.NameValue)
if len(n.PathValue) > 1 {
result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
result = fmt.Sprintf("%s.%s", n.PathValue.String(), result)
}
return result
}
// GraphNodeSubPath
func (n *NodeProvisioner) Path() []string {
func (n *NodeProvisioner) Path() addrs.ModuleInstance {
return n.PathValue
}


@ -2,9 +2,17 @@ package terraform
import (
"fmt"
"strings"
"log"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/terraform/lang"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/dag"
)
@ -16,222 +24,366 @@ type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex
// The type of operation cannot be assumed, only that this node represents
// the given resource.
type GraphNodeResource interface {
ResourceAddr() *ResourceAddress
ResourceAddr() addrs.AbsResource
}
// ConcreteResourceInstanceNodeFunc is a callback type used to convert an
// abstract resource instance to a concrete one of some type.
type ConcreteResourceInstanceNodeFunc func(*NodeAbstractResourceInstance) dag.Vertex
// GraphNodeResourceInstance is implemented by any nodes that represent
// a resource instance. A single resource may have multiple instances if,
// for example, the "count" or "for_each" argument is used for it in
// configuration.
type GraphNodeResourceInstance interface {
ResourceInstanceAddr() addrs.AbsResourceInstance
}
// NodeAbstractResource represents a resource that has no associated
// operations. It registers all the interfaces for a resource that common
// across multiple operation types.
type NodeAbstractResource struct {
Addr *ResourceAddress // Addr is the address for this resource
Addr addrs.AbsResource // Addr is the address for this resource
// The fields below will be automatically set using the Attach
// interfaces if you're running those transforms, but also be explicitly
// set if you already have that information.
Config *config.Resource // Config is the resource in the config
ResourceState *ResourceState // ResourceState is the ResourceState for this
Schema *configschema.Block // Schema for processing the configuration body
Config *configs.Resource // Config is the resource in the config
Targets []ResourceAddress // Set from GraphNodeTargetable
ProvisionerSchemas map[string]*configschema.Block
Targets []addrs.Targetable // Set from GraphNodeTargetable
// The address of the provider this resource will use
ResolvedProvider string
ResolvedProvider addrs.AbsProviderConfig
}
var (
_ GraphNodeSubPath = (*NodeAbstractResource)(nil)
_ GraphNodeReferenceable = (*NodeAbstractResource)(nil)
_ GraphNodeReferencer = (*NodeAbstractResource)(nil)
_ GraphNodeProviderConsumer = (*NodeAbstractResource)(nil)
_ GraphNodeProvisionerConsumer = (*NodeAbstractResource)(nil)
_ GraphNodeResource = (*NodeAbstractResource)(nil)
_ GraphNodeAttachResourceConfig = (*NodeAbstractResource)(nil)
_ GraphNodeTargetable = (*NodeAbstractResource)(nil)
_ dag.GraphNodeDotter = (*NodeAbstractResource)(nil)
)
// NewNodeAbstractResource creates an abstract resource graph node for
// the given absolute resource address.
func NewNodeAbstractResource(addr addrs.AbsResource) *NodeAbstractResource {
return &NodeAbstractResource{
Addr: addr,
}
}
// NodeAbstractResourceInstance represents a resource instance with no
// associated operations. It embeds NodeAbstractResource but additionally
// contains an instance key, used to identify one of potentially many
// instances that were created from a resource in configuration, e.g. using
// the "count" or "for_each" arguments.
type NodeAbstractResourceInstance struct {
NodeAbstractResource
InstanceKey addrs.InstanceKey
// The fields below will be automatically set using the Attach
// interfaces if you're running those transforms, but also be explicitly
// set if you already have that information.
ResourceState *ResourceState // the ResourceState for this instance
}
var (
_ GraphNodeSubPath = (*NodeAbstractResourceInstance)(nil)
_ GraphNodeReferenceable = (*NodeAbstractResourceInstance)(nil)
_ GraphNodeReferencer = (*NodeAbstractResourceInstance)(nil)
_ GraphNodeProviderConsumer = (*NodeAbstractResourceInstance)(nil)
_ GraphNodeProvisionerConsumer = (*NodeAbstractResourceInstance)(nil)
_ GraphNodeResource = (*NodeAbstractResourceInstance)(nil)
_ GraphNodeResourceInstance = (*NodeAbstractResourceInstance)(nil)
_ GraphNodeAttachResourceState = (*NodeAbstractResourceInstance)(nil)
_ GraphNodeAttachResourceConfig = (*NodeAbstractResourceInstance)(nil)
_ GraphNodeTargetable = (*NodeAbstractResource)(nil)
_ dag.GraphNodeDotter = (*NodeAbstractResourceInstance)(nil)
)
// NewNodeAbstractResourceInstance creates an abstract resource instance graph
// node for the given absolute resource instance address.
func NewNodeAbstractResourceInstance(addr addrs.AbsResourceInstance) *NodeAbstractResourceInstance {
// Due to the fact that we embed NodeAbstractResource, the given address
// actually ends up split between the resource address in the embedded
// object and the InstanceKey field in our own struct. The
// ResourceInstanceAddr method will stick these back together again on
// request.
return &NodeAbstractResourceInstance{
NodeAbstractResource: NodeAbstractResource{
Addr: addr.ContainingResource(),
},
InstanceKey: addr.Resource.Key,
}
}
func (n *NodeAbstractResource) Name() string {
return n.Addr.String()
return n.ResourceAddr().String()
}
func (n *NodeAbstractResourceInstance) Name() string {
return n.ResourceInstanceAddr().String()
}
// GraphNodeSubPath
func (n *NodeAbstractResource) Path() []string {
return n.Addr.Path
func (n *NodeAbstractResource) Path() addrs.ModuleInstance {
return n.Addr.Module
}
// GraphNodeReferenceable
func (n *NodeAbstractResource) ReferenceableName() []string {
// We always are referenceable as "type.name" as long as
// we have a config or address. Determine what that value is.
var id string
if n.Config != nil {
id = n.Config.Id()
} else if n.Addr != nil {
addrCopy := n.Addr.Copy()
addrCopy.Path = nil // ReferenceTransformer handles paths
addrCopy.Index = -1 // We handle indexes below
id = addrCopy.String()
} else {
// No way to determine our type.name, just return
return nil
func (n *NodeAbstractResource) ReferenceableAddrs() []addrs.Referenceable {
return []addrs.Referenceable{n.Addr.Resource}
}
// GraphNodeReferenceable
func (n *NodeAbstractResourceInstance) ReferenceableAddrs() []addrs.Referenceable {
addr := n.ResourceInstanceAddr()
return []addrs.Referenceable{
addr.Resource,
// A resource instance can also be referenced by the address of its
// containing resource, so that e.g. a reference to aws_instance.foo
// would match both aws_instance.foo[0] and aws_instance.foo[1].
addr.ContainingResource().Resource,
}
var result []string
// Always include our own ID. This is primarily for backwards
// compatibility with states that didn't yet support the more
// specific dep string.
result = append(result, id)
// We represent all multi-access
result = append(result, fmt.Sprintf("%s.*", id))
// We represent either a specific number, or all numbers
suffix := "N"
if n.Addr != nil {
idx := n.Addr.Index
if idx == -1 {
idx = 0
}
suffix = fmt.Sprintf("%d", idx)
}
result = append(result, fmt.Sprintf("%s.%s", id, suffix))
return result
}
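Per the comment in ReferenceableAddrs above, a reference to aws_instance.foo should match aws_instance.foo[0] and aws_instance.foo[1] alike, which is why each instance also advertises its containing resource address. A stdlib sketch with stand-in address types:

package main

import "fmt"

type resourceAddr struct{ Type, Name string }

type instanceAddr struct {
	Resource resourceAddr
	Index    int
}

// referenceableAddrs lists the address strings under which an instance can be
// referenced: its own instance address plus its containing resource address.
func referenceableAddrs(inst instanceAddr) []string {
	return []string{
		fmt.Sprintf("%s.%s[%d]", inst.Resource.Type, inst.Resource.Name, inst.Index),
		fmt.Sprintf("%s.%s", inst.Resource.Type, inst.Resource.Name),
	}
}

func main() {
	fmt.Println(referenceableAddrs(instanceAddr{resourceAddr{"aws_instance", "foo"}, 0}))
	// [aws_instance.foo[0] aws_instance.foo]
}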
// GraphNodeReferencer
func (n *NodeAbstractResource) References() []string {
// If we have a config, that is our source of truth
func (n *NodeAbstractResource) References() []*addrs.Reference {
// If we have a config then we prefer to use that.
if c := n.Config; c != nil {
// Grab all the references
var result []string
result = append(result, c.DependsOn...)
result = append(result, ReferencesFromConfig(c.RawCount)...)
result = append(result, ReferencesFromConfig(c.RawConfig)...)
for _, p := range c.Provisioners {
if p.When == config.ProvisionerWhenCreate {
result = append(result, ReferencesFromConfig(p.ConnInfo)...)
result = append(result, ReferencesFromConfig(p.RawConfig)...)
var result []*addrs.Reference
for _, traversal := range c.DependsOn {
ref, err := addrs.ParseRef(traversal)
if err != nil {
// We ignore this here, because this isn't a suitable place to return
// errors. This situation should be caught and rejected during
// validation.
log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, err)
continue
}
result = append(result, ref)
}
return uniqueStrings(result)
refs, _ := lang.ReferencesInExpr(c.Count)
result = append(result, refs...)
refs, _ = lang.ReferencesInBlock(c.Config, n.Schema)
result = append(result, refs...)
if c.Managed != nil {
for _, p := range c.Managed.Provisioners {
if p.When != configs.ProvisionerWhenCreate {
continue
}
refs, _ = lang.ReferencesInBlock(p.Connection.Config, connectionBlockSupersetSchema)
result = append(result, refs...)
schema := n.ProvisionerSchemas[p.Type]
refs, _ = lang.ReferencesInBlock(p.Config, schema)
result = append(result, refs...)
}
}
return result
}
// If we have state, that is our next source
// Otherwise, we have no references.
return nil
}
// GraphNodeReferencer
func (n *NodeAbstractResourceInstance) References() []*addrs.Reference {
// If we have a configuration attached then we'll delegate to our
// embedded abstract resource, which knows how to extract dependencies
// from configuration.
if n.Config != nil {
return n.NodeAbstractResource.References()
}
// Otherwise, if we have state then we'll use the values stored in state
// as a fallback.
if s := n.ResourceState; s != nil {
return s.Dependencies
// State is still storing dependencies as old-style strings, so we'll
// need to do a little work here to massage this to the form we now
// want.
var result []*addrs.Reference
for _, legacyDep := range s.Dependencies {
traversal, diags := hclsyntax.ParseTraversalAbs([]byte(legacyDep), "", hcl.Pos{})
if diags.HasErrors() {
log.Printf("[ERROR] Can't parse %q from dependencies in state as a reference: invalid syntax", legacyDep)
continue
}
ref, err := addrs.ParseRef(traversal)
if err != nil {
log.Printf("[ERROR] Can't parse %q from dependencies in state as a reference: invalid syntax", legacyDep)
continue
}
result = append(result, ref)
}
return result
}
// If we have neither config nor state then we have no references.
return nil
}
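Both depends_on entries and old-style dependency strings from state are first parsed as absolute traversals before being classified into references. A sketch using plain hclsyntax (skipping the addrs.ParseRef classification step), with the same log-and-skip behavior on bad input:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	deps := []string{"aws_instance.web.0", "module.network", "[0]"}
	for _, legacyDep := range deps {
		traversal, diags := hclsyntax.ParseTraversalAbs([]byte(legacyDep), "", hcl.Pos{Line: 1, Column: 1})
		if diags.HasErrors() {
			// Mirrors the code above: skip bad entries rather than fail hard.
			fmt.Printf("skipping %q: invalid syntax\n", legacyDep)
			continue
		}
		fmt.Printf("%q parses with root %q and %d steps\n", legacyDep, traversal.RootName(), len(traversal))
	}
}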
// StateReferences returns the dependencies to put into the state for
// this resource.
func (n *NodeAbstractResource) StateReferences() []string {
self := n.ReferenceableName()
// Determine what our "prefix" is for checking for references to
// ourself.
addrCopy := n.Addr.Copy()
addrCopy.Index = -1
selfPrefix := addrCopy.String() + "."
selfAddrs := n.ReferenceableAddrs()
depsRaw := n.References()
deps := make([]string, 0, len(depsRaw))
for _, d := range depsRaw {
// Ignore any variable dependencies
if strings.HasPrefix(d, "var.") {
continue
}
// If this has a backup ref, ignore those for now. The old state
// file never contained those and I'd rather store the rich types we
// add in the future.
if idx := strings.IndexRune(d, '/'); idx != -1 {
d = d[:idx]
}
// If we're referencing ourself, then ignore it
found := false
for _, s := range self {
if d == s {
found = true
switch tr := d.Subject.(type) {
case addrs.ResourceInstance:
// For historical reasons, state uses dot-separated instance keys,
// rather than bracketed as in our modern syntax.
var suffix string
switch tk := tr.Key.(type) {
case addrs.IntKey:
suffix = fmt.Sprintf(".%d", int(tk))
case addrs.StringKey:
suffix = fmt.Sprintf(".%s", string(tk))
}
key := tr.Resource.String() + suffix
deps = append(deps, key)
case addrs.Resource:
depStr := tr.String()
selfRef := false
for _, selfAddr := range selfAddrs {
if selfAddr.String() == depStr {
selfRef = true
break
}
}
if !selfRef { // Don't create self-references
deps = append(deps, tr.String())
}
case addrs.ModuleCallInstance:
deps = append(deps, tr.String())
case addrs.ModuleCallOutput:
// For state dependencies, we simplify outputs to just refer
// to the module as a whole. It's not really clear why we do this,
// but this logic is preserved from before the 0.12 rewrite of
// this function.
deps = append(deps, tr.Call.String())
default:
// No other reference types are recorded in the state.
}
if found {
continue
}
// If this is a reference to ourself and a specific index, we keep
// it. For example, if this resource is "foo.bar" and the reference
// is "foo.bar.0" then we keep it exact. Otherwise, we strip it.
if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) {
d = d[:len(d)-2]
}
// This is sad. The dependencies are currently in the format of
// "module.foo.bar" (the full field). This strips the field off.
if strings.HasPrefix(d, "module.") {
parts := strings.SplitN(d, ".", 3)
d = strings.Join(parts[0:2], ".")
}
deps = append(deps, d)
}
return deps
}
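The switch above maps instance keys back to the dot-separated form the pre-0.12 state format expects, so aws_instance.foo[1] is stored as aws_instance.foo.1. A stdlib sketch of just that key shim (hypothetical helper):

package main

import "fmt"

// legacyStateKey renders a resource plus optional instance key the way the old
// state format stored dependencies: dot-separated rather than bracketed.
// key may be an int, a string, or nil for a keyless instance.
func legacyStateKey(resource string, key interface{}) string {
	switch k := key.(type) {
	case int:
		return fmt.Sprintf("%s.%d", resource, k)
	case string:
		return fmt.Sprintf("%s.%s", resource, k)
	default:
		return resource
	}
}

func main() {
	fmt.Println(legacyStateKey("aws_instance.foo", 1))      // aws_instance.foo.1
	fmt.Println(legacyStateKey("aws_instance.foo", "blue")) // aws_instance.foo.blue
	fmt.Println(legacyStateKey("aws_instance.foo", nil))    // aws_instance.foo
}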
func (n *NodeAbstractResource) SetProvider(p string) {
func (n *NodeAbstractResource) SetProvider(p addrs.AbsProviderConfig) {
n.ResolvedProvider = p
}
// GraphNodeProviderConsumer
func (n *NodeAbstractResource) ProvidedBy() string {
func (n *NodeAbstractResource) ProvidedBy() (addrs.AbsProviderConfig, bool) {
// If we have a config we prefer that above all else
if n.Config != nil {
return resourceProvider(n.Config.Type, n.Config.Provider)
relAddr := n.Config.ProviderConfigAddr()
return relAddr.Absolute(n.Path()), false
}
// Use our type and containing module path to guess a provider configuration address
return addrs.NewDefaultProviderConfig(n.Addr.Resource.Type).Absolute(n.Addr.Module), false
}
// GraphNodeProviderConsumer
func (n *NodeAbstractResourceInstance) ProvidedBy() (addrs.AbsProviderConfig, bool) {
// If we have a config we prefer that above all else
if n.Config != nil {
relAddr := n.Config.ProviderConfigAddr()
return relAddr.Absolute(n.Path()), false
}
// If we have state, then we will use the provider from there
if n.ResourceState != nil && n.ResourceState.Provider != "" {
return n.ResourceState.Provider
traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(n.ResourceState.Provider), "", hcl.Pos{})
if parseDiags.HasErrors() {
log.Printf("[ERROR] %s has syntax-invalid provider address %q", n.Addr, n.ResourceState.Provider)
goto Guess
}
addr, diags := addrs.ParseAbsProviderConfig(traversal)
if diags.HasErrors() {
log.Printf("[ERROR] %s has content-invalid provider address %q", n.Addr, n.ResourceState.Provider)
goto Guess
}
// An address from the state must match exactly, since we must ensure
// we refresh/destroy a resource with the same provider configuration
// that created it.
return addr, true
}
// Use our type
return resourceProvider(n.Addr.Type, "")
Guess:
// Use our type and containing module path to guess a provider configuration address
return addrs.NewDefaultProviderConfig(n.Addr.Resource.Type).Absolute(n.Addr.Module), false
}
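ProvidedBy now returns an address together with an "exact" flag: an address recovered from state must be honored verbatim (true), while one guessed from the resource type is only a default (false). A stdlib sketch of that fallback order, assuming the usual convention that a default provider name is the resource type's prefix before the first underscore:

package main

import (
	"fmt"
	"strings"
)

// providerFor mirrors the fallback order above: prefer an explicit provider
// address recorded in state (exact), otherwise guess a default from the
// resource type name (not exact).
func providerFor(stateProvider, resourceType string) (addr string, exact bool) {
	if stateProvider != "" {
		return stateProvider, true
	}
	name := resourceType
	if i := strings.Index(resourceType, "_"); i >= 0 {
		name = resourceType[:i]
	}
	return "provider." + name, false
}

func main() {
	fmt.Println(providerFor("provider.aws.west", "aws_instance")) // provider.aws.west true
	fmt.Println(providerFor("", "aws_instance"))                  // provider.aws false
}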
// GraphNodeProvisionerConsumer
func (n *NodeAbstractResource) ProvisionedBy() []string {
// If we have no configuration, then we have no provisioners
if n.Config == nil {
if n.Config == nil || n.Config.Managed == nil {
return nil
}
// Build the list of provisioners we need based on the configuration.
// It is okay to have duplicates here.
result := make([]string, len(n.Config.Provisioners))
for i, p := range n.Config.Provisioners {
result := make([]string, len(n.Config.Managed.Provisioners))
for i, p := range n.Config.Managed.Provisioners {
result[i] = p.Type
}
return result
}
// GraphNodeResource, GraphNodeAttachResourceState
func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress {
// GraphNodeProvisionerConsumer
func (n *NodeAbstractResource) SetProvisionerSchema(name string, schema *configschema.Block) {
n.ProvisionerSchemas[name] = schema
}
// GraphNodeResource
func (n *NodeAbstractResource) ResourceAddr() addrs.AbsResource {
return n.Addr
}
// GraphNodeResourceInstance
func (n *NodeAbstractResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance {
return n.NodeAbstractResource.Addr.Instance(n.InstanceKey)
}
// GraphNodeAddressable, TODO: remove, used by target, should unify
func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress {
return n.ResourceAddr()
return NewLegacyResourceAddress(n.Addr)
}
// GraphNodeTargetable
func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) {
func (n *NodeAbstractResource) SetTargets(targets []addrs.Targetable) {
n.Targets = targets
}
// GraphNodeAttachResourceState
func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) {
func (n *NodeAbstractResourceInstance) AttachResourceState(s *ResourceState) {
n.ResourceState = s
}
// GraphNodeAttachResourceConfig
func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) {
func (n *NodeAbstractResource) AttachResourceConfig(c *configs.Resource) {
n.Config = c
}


@ -1,50 +0,0 @@
package terraform
// NodeAbstractCountResource should be embedded instead of NodeAbstractResource
// if the resource has a `count` value that needs to be expanded.
//
// The embedder should implement `DynamicExpand` to process the count.
type NodeAbstractCountResource struct {
*NodeAbstractResource
// Validate, if true, will perform the validation for the count.
// This should only be turned on for the "validate" operation.
Validate bool
}
// GraphNodeEvalable
func (n *NodeAbstractCountResource) EvalTree() EvalNode {
// We only check if the count is computed if we're not validating.
// If we're validating we allow computed counts since they just turn
// into more computed values.
var evalCountCheckComputed EvalNode
if !n.Validate {
evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config}
}
return &EvalSequence{
Nodes: []EvalNode{
// The EvalTree for a plannable resource primarily involves
// interpolating the count since it can contain variables
// we only just received access to.
//
// With the interpolated count, we can then DynamicExpand
// into the proper number of instances.
&EvalInterpolate{Config: n.Config.RawCount},
// Check if the count is computed
evalCountCheckComputed,
// If validation is enabled, perform the validation
&EvalIf{
If: func(ctx EvalContext) (bool, error) {
return n.Validate, nil
},
Then: &EvalValidateCount{Resource: n.Config},
},
&EvalCountFixZeroOneBoundary{Resource: n.Config},
},
}
}


@ -3,112 +3,108 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/zclconf/go-cty/cty"
)
// NodeApplyableResource represents a resource that is "applyable":
// NodeApplyableResourceInstance represents a resource that is "applyable":
// it is ready to be applied and is represented by a diff.
type NodeApplyableResource struct {
*NodeAbstractResource
type NodeApplyableResourceInstance struct {
*NodeAbstractResourceInstance
}
var (
_ GraphNodeResource = (*NodeApplyableResourceInstance)(nil)
_ GraphNodeResourceInstance = (*NodeApplyableResourceInstance)(nil)
_ GraphNodeCreator = (*NodeApplyableResourceInstance)(nil)
_ GraphNodeReferencer = (*NodeApplyableResourceInstance)(nil)
_ GraphNodeEvalable = (*NodeApplyableResourceInstance)(nil)
)
// GraphNodeCreator
func (n *NodeApplyableResource) CreateAddr() *ResourceAddress {
return n.NodeAbstractResource.Addr
func (n *NodeApplyableResourceInstance) CreateAddr() *addrs.AbsResourceInstance {
addr := n.ResourceInstanceAddr()
return &addr
}
// GraphNodeReferencer, overriding NodeAbstractResource
func (n *NodeApplyableResource) References() []string {
result := n.NodeAbstractResource.References()
// GraphNodeReferencer, overriding NodeAbstractResourceInstance
func (n *NodeApplyableResourceInstance) References() []*addrs.Reference {
// Start with the usual resource instance implementation
ret := n.NodeAbstractResourceInstance.References()
// The "apply" side of a resource generally also depends on the
// destruction of its dependencies as well. For example, if a LB
// references a set of VMs with ${vm.foo.*.id}, then we must wait for
// the destruction so we get the newly updated list of VMs.
// Applying a resource must also depend on the destruction of any of its
// dependencies, since this may for example affect the outcome of
// evaluating an entire list of resources with "count" set (by reducing
// the count).
//
// The exception here is CBD. When CBD is set, we don't do this since
// it would create a cycle. By not creating a cycle, we require two
// applies since the first apply the creation step will use the OLD
// values (pre-destroy) and the second step will update.
//
// This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x).
// We mimic that behavior here now and can improve upon it in the future.
//
// This behavior is tested in graph_build_apply_test.go to test ordering.
cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy
// However, we can't do this in create_before_destroy mode because that
// would create a dependency cycle. We make a compromise here of requiring
// changes to be updated across two applies in this case, since the first
// plan will use the old values.
cbd := n.Config != nil && n.Config.Managed != nil && n.Config.Managed.CreateBeforeDestroy
if !cbd {
// The "apply" side of a resource always depends on the destruction
// of all its dependencies in addition to the creation.
for _, v := range result {
result = append(result, v+".destroy")
for _, ref := range ret {
switch tr := ref.Subject.(type) {
case addrs.ResourceInstance:
newRef := *ref // shallow copy so we can mutate
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
newRef.Remaining = nil // can't access attributes of something being destroyed
ret = append(ret, &newRef)
case addrs.Resource:
// We'll guess that this is actually a reference to a no-key
// instance here, and generate a reference under that assumption.
// If that's not true then this won't do any harm, since there
// won't actually be a node with this address.
newRef := *ref // shallow copy so we can mutate
newRef.Subject = tr.Instance(addrs.NoKey).Phase(addrs.ResourceInstancePhaseDestroy)
newRef.Remaining = nil // can't access attributes of something being destroyed
ret = append(ret, &newRef)
}
}
}
return result
return ret
}
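When create_before_destroy is off, every reference this apply node holds gains a companion pointing at the destroy phase of the same address, so destroying a dependency is ordered before this node's apply. A stdlib sketch of that expansion using the old ".destroy" suffix strings (the new code builds typed addrs.Reference values with a destroy phase instead):

package main

import "fmt"

// withDestroyPhases appends a "<ref>.destroy" companion for every reference,
// which is the extra ordering the non-CBD apply path wants.
func withDestroyPhases(refs []string) []string {
	out := append([]string(nil), refs...)
	for _, r := range refs {
		out = append(out, r+".destroy")
	}
	return out
}

func main() {
	fmt.Println(withDestroyPhases([]string{"aws_instance.web[0]", "aws_security_group.fw"}))
	// [aws_instance.web[0] aws_security_group.fw aws_instance.web[0].destroy aws_security_group.fw.destroy]
}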
// GraphNodeEvalable
func (n *NodeApplyableResource) EvalTree() EvalNode {
addr := n.NodeAbstractResource.Addr
func (n *NodeApplyableResourceInstance) EvalTree() EvalNode {
addr := n.ResourceInstanceAddr()
// stateId is the ID to put into the state
stateId := addr.stateId()
// Build the instance info. More of this will be populated during eval
info := &InstanceInfo{
Id: stateId,
Type: addr.Type,
}
// Build the resource for eval
resource := &Resource{
Name: addr.Name,
Type: addr.Type,
CountIndex: addr.Index,
}
if resource.CountIndex < 0 {
resource.CountIndex = 0
}
// State still uses legacy-style internal ids, so we need to shim to get
// a suitable key to use.
stateId := NewLegacyResourceInstanceAddress(addr).stateId()
// Determine the dependencies for the state.
stateDeps := n.StateReferences()
// Eval info is different depending on what kind of resource this is
switch n.Config.Mode {
case config.ManagedResourceMode:
return n.evalTreeManagedResource(
stateId, info, resource, stateDeps,
)
case config.DataResourceMode:
return n.evalTreeDataResource(
stateId, info, resource, stateDeps)
case addrs.ManagedResourceMode:
return n.evalTreeManagedResource(addr, stateId, stateDeps)
case addrs.DataResourceMode:
return n.evalTreeDataResource(addr, stateId, stateDeps)
default:
panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
}
}
func (n *NodeApplyableResource) evalTreeDataResource(
stateId string, info *InstanceInfo,
resource *Resource, stateDeps []string) EvalNode {
func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []string) EvalNode {
var provider ResourceProvider
var config *ResourceConfig
var providerSchema *ProviderSchema
var diff *InstanceDiff
var state *InstanceState
var configVal cty.Value
return &EvalSequence{
Nodes: []EvalNode{
// Build the instance info
&EvalInstanceInfo{
Info: info,
},
// Get the saved diff for apply
&EvalReadDiff{
Name: stateId,
Diff: &diff,
},
// Stop here if we don't actually have a diff
// Stop early if we don't actually have a diff
&EvalIf{
If: func(ctx EvalContext) (bool, error) {
if diff == nil {
@ -124,53 +120,26 @@ func (n *NodeApplyableResource) evalTreeDataResource(
Then: EvalNoop{},
},
// Normally we interpolate count as a preparation step before
// a DynamicExpand, but an apply graph has pre-expanded nodes
// and so the count would otherwise never be interpolated.
//
// This is redundant when there are multiple instances created
// from the same config (count > 1) but harmless since the
// underlying structures have mutexes to make this concurrency-safe.
//
// In most cases this isn't actually needed because we dealt with
// all of the counts during the plan walk, but we do it here
// for completeness because other code assumes that the
// final count is always available during interpolation.
//
// Here we are just populating the interpolated value in-place
// inside this RawConfig object, like we would in
// NodeAbstractCountResource.
&EvalInterpolate{
Config: n.Config.RawCount,
ContinueOnErr: true,
},
// We need to re-interpolate the config here, rather than
// just using the diff's values directly, because we've
// potentially learned more variable values during the
// apply pass that weren't known when the diff was produced.
&EvalInterpolate{
Config: n.Config.RawConfig.Copy(),
Resource: resource,
Output: &config,
},
&EvalGetProvider{
Name: n.ResolvedProvider,
Addr: n.ResolvedProvider,
Output: &provider,
Schema: &providerSchema,
},
// Make a new diff with our newly-interpolated config.
// Make a new diff, in case we've learned new values in the state
// during apply which we can now incorporate.
&EvalReadDataDiff{
Info: info,
Config: &config,
Previous: &diff,
Provider: &provider,
Output: &diff,
Addr: addr.Resource,
Config: n.Config,
Provider: &provider,
ProviderSchema: &providerSchema,
Output: &diff,
OutputValue: &configVal,
OutputState: &state,
},
&EvalReadDataApply{
Info: info,
Addr: addr.Resource,
Diff: &diff,
Provider: &provider,
Output: &state,
@ -196,26 +165,20 @@ func (n *NodeApplyableResource) evalTreeDataResource(
}
}
func (n *NodeApplyableResource) evalTreeManagedResource(
stateId string, info *InstanceInfo,
resource *Resource, stateDeps []string) EvalNode {
func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []string) EvalNode {
// Declare a bunch of variables that are used for state during
// evaluation. Most of these are written to by address below.
var provider ResourceProvider
var providerSchema *ProviderSchema
var diff, diffApply *InstanceDiff
var state *InstanceState
var resourceConfig *ResourceConfig
var err error
var createNew bool
var createBeforeDestroyEnabled bool
var configVal cty.Value
return &EvalSequence{
Nodes: []EvalNode{
// Build the instance info
&EvalInstanceInfo{
Info: info,
},
// Get the saved diff for apply
&EvalReadDiff{
Name: stateId,
@ -223,6 +186,7 @@ func (n *NodeApplyableResource) evalTreeManagedResource(
},
// We don't want to do any destroys
// (these are handled by NodeDestroyResourceInstance instead)
&EvalIf{
If: func(ctx EvalContext) (bool, error) {
if diffApply == nil {
@ -246,9 +210,9 @@ func (n *NodeApplyableResource) evalTreeManagedResource(
destroy = diffApply.GetDestroy() || diffApply.RequiresNew()
}
createBeforeDestroyEnabled =
n.Config.Lifecycle.CreateBeforeDestroy &&
destroy
if destroy && n.Config.Managed != nil && n.Config.Managed.CreateBeforeDestroy {
createBeforeDestroyEnabled = true
}
return createBeforeDestroyEnabled, nil
},
@ -257,59 +221,27 @@ func (n *NodeApplyableResource) evalTreeManagedResource(
},
},
// Normally we interpolate count as a preparation step before
// a DynamicExpand, but an apply graph has pre-expanded nodes
// and so the count would otherwise never be interpolated.
//
// This is redundant when there are multiple instances created
// from the same config (count > 1) but harmless since the
// underlying structures have mutexes to make this concurrency-safe.
//
// In most cases this isn't actually needed because we dealt with
// all of the counts during the plan walk, but we need to do this
// in order to support interpolation of resource counts from
// apply-time-interpolated expressions, such as those in
// "provisioner" blocks.
//
// Here we are just populating the interpolated value in-place
// inside this RawConfig object, like we would in
// NodeAbstractCountResource.
&EvalInterpolate{
Config: n.Config.RawCount,
ContinueOnErr: true,
},
&EvalInterpolate{
Config: n.Config.RawConfig.Copy(),
Resource: resource,
Output: &resourceConfig,
},
&EvalGetProvider{
Name: n.ResolvedProvider,
Addr: n.ResolvedProvider,
Output: &provider,
Schema: &providerSchema,
},
&EvalReadState{
Name: stateId,
Output: &state,
},
// Re-run validation to catch any errors we missed, e.g. type
// mismatches on computed values.
&EvalValidateResource{
Provider: &provider,
Config: &resourceConfig,
ResourceName: n.Config.Name,
ResourceType: n.Config.Type,
ResourceMode: n.Config.Mode,
IgnoreWarnings: true,
},
// Make a new diff, in case we've learned new values in the state
// during apply which we can now incorporate.
&EvalDiff{
Info: info,
Config: &resourceConfig,
Resource: n.Config,
Provider: &provider,
Diff: &diffApply,
State: &state,
OutputDiff: &diffApply,
Addr: addr.Resource,
Config: n.Config,
Provider: &provider,
ProviderSchema: &providerSchema,
State: &state,
OutputDiff: &diffApply,
OutputValue: &configVal,
OutputState: &state,
},
// Get the saved diff
@ -320,27 +252,29 @@ func (n *NodeApplyableResource) evalTreeManagedResource(
// Compare the diffs
&EvalCompareDiff{
Info: info,
Addr: addr.Resource,
One: &diff,
Two: &diffApply,
},
&EvalGetProvider{
Name: n.ResolvedProvider,
Addr: n.ResolvedProvider,
Output: &provider,
Schema: &providerSchema,
},
&EvalReadState{
Name: stateId,
Output: &state,
},
// Call pre-apply hook
&EvalApplyPre{
Info: info,
Addr: addr.Resource,
State: &state,
Diff: &diffApply,
},
&EvalApply{
Info: info,
Addr: addr.Resource,
State: &state,
Diff: &diffApply,
Provider: &provider,
@ -356,13 +290,12 @@ func (n *NodeApplyableResource) evalTreeManagedResource(
State: &state,
},
&EvalApplyProvisioners{
Info: info,
Addr: addr.Resource,
State: &state,
Resource: n.Config,
InterpResource: resource,
ResourceConfig: n.Config,
CreateNew: &createNew,
Error: &err,
When: config.ProvisionerWhenCreate,
When: configs.ProvisionerWhenCreate,
},
&EvalIf{
If: func(ctx EvalContext) (bool, error) {
@ -390,7 +323,7 @@ func (n *NodeApplyableResource) evalTreeManagedResource(
},
&EvalApplyPost{
Info: info,
Addr: addr.Resource,
State: &state,
Error: &err,
},

View File

@ -1,82 +1,90 @@
package terraform
import (
"fmt"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
)
// NodeDestroyResource represents a resource that is to be destroyed.
type NodeDestroyResource struct {
*NodeAbstractResource
type NodeDestroyResourceInstance struct {
*NodeAbstractResourceInstance
CreateBeforeDestroyOverride *bool
}
func (n *NodeDestroyResource) Name() string {
return n.NodeAbstractResource.Name() + " (destroy)"
var (
_ GraphNodeResource = (*NodeDestroyResourceInstance)(nil)
_ GraphNodeResourceInstance = (*NodeDestroyResourceInstance)(nil)
_ GraphNodeDestroyer = (*NodeDestroyResourceInstance)(nil)
_ GraphNodeDestroyerCBD = (*NodeDestroyResourceInstance)(nil)
_ GraphNodeReferenceable = (*NodeDestroyResourceInstance)(nil)
_ GraphNodeReferencer = (*NodeDestroyResourceInstance)(nil)
_ GraphNodeEvalable = (*NodeDestroyResourceInstance)(nil)
_ GraphNodeProviderConsumer = (*NodeDestroyResourceInstance)(nil)
_ GraphNodeProvisionerConsumer = (*NodeDestroyResourceInstance)(nil)
)
func (n *NodeDestroyResourceInstance) Name() string {
return n.ResourceInstanceAddr().String() + " (destroy)"
}
// GraphNodeDestroyer
func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress {
return n.Addr
func (n *NodeDestroyResourceInstance) DestroyAddr() *addrs.AbsResourceInstance {
addr := n.ResourceInstanceAddr()
return &addr
}
// GraphNodeDestroyerCBD
func (n *NodeDestroyResource) CreateBeforeDestroy() bool {
func (n *NodeDestroyResourceInstance) CreateBeforeDestroy() bool {
if n.CreateBeforeDestroyOverride != nil {
return *n.CreateBeforeDestroyOverride
}
// If we have no config, we just assume no
if n.Config == nil {
if n.Config == nil || n.Config.Managed == nil {
return false
}
return n.Config.Lifecycle.CreateBeforeDestroy
return n.Config.Managed.CreateBeforeDestroy
}
// GraphNodeDestroyerCBD
func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error {
// If we have no config, do nothing since it won't affect the
// create step anyways.
if n.Config == nil {
return nil
}
// Set CBD to true
n.Config.Lifecycle.CreateBeforeDestroy = true
func (n *NodeDestroyResourceInstance) ModifyCreateBeforeDestroy(v bool) error {
n.CreateBeforeDestroyOverride = &v
return nil
}
// GraphNodeReferenceable, overriding NodeAbstractResource
func (n *NodeDestroyResource) ReferenceableName() []string {
// We modify our referenceable name to have the suffix of ".destroy"
// since depending on the creation side doesn't necessarily mean
// depending on destruction.
suffix := ".destroy"
// If we're CBD, we also append "-cbd". This is because CBD will setup
// its own edges (in CBDEdgeTransformer). Depending on the "destroy"
// side generally doesn't mean depending on CBD as well. See GH-11349
if n.CreateBeforeDestroy() {
suffix += "-cbd"
func (n *NodeDestroyResourceInstance) ReferenceableName() []addrs.Referenceable {
relAddr := n.ResourceInstanceAddr().Resource
switch {
case n.CreateBeforeDestroy():
return []addrs.Referenceable{
relAddr.ContainingResource(),
relAddr.Phase(addrs.ResourceInstancePhaseDestroyCBD),
}
default:
return []addrs.Referenceable{
relAddr.ContainingResource(),
relAddr.Phase(addrs.ResourceInstancePhaseDestroy),
}
}
result := n.NodeAbstractResource.ReferenceableName()
for i, v := range result {
result[i] = v + suffix
}
return result
}
// GraphNodeReferencer, overriding NodeAbstractResource
func (n *NodeDestroyResource) References() []string {
func (n *NodeDestroyResourceInstance) References() []*addrs.Reference {
// If we have a config, then we need to include destroy-time dependencies
if c := n.Config; c != nil {
var result []string
for _, p := range c.Provisioners {
// We include conn info and config for destroy time provisioners
// as dependencies that we have.
if p.When == config.ProvisionerWhenDestroy {
result = append(result, ReferencesFromConfig(p.ConnInfo)...)
result = append(result, ReferencesFromConfig(p.RawConfig)...)
if c := n.Config; c != nil && c.Managed != nil {
var result []*addrs.Reference
// We include conn info and config for destroy time provisioners
// as dependencies that we have.
for _, p := range c.Managed.Provisioners {
schema := n.ProvisionerSchemas[p.Type]
if p.When == configs.ProvisionerWhenDestroy {
result = append(result, ReferencesFromConfig(p.Connection.Config, connectionBlockSupersetSchema)...)
result = append(result, ReferencesFromConfig(p.Config, schema)...)
}
}
@ -87,11 +95,9 @@ func (n *NodeDestroyResource) References() []string {
}
// GraphNodeDynamicExpandable
func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
// If we have no config we do nothing
if n.Addr == nil {
return nil, nil
}
func (n *NodeDestroyResourceInstance) DynamicExpand(ctx EvalContext) (*Graph, error) {
// stateId is the legacy-style ID to put into the state
stateId := NewLegacyResourceInstanceAddress(n.ResourceInstanceAddr()).stateId()
state, lock := ctx.State()
lock.RLock()
@ -103,13 +109,13 @@ func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
// We want deposed resources in the state to be destroyed
steps = append(steps, &DeposedTransformer{
State: state,
View: n.Addr.stateId(),
View: stateId,
ResolvedProvider: n.ResolvedProvider,
})
// Target
steps = append(steps, &TargetsTransformer{
ParsedTargets: n.Targets,
Targets: n.Targets,
})
// Always end with the root being added
@ -120,37 +126,22 @@ func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
Steps: steps,
Name: "NodeResourceDestroy",
}
return b.Build(ctx.Path())
g, diags := b.Build(ctx.Path())
return g, diags.ErrWithWarnings()
}
// GraphNodeEvalable
func (n *NodeDestroyResource) EvalTree() EvalNode {
// stateId is the ID to put into the state
stateId := n.Addr.stateId()
func (n *NodeDestroyResourceInstance) EvalTree() EvalNode {
addr := n.ResourceInstanceAddr()
// Build the instance info. More of this will be populated during eval
info := &InstanceInfo{
Id: stateId,
Type: n.Addr.Type,
uniqueExtra: "destroy",
}
// Build the resource for eval
addr := n.Addr
resource := &Resource{
Name: addr.Name,
Type: addr.Type,
CountIndex: addr.Index,
}
if resource.CountIndex < 0 {
resource.CountIndex = 0
}
// stateId is the legacy-style ID to put into the state
stateId := NewLegacyResourceInstanceAddress(n.ResourceInstanceAddr()).stateId()
// Get our state
rs := n.ResourceState
if rs == nil {
rs = &ResourceState{
Provider: n.ResolvedProvider,
Provider: n.ResolvedProvider.String(),
}
}
@ -187,11 +178,8 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
Then: EvalNoop{},
},
// Load the instance info so we have the module path set
&EvalInstanceInfo{Info: info},
&EvalGetProvider{
Name: n.ResolvedProvider,
Addr: n.ResolvedProvider,
Output: &provider,
},
&EvalReadState{
@ -204,7 +192,7 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
// Call pre-apply hook
&EvalApplyPre{
Info: info,
Addr: addr.Resource,
State: &state,
Diff: &diffApply,
},
@ -220,12 +208,11 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
},
Then: &EvalApplyProvisioners{
Info: info,
Addr: addr.Resource,
State: &state,
Resource: n.Config,
InterpResource: resource,
ResourceConfig: n.Config,
Error: &err,
When: config.ProvisionerWhenDestroy,
When: configs.ProvisionerWhenDestroy,
},
},
@ -237,7 +224,7 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
},
Then: &EvalApplyPost{
Info: info,
Addr: addr.Resource,
State: &state,
Error: &err,
},
@ -246,25 +233,17 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
// Make sure we handle data sources properly.
&EvalIf{
If: func(ctx EvalContext) (bool, error) {
if n.Addr == nil {
return false, fmt.Errorf("nil address")
}
if n.Addr.Mode == config.DataResourceMode {
return true, nil
}
return false, nil
return addr.Resource.Resource.Mode == addrs.DataResourceMode, nil
},
Then: &EvalReadDataApply{
Info: info,
Addr: addr.Resource,
Diff: &diffApply,
Provider: &provider,
Output: &state,
},
Else: &EvalApply{
Info: info,
Addr: addr.Resource,
State: &state,
Diff: &diffApply,
Provider: &provider,
@ -274,13 +253,13 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
},
&EvalWriteState{
Name: stateId,
ResourceType: n.Addr.Type,
ResourceType: addr.Resource.Resource.Type,
Provider: n.ResolvedProvider,
Dependencies: rs.Dependencies,
State: &state,
},
&EvalApplyPost{
Info: info,
Addr: addr.Resource,
State: &state,
Error: &err,
},

View File

@ -2,46 +2,62 @@ package terraform
import (
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/tfdiags"
)
// NodePlannableResource represents a resource that is "plannable":
// it is ready to be planned in order to create a diff.
type NodePlannableResource struct {
*NodeAbstractCountResource
*NodeAbstractResource
}
var (
_ GraphNodeSubPath = (*NodePlannableResource)(nil)
_ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil)
_ GraphNodeReferenceable = (*NodePlannableResource)(nil)
_ GraphNodeReferencer = (*NodePlannableResource)(nil)
_ GraphNodeResource = (*NodePlannableResource)(nil)
_ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil)
)
// GraphNodeDynamicExpandable
func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
var diags tfdiags.Diagnostics
count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)
diags = diags.Append(countDiags)
if countDiags.HasErrors() {
return nil, diags.Err()
}
// Next we need to potentially rename an instance address in the state
// if we're transitioning whether "count" is set at all.
fixResourceCountSetTransition(ctx, n.ResourceAddr().Resource, count != -1)
// Grab the state which we read
state, lock := ctx.State()
lock.RLock()
defer lock.RUnlock()
// Expand the resource count which must be available by now from EvalTree
count, err := n.Config.Count()
if err != nil {
return nil, err
}
// The concrete resource factory we'll use
concreteResource := func(a *NodeAbstractResource) dag.Vertex {
concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
// Add the config and state since we don't do that via transforms
a.Config = n.Config
a.ResolvedProvider = n.ResolvedProvider
return &NodePlannableResourceInstance{
NodeAbstractResource: a,
NodeAbstractResourceInstance: a,
}
}
// The concrete resource factory we'll use for oprhans
concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex {
// The concrete resource factory we'll use for orphans
concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex {
// Add the config and state since we don't do that via transforms
a.Config = n.Config
a.ResolvedProvider = n.ResolvedProvider
return &NodePlannableResourceOrphan{
NodeAbstractResource: a,
return &NodePlannableResourceInstanceOrphan{
NodeAbstractResourceInstance: a,
}
}
@ -66,7 +82,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
&AttachStateTransformer{State: state},
// Targeting
&TargetsTransformer{ParsedTargets: n.Targets},
&TargetsTransformer{Targets: n.Targets},
// Connect references so ordering is correct
&ReferenceTransformer{},
@ -81,5 +97,6 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
Validate: true,
Name: "NodePlannableResource",
}
return b.Build(ctx.Path())
graph, diags := b.Build(ctx.Path())
return graph, diags.ErrWithWarnings()
}

View File

@ -1,28 +1,40 @@
package terraform
// NodePlanDestroyableResource represents a resource that is "applyable":
// it is ready to be applied and is represented by a diff.
type NodePlanDestroyableResource struct {
*NodeAbstractResource
import (
"github.com/hashicorp/terraform/addrs"
)
// NodePlanDestroyableResourceInstance represents a resource that is ready
// to be planned for destruction.
type NodePlanDestroyableResourceInstance struct {
*NodeAbstractResourceInstance
}
var (
_ GraphNodeSubPath = (*NodePlanDestroyableResourceInstance)(nil)
_ GraphNodeReferenceable = (*NodePlanDestroyableResourceInstance)(nil)
_ GraphNodeReferencer = (*NodePlanDestroyableResourceInstance)(nil)
_ GraphNodeDestroyer = (*NodePlanDestroyableResourceInstance)(nil)
_ GraphNodeResource = (*NodePlanDestroyableResourceInstance)(nil)
_ GraphNodeResourceInstance = (*NodePlanDestroyableResourceInstance)(nil)
_ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil)
_ GraphNodeAttachResourceState = (*NodePlanDestroyableResourceInstance)(nil)
_ GraphNodeEvalable = (*NodePlanDestroyableResourceInstance)(nil)
)
// GraphNodeDestroyer
func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress {
return n.Addr
func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance {
addr := n.ResourceInstanceAddr()
return &addr
}
// GraphNodeEvalable
func (n *NodePlanDestroyableResource) EvalTree() EvalNode {
addr := n.NodeAbstractResource.Addr
func (n *NodePlanDestroyableResourceInstance) EvalTree() EvalNode {
addr := n.ResourceInstanceAddr()
// stateId is the ID to put into the state
stateId := addr.stateId()
// Build the instance info. More of this will be populated during eval
info := &InstanceInfo{
Id: stateId,
Type: addr.Type,
}
// State still uses legacy-style internal ids, so we need to shim to get
// a suitable key to use.
stateId := NewLegacyResourceInstanceAddress(addr).stateId()
// Declare a bunch of variables that are used for state during
// evaluation. Most of these are written to by address below.
@ -36,13 +48,14 @@ func (n *NodePlanDestroyableResource) EvalTree() EvalNode {
Output: &state,
},
&EvalDiffDestroy{
Info: info,
Addr: addr.Resource,
State: &state,
Output: &diff,
},
&EvalCheckPreventDestroy{
Resource: n.Config,
Diff: &diff,
Addr: addr.Resource,
Config: n.Config,
Diff: &diff,
},
&EvalWriteDiff{
Name: stateId,

View File

@ -3,64 +3,56 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/addrs"
"github.com/zclconf/go-cty/cty"
)
// NodePlannableResourceInstance represents a _single_ resource
// instance that is plannable. This means this represents a single
// count index, for example.
type NodePlannableResourceInstance struct {
*NodeAbstractResource
*NodeAbstractResourceInstance
}
var (
_ GraphNodeSubPath = (*NodePlannableResourceInstance)(nil)
_ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil)
_ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil)
_ GraphNodeResource = (*NodePlannableResourceInstance)(nil)
_ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil)
_ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil)
_ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil)
_ GraphNodeEvalable = (*NodePlannableResourceInstance)(nil)
)
// GraphNodeEvalable
func (n *NodePlannableResourceInstance) EvalTree() EvalNode {
addr := n.NodeAbstractResource.Addr
addr := n.ResourceInstanceAddr()
// stateId is the ID to put into the state
stateId := addr.stateId()
// Build the instance info. More of this will be populated during eval
info := &InstanceInfo{
Id: stateId,
Type: addr.Type,
ModulePath: normalizeModulePath(addr.Path),
}
// Build the resource for eval
resource := &Resource{
Name: addr.Name,
Type: addr.Type,
CountIndex: addr.Index,
}
if resource.CountIndex < 0 {
resource.CountIndex = 0
}
// State still uses legacy-style internal ids, so we need to shim to get
// a suitable key to use.
stateId := NewLegacyResourceInstanceAddress(addr).stateId()
// Determine the dependencies for the state.
stateDeps := n.StateReferences()
// Eval info is different depending on what kind of resource this is
switch n.Config.Mode {
case config.ManagedResourceMode:
return n.evalTreeManagedResource(
stateId, info, resource, stateDeps,
)
case config.DataResourceMode:
return n.evalTreeDataResource(
stateId, info, resource, stateDeps)
switch addr.Resource.Resource.Mode {
case addrs.ManagedResourceMode:
return n.evalTreeManagedResource(addr, stateId, stateDeps)
case addrs.DataResourceMode:
return n.evalTreeDataResource(addr, stateId, stateDeps)
default:
panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
}
}
func (n *NodePlannableResourceInstance) evalTreeDataResource(
stateId string, info *InstanceInfo,
resource *Resource, stateDeps []string) EvalNode {
func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []string) EvalNode {
var provider ResourceProvider
var config *ResourceConfig
var providerSchema *ProviderSchema
var diff *InstanceDiff
var state *InstanceState
var configVal cty.Value
return &EvalSequence{
Nodes: []EvalNode{
@ -69,19 +61,25 @@ func (n *NodePlannableResourceInstance) evalTreeDataResource(
Output: &state,
},
// We need to re-interpolate the config here because some
// of the attributes may have become computed during
// earlier planning, due to other resources having
// "requires new resource" diffs.
&EvalInterpolate{
Config: n.Config.RawConfig.Copy(),
Resource: resource,
Output: &config,
&EvalGetProvider{
Addr: n.ResolvedProvider,
Output: &provider,
Schema: &providerSchema,
},
&EvalReadDataDiff{
Addr: addr.Resource,
Config: n.Config,
Provider: &provider,
ProviderSchema: &providerSchema,
Output: &diff,
OutputValue: &configVal,
OutputState: &state,
},
&EvalIf{
If: func(ctx EvalContext) (bool, error) {
computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0
computed := !configVal.IsWhollyKnown()
// If the configuration is complete and we
// already have a state then we don't need to
@ -96,19 +94,6 @@ func (n *NodePlannableResourceInstance) evalTreeDataResource(
Then: EvalNoop{},
},
&EvalGetProvider{
Name: n.ResolvedProvider,
Output: &provider,
},
&EvalReadDataDiff{
Info: info,
Config: &config,
Provider: &provider,
Output: &diff,
OutputState: &state,
},
&EvalWriteState{
Name: stateId,
ResourceType: n.Config.Type,
@ -125,54 +110,38 @@ func (n *NodePlannableResourceInstance) evalTreeDataResource(
}
}
func (n *NodePlannableResourceInstance) evalTreeManagedResource(
stateId string, info *InstanceInfo,
resource *Resource, stateDeps []string) EvalNode {
// Declare a bunch of variables that are used for state during
// evaluation. Most of these are written to by address below.
func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []string) EvalNode {
var provider ResourceProvider
var providerSchema *ProviderSchema
var diff *InstanceDiff
var state *InstanceState
var resourceConfig *ResourceConfig
return &EvalSequence{
Nodes: []EvalNode{
&EvalInterpolate{
Config: n.Config.RawConfig.Copy(),
Resource: resource,
Output: &resourceConfig,
},
&EvalGetProvider{
Name: n.ResolvedProvider,
Output: &provider,
},
// Re-run validation to catch any errors we missed, e.g. type
// mismatches on computed values.
&EvalValidateResource{
Provider: &provider,
Config: &resourceConfig,
ResourceName: n.Config.Name,
ResourceType: n.Config.Type,
ResourceMode: n.Config.Mode,
IgnoreWarnings: true,
},
&EvalReadState{
Name: stateId,
Output: &state,
},
&EvalGetProvider{
Addr: n.ResolvedProvider,
Output: &provider,
Schema: &providerSchema,
},
&EvalDiff{
Name: stateId,
Info: info,
Config: &resourceConfig,
Resource: n.Config,
Provider: &provider,
State: &state,
OutputDiff: &diff,
OutputState: &state,
Addr: addr.Resource,
Config: n.Config,
Provider: &provider,
ProviderSchema: &providerSchema,
State: &state,
OutputDiff: &diff,
OutputState: &state,
},
&EvalCheckPreventDestroy{
Resource: n.Config,
Diff: &diff,
Addr: addr.Resource,
Config: n.Config,
Diff: &diff,
},
&EvalWriteState{
Name: stateId,

View File

@ -1,28 +1,37 @@
package terraform
// NodePlannableResourceOrphan represents a resource that is "applyable":
// NodePlannableResourceInstanceOrphan represents a resource that is "applyable":
// it is ready to be applied and is represented by a diff.
type NodePlannableResourceOrphan struct {
*NodeAbstractResource
type NodePlannableResourceInstanceOrphan struct {
*NodeAbstractResourceInstance
}
func (n *NodePlannableResourceOrphan) Name() string {
return n.NodeAbstractResource.Name() + " (orphan)"
var (
_ GraphNodeSubPath = (*NodePlannableResourceInstanceOrphan)(nil)
_ GraphNodeReferenceable = (*NodePlannableResourceInstanceOrphan)(nil)
_ GraphNodeReferencer = (*NodePlannableResourceInstanceOrphan)(nil)
_ GraphNodeResource = (*NodePlannableResourceInstanceOrphan)(nil)
_ GraphNodeResourceInstance = (*NodePlannableResourceInstanceOrphan)(nil)
_ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil)
_ GraphNodeAttachResourceState = (*NodePlannableResourceInstanceOrphan)(nil)
_ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil)
)
var (
_ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil)
)
func (n *NodePlannableResourceInstanceOrphan) Name() string {
return n.ResourceInstanceAddr().String() + " (orphan)"
}
// GraphNodeEvalable
func (n *NodePlannableResourceOrphan) EvalTree() EvalNode {
addr := n.NodeAbstractResource.Addr
func (n *NodePlannableResourceInstanceOrphan) EvalTree() EvalNode {
addr := n.ResourceInstanceAddr()
// stateId is the ID to put into the state
stateId := addr.stateId()
// Build the instance info. More of this will be populated during eval
info := &InstanceInfo{
Id: stateId,
Type: addr.Type,
ModulePath: normalizeModulePath(addr.Path),
}
// State still uses legacy-style internal ids, so we need to shim to get
// a suitable key to use.
stateId := NewLegacyResourceInstanceAddress(addr).stateId()
// Declare a bunch of variables that are used for state during
// evaluation. Most of these are written to by address below.
@ -36,14 +45,14 @@ func (n *NodePlannableResourceOrphan) EvalTree() EvalNode {
Output: &state,
},
&EvalDiffDestroy{
Info: info,
Addr: addr.Resource,
State: &state,
Output: &diff,
},
&EvalCheckPreventDestroy{
Resource: n.Config,
ResourceId: stateId,
Diff: &diff,
Addr: addr.Resource,
Config: n.Config,
Diff: &diff,
},
&EvalWriteDiff{
Name: stateId,

View File

@ -3,37 +3,53 @@ package terraform
import (
"fmt"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/tfdiags"
)
// NodeRefreshableManagedResource represents a resource that is expandable into
// NodeRefreshableManagedResourceInstance. Resource count orphans are also added.
type NodeRefreshableManagedResource struct {
*NodeAbstractCountResource
*NodeAbstractResource
}
var (
_ GraphNodeSubPath = (*NodeRefreshableManagedResource)(nil)
_ GraphNodeDynamicExpandable = (*NodeRefreshableManagedResource)(nil)
_ GraphNodeReferenceable = (*NodeRefreshableManagedResource)(nil)
_ GraphNodeReferencer = (*NodeRefreshableManagedResource)(nil)
_ GraphNodeResource = (*NodeRefreshableManagedResource)(nil)
_ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResource)(nil)
)
// GraphNodeDynamicExpandable
func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
var diags tfdiags.Diagnostics
count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)
diags = diags.Append(countDiags)
if countDiags.HasErrors() {
return nil, diags.Err()
}
// Next we need to potentially rename an instance address in the state
// if we're transitioning whether "count" is set at all.
fixResourceCountSetTransition(ctx, n.ResourceAddr().Resource, count != -1)
// Grab the state which we read
state, lock := ctx.State()
lock.RLock()
defer lock.RUnlock()
// Expand the resource count which must be available by now from EvalTree
count, err := n.Config.Count()
if err != nil {
return nil, err
}
// The concrete resource factory we'll use
concreteResource := func(a *NodeAbstractResource) dag.Vertex {
concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
// Add the config and state since we don't do that via transforms
a.Config = n.Config
a.ResolvedProvider = n.ResolvedProvider
return &NodeRefreshableManagedResourceInstance{
NodeAbstractResource: a,
NodeAbstractResourceInstance: a,
}
}
@ -59,7 +75,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
&AttachStateTransformer{State: state},
// Targeting
&TargetsTransformer{ParsedTargets: n.Targets},
&TargetsTransformer{Targets: n.Targets},
// Connect references so ordering is correct
&ReferenceTransformer{},
@ -75,61 +91,72 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
Name: "NodeRefreshableManagedResource",
}
return b.Build(ctx.Path())
graph, diags := b.Build(ctx.Path())
return graph, diags.ErrWithWarnings()
}
// NodeRefreshableManagedResourceInstance represents a resource that is "applyable":
// it is ready to be applied and is represented by a diff.
type NodeRefreshableManagedResourceInstance struct {
*NodeAbstractResource
*NodeAbstractResourceInstance
}
var (
_ GraphNodeSubPath = (*NodeRefreshableManagedResourceInstance)(nil)
_ GraphNodeReferenceable = (*NodeRefreshableManagedResourceInstance)(nil)
_ GraphNodeReferencer = (*NodeRefreshableManagedResourceInstance)(nil)
_ GraphNodeDestroyer = (*NodeRefreshableManagedResourceInstance)(nil)
_ GraphNodeResource = (*NodeRefreshableManagedResourceInstance)(nil)
_ GraphNodeResourceInstance = (*NodeRefreshableManagedResourceInstance)(nil)
_ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResourceInstance)(nil)
_ GraphNodeAttachResourceState = (*NodeRefreshableManagedResourceInstance)(nil)
_ GraphNodeEvalable = (*NodeRefreshableManagedResourceInstance)(nil)
)
// GraphNodeDestroyer
func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *ResourceAddress {
return n.Addr
func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *addrs.AbsResourceInstance {
addr := n.ResourceInstanceAddr()
return &addr
}
// GraphNodeEvalable
func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode {
addr := n.ResourceInstanceAddr()
// Eval info is different depending on what kind of resource this is
switch mode := n.Addr.Mode; mode {
case config.ManagedResourceMode:
switch addr.Resource.Resource.Mode {
case addrs.ManagedResourceMode:
if n.ResourceState == nil {
return n.evalTreeManagedResourceNoState()
}
return n.evalTreeManagedResource()
case config.DataResourceMode:
case addrs.DataResourceMode:
// Get the data source node. If we don't have a configuration
// then it is an orphan so we destroy it (remove it from the state).
var dn GraphNodeEvalable
if n.Config != nil {
dn = &NodeRefreshableDataResourceInstance{
NodeAbstractResource: n.NodeAbstractResource,
NodeAbstractResourceInstance: n.NodeAbstractResourceInstance,
}
} else {
dn = &NodeDestroyableDataResource{
NodeAbstractResource: n.NodeAbstractResource,
NodeAbstractResourceInstance: n.NodeAbstractResourceInstance,
}
}
return dn.EvalTree()
default:
panic(fmt.Errorf("unsupported resource mode %s", mode))
panic(fmt.Errorf("unsupported resource mode %s", addr.Resource.Resource.Mode))
}
}
func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode {
addr := n.NodeAbstractResource.Addr
addr := n.ResourceInstanceAddr()
// stateId is the ID to put into the state
stateId := addr.stateId()
// Build the instance info. More of this will be populated during eval
info := &InstanceInfo{
Id: stateId,
Type: addr.Type,
}
// State still uses legacy-style internal ids, so we need to shim to get
// a suitable key to use.
stateId := NewLegacyResourceInstanceAddress(addr).stateId()
// Declare a bunch of variables that are used for state during
// evaluation. Most of these are written to by address below.
@ -149,20 +176,23 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN
return &EvalSequence{
Nodes: []EvalNode{
&EvalGetProvider{
Name: n.ResolvedProvider,
Output: &provider,
},
&EvalReadState{
Name: stateId,
Output: &state,
},
&EvalGetProvider{
Addr: n.ResolvedProvider,
Output: &provider,
},
&EvalRefresh{
Info: info,
Addr: addr.Resource,
Provider: &provider,
State: &state,
Output: &state,
},
&EvalWriteState{
Name: stateId,
ResourceType: n.ResourceState.Type,
@ -186,74 +216,46 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN
// plan, but nothing is done with the diff after it is created - it is dropped,
// and its changes are not counted in the UI.
func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode {
addr := n.ResourceInstanceAddr()
// Declare a bunch of variables that are used for state during
// evaluation. Most of these are written to by address below.
var provider ResourceProvider
var providerSchema *ProviderSchema
var diff *InstanceDiff
var state *InstanceState
var resourceConfig *ResourceConfig
addr := n.NodeAbstractResource.Addr
stateID := addr.stateId()
info := &InstanceInfo{
Id: stateID,
Type: addr.Type,
ModulePath: normalizeModulePath(addr.Path),
}
// Build the resource for eval
resource := &Resource{
Name: addr.Name,
Type: addr.Type,
CountIndex: addr.Index,
}
if resource.CountIndex < 0 {
resource.CountIndex = 0
}
// State still uses legacy-style internal ids, so we need to shim to get
// a suitable key to use.
stateID := NewLegacyResourceInstanceAddress(addr).stateId()
// Determine the dependencies for the state.
stateDeps := n.StateReferences()
// n.Config can be nil if the config and state don't match
var raw *config.RawConfig
if n.Config != nil {
raw = n.Config.RawConfig.Copy()
}
return &EvalSequence{
Nodes: []EvalNode{
&EvalInterpolate{
Config: raw,
Resource: resource,
Output: &resourceConfig,
},
&EvalGetProvider{
Name: n.ResolvedProvider,
Output: &provider,
},
// Re-run validation to catch any errors we missed, e.g. type
// mismatches on computed values.
&EvalValidateResource{
Provider: &provider,
Config: &resourceConfig,
ResourceName: n.Config.Name,
ResourceType: n.Config.Type,
ResourceMode: n.Config.Mode,
IgnoreWarnings: true,
},
&EvalReadState{
Name: stateID,
Output: &state,
},
&EvalDiff{
Name: stateID,
Info: info,
Config: &resourceConfig,
Resource: n.Config,
Provider: &provider,
State: &state,
OutputState: &state,
Stub: true,
&EvalGetProvider{
Addr: n.ResolvedProvider,
Output: &provider,
Schema: &providerSchema,
},
&EvalDiff{
Addr: addr.Resource,
Config: n.Config,
Provider: &provider,
ProviderSchema: &providerSchema,
State: &state,
OutputDiff: &diff,
OutputState: &state,
Stub: true,
},
&EvalWriteState{
Name: stateID,
ResourceType: n.Config.Type,

View File

@ -1,48 +1,62 @@
package terraform
import (
"log"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
)
// NodeValidatableResource represents a resource that is used for validation
// only.
type NodeValidatableResource struct {
*NodeAbstractCountResource
*NodeAbstractResource
}
// GraphNodeEvalable
func (n *NodeValidatableResource) EvalTree() EvalNode {
// Ensure we're validating
c := n.NodeAbstractCountResource
c.Validate = true
return c.EvalTree()
}
var (
_ GraphNodeSubPath = (*NodeValidatableResource)(nil)
_ GraphNodeDynamicExpandable = (*NodeValidatableResource)(nil)
_ GraphNodeReferenceable = (*NodeValidatableResource)(nil)
_ GraphNodeReferencer = (*NodeValidatableResource)(nil)
_ GraphNodeResource = (*NodeValidatableResource)(nil)
_ GraphNodeAttachResourceConfig = (*NodeValidatableResource)(nil)
)
// GraphNodeDynamicExpandable
func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
var diags tfdiags.Diagnostics
count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)
diags = diags.Append(countDiags)
if countDiags.HasErrors() {
log.Printf("[TRACE] %T %s: count expression has errors", n, n.Name())
return nil, diags.Err()
}
if count >= 0 {
log.Printf("[TRACE] %T %s: count expression evaluates to %d", n, n.Name(), count)
} else {
log.Printf("[TRACE] %T %s: no count argument present", n, n.Name())
}
// Next we need to potentially rename an instance address in the state
// if we're transitioning whether "count" is set at all.
fixResourceCountSetTransition(ctx, n.ResourceAddr().Resource, count != -1)
// Grab the state which we read
state, lock := ctx.State()
lock.RLock()
defer lock.RUnlock()
// Expand the resource count which must be available by now from EvalTree
count := 1
if n.Config.RawCount.Value() != unknownValue() {
var err error
count, err = n.Config.Count()
if err != nil {
return nil, err
}
}
// The concrete resource factory we'll use
concreteResource := func(a *NodeAbstractResource) dag.Vertex {
concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
// Add the config and state since we don't do that via transforms
a.Config = n.Config
a.ResolvedProvider = n.ResolvedProvider
return &NodeValidatableResourceInstance{
NodeAbstractResource: a,
NodeAbstractResourceInstance: a,
}
}
@ -59,7 +73,7 @@ func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error)
&AttachStateTransformer{State: state},
// Targeting
&TargetsTransformer{ParsedTargets: n.Targets},
&TargetsTransformer{Targets: n.Targets},
// Connect references so ordering is correct
&ReferenceTransformer{},
@ -75,84 +89,79 @@ func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error)
Name: "NodeValidatableResource",
}
return b.Build(ctx.Path())
graph, diags := b.Build(ctx.Path())
return graph, diags.ErrWithWarnings()
}
// This represents a _single_ resource instance to validate.
type NodeValidatableResourceInstance struct {
*NodeAbstractResource
*NodeAbstractResourceInstance
}
var (
_ GraphNodeSubPath = (*NodeValidatableResourceInstance)(nil)
_ GraphNodeReferenceable = (*NodeValidatableResourceInstance)(nil)
_ GraphNodeReferencer = (*NodeValidatableResourceInstance)(nil)
_ GraphNodeResource = (*NodeValidatableResourceInstance)(nil)
_ GraphNodeResourceInstance = (*NodeValidatableResourceInstance)(nil)
_ GraphNodeAttachResourceConfig = (*NodeValidatableResourceInstance)(nil)
_ GraphNodeAttachResourceState = (*NodeValidatableResourceInstance)(nil)
_ GraphNodeEvalable = (*NodeValidatableResourceInstance)(nil)
)
// GraphNodeEvalable
func (n *NodeValidatableResourceInstance) EvalTree() EvalNode {
addr := n.NodeAbstractResource.Addr
// Build the resource for eval
resource := &Resource{
Name: addr.Name,
Type: addr.Type,
CountIndex: addr.Index,
}
if resource.CountIndex < 0 {
resource.CountIndex = 0
}
addr := n.ResourceInstanceAddr()
config := n.Config
// Declare a bunch of variables that are used for state during
// evaluation. Most of this are written to by-address below.
var config *ResourceConfig
// evaluation. These are written to via pointers passed to the EvalNodes
// below.
var provider ResourceProvider
var providerSchema *ProviderSchema
var configVal cty.Value
seq := &EvalSequence{
Nodes: []EvalNode{
&EvalValidateResourceSelfRef{
Addr: &addr,
Config: &n.Config.RawConfig,
&EvalValidateSelfRef{
Addr: addr.Resource,
Config: config.Config,
},
&EvalGetProvider{
Name: n.ResolvedProvider,
Addr: n.ResolvedProvider,
Output: &provider,
},
&EvalInterpolate{
Config: n.Config.RawConfig.Copy(),
Resource: resource,
Output: &config,
Schema: &providerSchema,
},
&EvalValidateResource{
Provider: &provider,
Config: &config,
ResourceName: n.Config.Name,
ResourceType: n.Config.Type,
ResourceMode: n.Config.Mode,
Addr: addr.Resource,
Provider: &provider,
ProviderSchema: &providerSchema,
Config: config,
ConfigVal: &configVal,
},
},
}
// Validate all the provisioners
for _, p := range n.Config.Provisioners {
var provisioner ResourceProvisioner
var connConfig *ResourceConfig
seq.Nodes = append(
seq.Nodes,
&EvalGetProvisioner{
Name: p.Type,
Output: &provisioner,
},
&EvalInterpolate{
Config: p.RawConfig.Copy(),
Resource: resource,
Output: &config,
},
&EvalInterpolate{
Config: p.ConnInfo.Copy(),
Resource: resource,
Output: &connConfig,
},
&EvalValidateProvisioner{
Provisioner: &provisioner,
Config: &config,
ConnConfig: &connConfig,
},
)
if managed := n.Config.Managed; managed != nil {
// Validate all the provisioners
for _, p := range managed.Provisioners {
var provisioner ResourceProvisioner
var provisionerSchema *configschema.Block
seq.Nodes = append(
seq.Nodes,
&EvalGetProvisioner{
Name: p.Type,
Output: &provisioner,
Schema: &provisionerSchema,
},
&EvalValidateProvisioner{
ResourceAddr: addr.Resource,
Provisioner: &provisioner,
Schema: &provisionerSchema,
Config: p,
},
)
}
}
return seq

View File

@ -1,22 +1,31 @@
package terraform
import (
"fmt"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
)
// NodeRootVariable represents a root variable input.
type NodeRootVariable struct {
Config *config.Variable
Addr addrs.InputVariable
Config *configs.Variable
}
var (
_ GraphNodeSubPath = (*NodeRootVariable)(nil)
_ GraphNodeReferenceable = (*NodeRootVariable)(nil)
)
func (n *NodeRootVariable) Name() string {
result := fmt.Sprintf("var.%s", n.Config.Name)
return result
return n.Addr.String()
}
// GraphNodeSubPath
func (n *NodeRootVariable) Path() addrs.ModuleInstance {
return addrs.RootModuleInstance
}
// GraphNodeReferenceable
func (n *NodeRootVariable) ReferenceableName() []string {
return []string{n.Name()}
func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable {
return []addrs.Referenceable{n.Addr}
}
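
For illustration only (not part of this change), a root variable node might be constructed as in the sketch below; the variable name is invented, and configs.Variable is assumed to carry at least a Name field, as used elsewhere in this commit.
func exampleRootVariableNode() *NodeRootVariable {
	// Hypothetical names; relies on the addrs and configs imports above.
	n := &NodeRootVariable{
		Addr:   addrs.InputVariable{Name: "region"},
		Config: &configs.Variable{Name: "region"},
	}
	// n.Name() yields "var.region", and ReferenceableAddrs returns that same
	// address so expressions referring to var.region can depend on this node.
	return n
}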

View File

@ -1,10 +1,17 @@
package terraform
import (
"strings"
"fmt"
"github.com/hashicorp/terraform/addrs"
)
// PathCacheKey returns a cache key for a module path.
func PathCacheKey(path []string) string {
return strings.Join(path, "|")
// PathObjectCacheKey is like PathCacheKey but includes an additional name
// to be included in the key, for module-namespaced objects.
//
// The result of this function is guaranteed unique for any distinct pair
// of path and name, but is not guaranteed to be in any particular format
// and in particular should never be shown to end-users.
func PathObjectCacheKey(path addrs.ModuleInstance, objectName string) string {
return fmt.Sprintf("%s|%s", path.String(), objectName)
}
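
As a quick sketch (illustrative only, assuming the addrs types shown elsewhere in this commit), a key for an object in a child module might be built like this:
func examplePathObjectCacheKey() string {
	// "network" and "vpc_id" are invented names for the example.
	path := addrs.ModuleInstance{
		addrs.ModuleInstanceStep{Name: "network"},
	}
	// Yields something like "module.network|vpc_id"; only uniqueness matters,
	// and the result is never shown to end users.
	return PathObjectCacheKey(path, "vpc_id")
}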

View File

@ -9,7 +9,13 @@ import (
"log"
"sync"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/terraform/version"
)
@ -31,9 +37,9 @@ type Plan struct {
// plan is applied.
Diff *Diff
// Module represents the entire configuration that was present when this
// Config represents the entire configuration that was present when this
// plan was created.
Module *module.Tree
Config *configs.Config
// State is the Terraform state that was current when this plan was
// created.
@ -44,7 +50,7 @@ type Plan struct {
// Vars retains the variables that were set when creating the plan, so
// that the same variables can be applied during apply.
Vars map[string]interface{}
Vars map[string]cty.Value
// Targets, if non-empty, contains a set of resource address strings that
// identify graph nodes that were selected as targets for plan.
@ -86,11 +92,13 @@ type Plan struct {
// If State is not provided, it is set from the plan. If it _is_ provided,
// it must be Equal to the state stored in plan, but may have a newer
// serial.
func (p *Plan) Context(opts *ContextOpts) (*Context, error) {
func (p *Plan) Context(opts *ContextOpts) (*Context, tfdiags.Diagnostics) {
var err error
opts, err = p.contextOpts(opts)
if err != nil {
return nil, err
var diags tfdiags.Diagnostics
diags = diags.Append(err)
return nil, diags
}
return NewContext(opts)
}
@ -101,11 +109,30 @@ func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) {
opts := base
opts.Diff = p.Diff
opts.Module = p.Module
opts.Targets = p.Targets
opts.Config = p.Config
opts.ProviderSHA256s = p.ProviderSHA256s
opts.Destroy = p.Destroy
if len(p.Targets) != 0 {
// We're still using target strings in the Plan struct, so we need to
// convert to our address representation here.
// FIXME: Change the Plan struct to use addrs.Targetable itself, and
// then handle these conversions when we read/write plans on disk.
targets := make([]addrs.Targetable, len(p.Targets))
for i, targetStr := range p.Targets {
traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(targetStr), "", hcl.Pos{})
if travDiags.HasErrors() {
return nil, travDiags
}
target, targDiags := addrs.ParseTarget(traversal)
if targDiags.HasErrors() {
return nil, targDiags.Err()
}
targets[i] = target.Subject
}
opts.Targets = targets
}
if opts.State == nil {
opts.State = p.State
} else if !opts.State.Equal(p.State) {
@ -128,9 +155,12 @@ func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) {
)
}
opts.Variables = make(map[string]interface{})
opts.Variables = make(InputValues)
for k, v := range p.Vars {
opts.Variables[k] = v
opts.Variables[k] = &InputValue{
Value: v,
SourceType: ValueFromPlan,
}
}
return opts, nil
@ -158,7 +188,7 @@ func (p *Plan) init() {
}
if p.Vars == nil {
p.Vars = make(map[string]interface{})
p.Vars = make(map[string]cty.Value)
}
})
}
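
For reference, the target-string conversion in contextOpts above reduces to the following sketch (illustrative only; the address string is invented, and the imports at the top of this file are assumed):
func exampleParseTargetString() (addrs.Targetable, error) {
	traversal, travDiags := hclsyntax.ParseTraversalAbs(
		[]byte("module.network.aws_instance.web[0]"), "", hcl.Pos{})
	if travDiags.HasErrors() {
		return nil, travDiags
	}
	target, targDiags := addrs.ParseTarget(traversal)
	if targDiags.HasErrors() {
		return nil, targDiags.Err()
	}
	// target.Subject is an addrs.Targetable; for this string it is an
	// addrs.AbsResourceInstance.
	return target.Subject, nil
}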

View File

@ -7,9 +7,15 @@ import (
"strconv"
"strings"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/terraform/config/hcl2shim"
"github.com/hashicorp/terraform/config"
"github.com/mitchellh/copystructure"
"github.com/mitchellh/reflectwalk"
"github.com/zclconf/go-cty/cty"
)
// ResourceProvisionerConfig is used to pair a provisioner
@ -25,9 +31,10 @@ type ResourceProvisionerConfig struct {
ConnInfo *config.RawConfig
}
// Resource encapsulates a resource, its configuration, its provider,
// its current state, and potentially a desired diff from the state it
// wants to reach.
// Resource is a legacy way to identify a particular resource instance.
//
// New code should use addrs.ResourceInstance instead. This is still here
// only for codepaths that haven't been updated yet.
type Resource struct {
// These are all used by the new EvalNode stuff.
Name string
@ -47,6 +54,31 @@ type Resource struct {
Flags ResourceFlag
}
// NewResource constructs a legacy Resource object from an
// addrs.ResourceInstance value.
//
// This is provided to shim to old codepaths that haven't been updated away
// from this type yet. Since this old type is not able to represent instances
// that have string keys, this function will panic if given a resource address
// that has a string key.
func NewResource(addr addrs.ResourceInstance) *Resource {
ret := &Resource{
Name: addr.Resource.Name,
Type: addr.Resource.Type,
}
if addr.Key != addrs.NoKey {
switch tk := addr.Key.(type) {
case addrs.IntKey:
ret.CountIndex = int(tk)
default:
panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key))
}
}
return ret
}
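
A minimal sketch of this shim in use (illustrative only; the resource names are invented):
func exampleNewResourceShim() *Resource {
	instAddr := addrs.ResourceInstance{
		Resource: addrs.Resource{
			Mode: addrs.ManagedResourceMode,
			Type: "aws_instance",
			Name: "web",
		},
		Key: addrs.IntKey(2),
	}
	// Yields a legacy Resource with Name "web", Type "aws_instance" and
	// CountIndex 2. A string (for_each-style) key would panic, as noted above.
	return NewResource(instAddr)
}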
// ResourceKind specifies what kind of instance we're working with, whether
// its a primary instance, a tainted instance, or an orphan.
type ResourceFlag byte
@ -72,6 +104,42 @@ type InstanceInfo struct {
uniqueExtra string
}
// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResource.
//
// In spite of the confusing name, an InstanceInfo actually identifies a
// particular resource rather than a particular resource instance.
// InstanceInfo is a legacy type, and uses of it should be gradually replaced
// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as
// appropriate.
//
// The legacy InstanceInfo type cannot represent module instances with instance
// keys, so this function will panic if given such a path. Uses of this type
// should all be removed or replaced before implementing "count" and "for_each"
// arguments on modules in order to avoid such panics.
func NewInstanceInfo(addr addrs.AbsResource) *InstanceInfo {
// We need an old-style []string module path for InstanceInfo.
path := make([]string, len(addr.Module))
for i, step := range addr.Module {
if step.InstanceKey != addrs.NoKey {
panic("NewInstanceInfo cannot convert module instance with key")
}
path[i] = step.Name
}
// This is a funny old meaning of "id" that is no longer current. It should
// not be used for anything users might see. Note that it does not include
// a representation of the resource mode, and so it's impossible to
// determine from an InstanceInfo alone whether it is a managed or data
// resource that is being referred to.
id := fmt.Sprintf("%s.%s", addr.Resource.Type, addr.Resource.Name)
return &InstanceInfo{
Id: id,
ModulePath: path,
Type: addr.Resource.Type,
}
}
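
Similarly, a sketch of the InstanceInfo shim (illustrative only; the names are invented):
func exampleNewInstanceInfoShim() *InstanceInfo {
	absAddr := addrs.AbsResource{
		Module: addrs.RootModuleInstance,
		Resource: addrs.Resource{
			Mode: addrs.ManagedResourceMode,
			Type: "aws_instance",
			Name: "web",
		},
	}
	// Id becomes "aws_instance.web" and ModulePath is empty for the root
	// module; the resource mode is not preserved, as the comment above warns.
	return NewInstanceInfo(absAddr)
}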
// HumanId is a unique Id that is human-friendly and useful for UI elements.
func (i *InstanceInfo) HumanId() string {
if i == nil {
@ -137,9 +205,9 @@ func (i *InstanceInfo) uniqueId() string {
return prefix
}
// ResourceConfig holds the configuration given for a resource. This is
// done instead of a raw `map[string]interface{}` type so that rich
// methods can be added to it to make dealing with it easier.
// ResourceConfig is a legacy type that was formerly used to represent
// interpolatable configuration blocks. It is now only used to shim to old
// APIs that still use this type, via NewResourceConfigShimmed.
type ResourceConfig struct {
ComputedKeys []string
Raw map[string]interface{}
@ -155,6 +223,98 @@ func NewResourceConfig(c *config.RawConfig) *ResourceConfig {
return result
}
// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy
// ResourceConfig object, so that it can be passed to older APIs that expect
// this wrapping.
//
// The returned ResourceConfig is already interpolated and cannot be
// re-interpolated. It is, therefore, useful only to functions that expect
// an already-populated ResourceConfig which they then treat as read-only.
//
// If the given value is not of an object type that conforms to the given
// schema then this function will panic.
func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig {
if !val.Type().IsObjectType() {
panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type()))
}
ret := &ResourceConfig{}
legacyVal := hcl2shim.ConfigValueFromHCL2(val)
ret.Config = legacyVal.(map[string]interface{}) // guaranteed compatible because we require an object type
ret.Raw = ret.Config
// Now we need to walk through our structure and find any unknown values,
// producing the separate list ComputedKeys to represent these. We use the
// schema here so that we can preserve the expected invariant
// that an attribute is always either wholly known or wholly unknown, while
// a child block can be partially unknown.
ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, schema, "")
return ret
}
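
A sketch of the shim in use (illustrative only; the schema and values are invented, and the configschema and cty imports already present in this file are assumed):
func exampleShimmedResourceConfig() *ResourceConfig {
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"ami":           {Type: cty.String, Optional: true},
			"instance_type": {Type: cty.String, Optional: true},
		},
	}
	val := cty.ObjectVal(map[string]cty.Value{
		"ami":           cty.UnknownVal(cty.String), // not yet known
		"instance_type": cty.StringVal("t2.micro"),
	})
	// Raw and Config hold the shimmed values; ComputedKeys lists the unknown
	// attribute "ami", per the walk implemented below.
	return NewResourceConfigShimmed(val, schema)
}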
// newResourceConfigShimmedComputedKeys finds all of the unknown values in the
// given object, which must conform to the given schema, returning them in
// the format that's expected for ResourceConfig.ComputedKeys.
func newResourceConfigShimmedComputedKeys(obj cty.Value, schema *configschema.Block, prefix string) []string {
var ret []string
ty := obj.Type()
for attrName := range schema.Attributes {
if !ty.HasAttribute(attrName) {
// Should never happen, but we'll tolerate it anyway
continue
}
attrVal := obj.GetAttr(attrName)
if !attrVal.IsWhollyKnown() {
ret = append(ret, prefix+attrName)
}
}
for typeName, blockS := range schema.BlockTypes {
if !ty.HasAttribute(typeName) {
// Should never happen, but we'll tolerate it anyway
continue
}
blockVal := obj.GetAttr(typeName)
switch blockS.Nesting {
case configschema.NestingSingle:
keys := newResourceConfigShimmedComputedKeys(blockVal, &blockS.Block, fmt.Sprintf("%s%s.", prefix, typeName))
ret = append(ret, keys...)
case configschema.NestingList, configschema.NestingSet:
// Producing computed keys items for sets is not really useful
// since they are not usefully addressable anyway, but we'll treat
// them like lists just so that ret.ComputedKeys accounts for them
// all. Our legacy system didn't support sets here anyway, so
// treating them as lists is the most accurate translation. Although
// set traversal isn't in any particular order, it is _stable_ as
// long as the list isn't mutated, and so we know we'll see the
// same order here as hcl2shim.ConfigValueFromHCL2 would've seen
// inside NewResourceConfigShimmed above.
i := 0
for it := blockVal.ElementIterator(); it.Next(); i++ {
_, subVal := it.Element()
subPrefix := fmt.Sprintf("%s%d.", prefix, i)
keys := newResourceConfigShimmedComputedKeys(subVal, &blockS.Block, subPrefix)
ret = append(ret, keys...)
}
case configschema.NestingMap:
for it := blockVal.ElementIterator(); it.Next(); {
subK, subVal := it.Element()
subPrefix := fmt.Sprintf("%s%s.", prefix, subK.AsString())
keys := newResourceConfigShimmedComputedKeys(subVal, &blockS.Block, subPrefix)
ret = append(ret, keys...)
}
default:
// Should never happen, since the above is exhaustive.
panic(fmt.Errorf("unsupported block nesting type %s", blockS.Nesting))
}
}
return ret
}
// DeepCopy performs a deep copy of the configuration. This makes it safe
// to modify any of the structures that are part of the resource config without
// affecting the original configuration.
@ -374,6 +534,14 @@ func (c *ResourceConfig) get(
// refactor is complete.
func (c *ResourceConfig) interpolateForce() {
if c.raw == nil {
// If we don't have a lowercase "raw" but we _do_ have the uppercase
// Raw populated then this indicates that we're receiving a shim
// ResourceConfig created by NewResourceConfigShimmed, which is already
// fully evaluated and thus this function doesn't need to do anything.
if c.Raw != nil {
return
}
var err error
c.raw, err = config.NewRawConfig(make(map[string]interface{}))
if err != nil {

View File

@ -289,6 +289,144 @@ func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAd
return addr, nil
}
// NewLegacyResourceAddress creates a ResourceAddress from a new-style
// addrs.AbsResource value.
//
// This is provided for shimming purposes so that we can still easily call into
// older functions that expect the ResourceAddress type.
func NewLegacyResourceAddress(addr addrs.AbsResource) *ResourceAddress {
ret := &ResourceAddress{
Type: addr.Resource.Type,
Name: addr.Resource.Name,
}
switch addr.Resource.Mode {
case addrs.ManagedResourceMode:
ret.Mode = config.ManagedResourceMode
case addrs.DataResourceMode:
ret.Mode = config.DataResourceMode
default:
panic(fmt.Errorf("cannot shim %s to legacy config.ResourceMode value", addr.Resource.Mode))
}
path := make([]string, len(addr.Module))
for i, step := range addr.Module {
if step.InstanceKey != addrs.NoKey {
// At the time of writing this can't happen because we don't
// yet generate keyed module instances. This legacy codepath must
// be removed before we can support "count" and "for_each" for
// modules.
panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey))
}
path[i] = step.Name
}
ret.Path = path
ret.Index = -1
return ret
}
// NewLegacyResourceInstanceAddress creates a ResourceAddress from a new-style
// addrs.AbsResourceInstance value.
//
// This is provided for shimming purposes so that we can still easily call into
// older functions that expect the ResourceAddress type.
func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceAddress {
ret := &ResourceAddress{
Type: addr.Resource.Resource.Type,
Name: addr.Resource.Resource.Name,
}
switch addr.Resource.Resource.Mode {
case addrs.ManagedResourceMode:
ret.Mode = config.ManagedResourceMode
case addrs.DataResourceMode:
ret.Mode = config.DataResourceMode
default:
panic(fmt.Errorf("cannot shim %s to legacy config.ResourceMode value", addr.Resource.Resource.Mode))
}
path := make([]string, len(addr.Module))
for i, step := range addr.Module {
if step.InstanceKey != addrs.NoKey {
// At the time of writing this can't happen because we don't
// yet generate keyed module instances. This legacy codepath must
// be removed before we can support "count" and "for_each" for
// modules.
panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey))
}
path[i] = step.Name
}
ret.Path = path
if addr.Resource.Key == addrs.NoKey {
ret.Index = 0
} else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok {
ret.Index = int(ik)
} else {
panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key))
}
return ret
}
// AbsResourceInstanceAddr converts the receiver, a legacy resource address, to
// the new resource address type addrs.AbsResourceInstance.
//
// This method can be used only on an address that has a resource specification.
// It will panic if called on a module-path-only ResourceAddress. Use
// method HasResourceSpec to check before calling, in contexts where it is
// unclear.
//
// addrs.AbsResourceInstance does not represent the "tainted" and "deposed"
// states, and so if these are present on the receiver then they are discarded.
//
// This is provided for shimming purposes so that we can easily adapt functions
// that are returning the legacy ResourceAddress type, for situations where
// the new type is required.
func (addr *ResourceAddress) AbsResourceInstanceAddr() addrs.AbsResourceInstance {
if !addr.HasResourceSpec() {
panic("AbsResourceInstanceAddr called on ResourceAddress with no resource spec")
}
ret := addrs.AbsResourceInstance{
Module: addr.ModuleInstanceAddr(),
Resource: addrs.ResourceInstance{
Resource: addrs.Resource{
Type: addr.Type,
Name: addr.Name,
},
},
}
switch addr.Mode {
case config.ManagedResourceMode:
ret.Resource.Resource.Mode = addrs.ManagedResourceMode
case config.DataResourceMode:
ret.Resource.Resource.Mode = addrs.DataResourceMode
default:
panic(fmt.Errorf("cannot shim %s to addrs.ResourceMode value", addr.Mode))
}
if addr.Index != -1 {
ret.Resource.Key = addrs.IntKey(addr.Index)
}
return ret
}
// ModuleInstanceAddr returns the module path portion of the receiver as a
// addrs.ModuleInstance value.
func (addr *ResourceAddress) ModuleInstanceAddr() addrs.ModuleInstance {
path := make(addrs.ModuleInstance, len(addr.Path))
for i, name := range addr.Path {
path[i] = addrs.ModuleInstanceStep{Name: name}
}
return path
}
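
Tying these shims together, a round-trip sketch (illustrative only; the address is invented, and ParseResourceAddress is the pre-existing legacy parser in this package):
func exampleAddressRoundTrip() (*ResourceAddress, error) {
	legacy, err := ParseResourceAddress("module.network.aws_instance.web[1]")
	if err != nil {
		return nil, err
	}
	// Up-convert to the new address type, then shim back to the legacy type.
	// Any tainted/deposed markers would be discarded along the way.
	newStyle := legacy.AbsResourceInstanceAddr()
	return NewLegacyResourceInstanceAddress(newStyle), nil
}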
// Contains returns true if and only if the given node is contained within
// the receiver.
//

View File

@ -1,9 +1,20 @@
package terraform
import (
"github.com/hashicorp/terraform/config/configschema"
)
// ResourceProvisioner is an interface that must be implemented by any
// resource provisioner: the thing that initializes resources in
// a Terraform configuration.
type ResourceProvisioner interface {
// GetConfigSchema returns the schema for the provisioner type's main
// configuration block. This is called prior to Validate to enable some
// basic structural validation to be performed automatically and to allow
// the configuration to be properly extracted from potentially-ambiguous
// configuration file formats.
GetConfigSchema() (*configschema.Block, error)
// Validate is called once at the beginning with the raw
// configuration (no interpolation done) and can return a list of warnings
// and/or errors.
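
As an illustration of the new method (not part of this change), a provisioner's GetConfigSchema might return a schema along these lines; the "command" attribute is invented for the example and a cty import is assumed in addition to configschema:
func exampleProvisionerConfigSchema() (*configschema.Block, error) {
	return &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"command": {
				Type:     cty.String,
				Required: true,
			},
		},
	}, nil
}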

View File

@ -2,11 +2,13 @@ package terraform
import (
"fmt"
"strings"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty/convert"
)
// GraphSemanticChecker is the interface that semantic checks across
@ -49,84 +51,87 @@ type SemanticChecker interface {
Check(*dag.Graph, dag.Vertex) error
}
// smcUserVariables does all the semantic checks to verify that the
// variables given satisfy the configuration itself.
func smcUserVariables(c *config.Config, vs map[string]interface{}) []error {
var errs []error
// checkInputVariables ensures that variable values supplied at the UI conform
// to their corresponding declarations in configuration.
//
// The set of values is considered valid only if the returned diagnostics
// does not contain errors. A valid set of values may still produce warnings,
// which should be returned to the user.
func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
cvs := make(map[string]*config.Variable)
for _, v := range c.Variables {
cvs[v.Name] = v
}
// Check that all required variables are present
required := make(map[string]struct{})
for _, v := range c.Variables {
if v.Required() {
required[v.Name] = struct{}{}
}
}
for k, _ := range vs {
delete(required, k)
}
if len(required) > 0 {
for k, _ := range required {
errs = append(errs, fmt.Errorf(
"Required variable not set: %s", k))
}
}
// Check that types match up
for name, proposedValue := range vs {
// Check for "map.key" fields. These stopped working with Terraform
// 0.7 but we do this to surface a better error message informing
// the user what happened.
if idx := strings.Index(name, "."); idx > 0 {
key := name[:idx]
if _, ok := cvs[key]; ok {
errs = append(errs, fmt.Errorf(
"%s: Overriding map keys with the format `name.key` is no "+
"longer allowed. You may still override keys by setting "+
"`name = { key = value }`. The maps will be merged. This "+
"behavior appeared in 0.7.0.", name))
continue
}
}
schema, ok := cvs[name]
if !ok {
for name, vc := range vcs {
val, isSet := vs[name]
if !isSet {
// Always an error, since the caller should already have included
// default values from the configuration in the values map.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Unassigned variable",
fmt.Sprintf("The input variable %q has not been assigned a value. This is a bug in Terraform; please report it in a GitHub issue.", name),
))
continue
}
declaredType := schema.Type()
wantType := vc.Type
switch declaredType {
case config.VariableTypeString:
switch proposedValue.(type) {
case string:
continue
}
case config.VariableTypeMap:
switch v := proposedValue.(type) {
case map[string]interface{}:
continue
case []map[string]interface{}:
// if we have a list of 1 map, it will get coerced later as needed
if len(v) == 1 {
continue
}
}
case config.VariableTypeList:
switch proposedValue.(type) {
case []interface{}:
continue
// A given value is valid if it can convert to the desired type.
_, err := convert.Convert(val.Value, wantType)
if err != nil {
switch val.SourceType {
case ValueFromConfig, ValueFromFile:
// We have source location information for these.
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid value for input variable",
Detail: fmt.Sprintf("The given value is not valid for variable %q: %s.", name, err),
Subject: val.SourceRange.ToHCL().Ptr(),
})
case ValueFromEnvVar:
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Invalid value for input variable",
fmt.Sprintf("The environment variable TF_VAR_%s does not contain a valid value for variable %q: %s.", name, name, err),
))
case ValueFromCLIArg:
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Invalid value for input variable",
fmt.Sprintf("The argument -var=\"%s=...\" does not contain a valid value for variable %q: %s.", name, name, err),
))
case ValueFromInput:
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Invalid value for input variable",
fmt.Sprintf("The value entered for variable %q is not valid: %s.", name, err),
))
default:
// The above gets us good coverage for the situations users
// are likely to encounter with their own inputs. The other
// cases are generally implementation bugs, so we'll just
// use a generic error for these.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Invalid value for input variable",
fmt.Sprintf("The value provided for variable %q is not valid: %s.", name, err),
))
}
}
errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s",
name, declaredType.Printable(), hclTypeName(proposedValue)))
}
// TODO(mitchellh): variables that are unknown
// Check for any variables that are assigned without being configured.
// This is always an implementation error in the caller, because we
// expect undefined variables to be caught during context construction
// where there is better context to report it well.
for name := range vs {
if _, defined := vcs[name]; !defined {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Value assigned to undeclared variable",
fmt.Sprintf("A value was assigned to an undeclared input variable %q.", name),
))
}
}
return errs
return diags
}
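The validity rule above is just cty type conversion; here is a self-contained sketch (the values are illustrative, not from the diff) of what convert.Convert will and will not accept:

// Standalone illustration of the conversion check used by checkInputVariables.
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// A string that looks like a number converts to cty.Number, so it would pass.
	if _, err := convert.Convert(cty.StringVal("5"), cty.Number); err == nil {
		fmt.Println(`"5" is acceptable for a number-typed variable`)
	}

	// A list value has no conversion to string, so it would be rejected.
	bad := cty.ListVal([]cty.Value{cty.StringVal("a")})
	if _, err := convert.Convert(bad, cty.String); err != nil {
		fmt.Println("list value rejected for a string-typed variable:", err)
	}
}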

View File

@ -2,56 +2,43 @@ package terraform
import (
"testing"
"github.com/zclconf/go-cty/cty"
)
func TestSMCUserVariables(t *testing.T) {
c := testConfig(t, "smc-uservars")
c := testModule(t, "smc-uservars")
// Required variables not set
errs := smcUserVariables(c, nil)
if len(errs) == 0 {
t.Fatal("should have errors")
diags := checkInputVariables(c.Module.Variables, nil)
if !diags.HasErrors() {
t.Fatal("check succeeded, but want errors")
}
// Required variables set, optional variables unset
errs = smcUserVariables(c, map[string]interface{}{"foo": "bar"})
if len(errs) != 0 {
t.Fatalf("err: %#v", errs)
}
// Mapping element override
errs = smcUserVariables(c, map[string]interface{}{
"foo": "bar",
"map.foo": "baz",
diags = checkInputVariables(c.Module.Variables, InputValues{
"foo": &InputValue{
Value: cty.StringVal("bar"),
SourceType: ValueFromCLIArg,
},
})
if len(errs) == 0 {
t.Fatalf("err: %#v", errs)
if diags.HasErrors() {
t.Fatalf("unexpected errors: %s", diags.Err())
}
// Mapping complete override
errs = smcUserVariables(c, map[string]interface{}{
"foo": "bar",
"map": "baz",
})
if len(errs) == 0 {
t.Fatal("should have errors")
}
}
func TestSMCUserVariables_mapFromJSON(t *testing.T) {
c := testConfig(t, "uservars-map")
// ensure that a single map in a list can satisfy a map variable, since it
// will be coerced later to a map
err := smcUserVariables(c, map[string]interface{}{
"test_map": []map[string]interface{}{
map[string]interface{}{
"foo": "bar",
},
diags = checkInputVariables(c.Module.Variables, InputValues{
"foo": &InputValue{
Value: cty.StringVal("bar"),
SourceType: ValueFromCLIArg,
},
"map": &InputValue{
Value: cty.StringVal("baz"),
SourceType: ValueFromCLIArg,
},
})
if err != nil {
t.Fatal(err)
if !diags.HasErrors() {
t.Fatal("check succeeded, but want errors")
}
}

View File

@ -19,13 +19,17 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/go-version"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/tfdiags"
tfversion "github.com/hashicorp/terraform/version"
"github.com/mitchellh/copystructure"
"github.com/zclconf/go-cty/cty"
ctyjson "github.com/zclconf/go-cty/cty/json"
"github.com/mitchellh/copystructure"
)
const (
@ -36,26 +40,38 @@ const (
// rootModulePath is the path of the root module
var rootModulePath = []string{"root"}
// normalizeModulePath transforms a legacy module path (which may or may not
// have a redundant "root" label at the start of it) into an
// addrs.ModuleInstance representing the same module.
//
// For legacy reasons, different parts of Terraform disagree about whether the
// root module has the path []string{} or []string{"root"}, and so this
// function accepts both and trims off the "root". An implication of this is
// that it's not possible to actually have a module call in the root module
// that is itself named "root", since that would be ambiguous.
//
// normalizeModulePath takes a raw module path and returns a path that
// has the rootModulePath prepended to it. If I could go back in time I
// would've never had a rootModulePath (empty path would be root). We can
// still fix this but that's a big refactor that my branch doesn't make sense
// for. Instead, this function normalizes paths.
func normalizeModulePath(p []string) []string {
k := len(rootModulePath)
func normalizeModulePath(p []string) addrs.ModuleInstance {
// FIXME: Remove this once everyone is using addrs.ModuleInstance.
// If we already have a root module prefix, we're done
if len(p) >= len(rootModulePath) {
if reflect.DeepEqual(p[:k], rootModulePath) {
return p
}
if len(p) > 0 && p[0] == "root" {
p = p[1:]
}
// None? Prefix it
result := make([]string, len(rootModulePath)+len(p))
copy(result, rootModulePath)
copy(result[k:], p)
return result
ret := make(addrs.ModuleInstance, len(p))
for i, name := range p {
// For now we don't actually support modules with multiple instances
// identified by keys, so we just treat every path element as a
// step with no key.
ret[i] = addrs.ModuleInstanceStep{
Name: name,
}
}
return ret
}
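A quick sketch of the normalization behavior described above (the paths are illustrative): both legacy spellings of the same child module path produce the same addrs.ModuleInstance:

// Both legacy spellings of the same child module path normalize identically.
withRoot := normalizeModulePath([]string{"root", "child"})
withoutRoot := normalizeModulePath([]string{"child"})
fmt.Println(withRoot.String())                         // module.child
fmt.Println(withRoot.String() == withoutRoot.String()) // true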
// State keeps track of a snapshot state-of-the-world that Terraform
@ -141,21 +157,37 @@ func (s *State) children(path []string) []*ModuleState {
//
// This should be the preferred method to add module states since it
// allows us to optimize lookups later as well as control sorting.
func (s *State) AddModule(path []string) *ModuleState {
func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState {
s.Lock()
defer s.Unlock()
return s.addModule(path)
}
func (s *State) addModule(path []string) *ModuleState {
func (s *State) addModule(path addrs.ModuleInstance) *ModuleState {
// check if the module exists first
m := s.moduleByPath(path)
if m != nil {
return m
}
m = &ModuleState{Path: path}
// Lower the new-style address into a legacy-style address.
// This requires that none of the steps have instance keys, which is
// true for all addresses at the time of implementing this because
// "count" and "for_each" are not yet implemented for modules.
legacyPath := make([]string, len(path))
for i, step := range path {
if step.InstanceKey != addrs.NoKey {
// FIXME: Once the rest of Terraform is ready to use count and
// for_each, remove all of this and just write the addrs.ModuleInstance
// value itself into the ModuleState.
panic("state cannot represent modules with count or for_each keys")
}
legacyPath[i] = step.Name
}
m = &ModuleState{Path: legacyPath}
m.init()
s.Modules = append(s.Modules, m)
s.sort()
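One consequence worth noting, sketched below with assumed values (state is an assumed *State in scope): module instance addresses that carry an instance key cannot be lowered yet, so handing one to AddModule panics rather than silently losing the key:

// Keyless paths lower cleanly to the legacy []string form stored in ModuleState.
ok := addrs.ModuleInstance{
	{Name: "network"},
}
_ = state.AddModule(ok)

// A keyed step cannot be represented in ModuleState yet, so this would panic
// with "state cannot represent modules with count or for_each keys".
keyed := addrs.ModuleInstance{
	{Name: "network", InstanceKey: addrs.IntKey(0)},
}
_ = keyed // state.AddModule(keyed) would panic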
@ -165,7 +197,7 @@ func (s *State) addModule(path []string) *ModuleState {
// ModuleByPath is used to lookup the module state for the given path.
// This should be the preferred lookup mechanism as it allows for future
// lookup optimizations.
func (s *State) ModuleByPath(path []string) *ModuleState {
func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState {
if s == nil {
return nil
}
@ -175,7 +207,7 @@ func (s *State) ModuleByPath(path []string) *ModuleState {
return s.moduleByPath(path)
}
func (s *State) moduleByPath(path []string) *ModuleState {
func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState {
for _, mod := range s.Modules {
if mod == nil {
continue
@ -183,7 +215,8 @@ func (s *State) moduleByPath(path []string) *ModuleState {
if mod.Path == nil {
panic("missing module path")
}
if reflect.DeepEqual(mod.Path, path) {
modPath := normalizeModulePath(mod.Path)
if modPath.String() == path.String() {
return mod
}
}
@ -397,8 +430,9 @@ func (s *State) Remove(addr ...string) error {
// Go through each result and grab what we need
removed := make(map[interface{}]struct{})
for _, r := range results {
// Convert the path to our own type
path := append([]string{"root"}, r.Path...)
// Convert the legacy path used by the state filter API into a
// new-style module instance address.
path := normalizeModulePath(r.Path)
// If we removed this already, then ignore
if _, ok := removed[r.Value]; ok {
@ -435,7 +469,7 @@ func (s *State) Remove(addr ...string) error {
return nil
}
func (s *State) removeModule(path []string, v *ModuleState) {
func (s *State) removeModule(path addrs.ModuleInstance, v *ModuleState) {
for i, m := range s.Modules {
if m == v {
s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil
@ -444,7 +478,7 @@ func (s *State) removeModule(path []string, v *ModuleState) {
}
}
func (s *State) removeResource(path []string, v *ResourceState) {
func (s *State) removeResource(path addrs.ModuleInstance, v *ResourceState) {
// Get the module this resource lives in. If it doesn't exist, we're done.
mod := s.moduleByPath(path)
if mod == nil {
@ -463,7 +497,7 @@ func (s *State) removeResource(path []string, v *ResourceState) {
}
}
func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) {
func (s *State) removeInstance(path addrs.ModuleInstance, r *ResourceState, v *InstanceState) {
// Go through the resource and find the instance that matches this
// (if any) and remove it.
@ -490,7 +524,7 @@ func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState
// RootModule returns the ModuleState for the root module
func (s *State) RootModule() *ModuleState {
root := s.ModuleByPath(rootModulePath)
root := s.ModuleByPath(addrs.RootModuleInstance)
if root == nil {
panic("missing root module")
}
@ -525,7 +559,7 @@ func (s *State) equal(other *State) bool {
}
for _, m := range s.Modules {
// This isn't very efficient currently, but it works.
otherM := other.moduleByPath(m.Path)
otherM := other.moduleByPath(normalizeModulePath(m.Path))
if otherM == nil {
return false
}
@ -684,8 +718,8 @@ func (s *State) init() {
s.Version = StateVersion
}
if s.moduleByPath(rootModulePath) == nil {
s.addModule(rootModulePath)
if s.moduleByPath(addrs.RootModuleInstance) == nil {
s.addModule(addrs.RootModuleInstance)
}
s.ensureHasLineage()
@ -1092,58 +1126,60 @@ func (m *ModuleState) IsDescendent(other *ModuleState) bool {
// Orphans returns the addresses of resource instances that are present in the
// State but no longer declared in the given configuration. These instances
// are orphans: they exist in state with no corresponding configuration.
func (m *ModuleState) Orphans(c *config.Config) []string {
func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance {
m.Lock()
defer m.Unlock()
keys := make(map[string]struct{})
for k := range m.Resources {
keys[k] = struct{}{}
}
inConfig := make(map[string]struct{})
if c != nil {
for _, r := range c.Resources {
delete(keys, r.Id())
for k := range keys {
if strings.HasPrefix(k, r.Id()+".") {
delete(keys, k)
}
}
for _, r := range c.ManagedResources {
inConfig[r.Addr().String()] = struct{}{}
}
for _, r := range c.DataResources {
inConfig[r.Addr().String()] = struct{}{}
}
}
result := make([]string, 0, len(keys))
for k := range keys {
result = append(result, k)
}
var result []addrs.ResourceInstance
for k := range m.Resources {
// Since we've not yet updated state to use our new address format,
// we need to do some shimming here.
legacyAddr, err := parseResourceAddressInternal(k)
if err != nil {
// Suggests that the user tampered with the state, since we always
// generate valid internal addresses.
log.Printf("ModuleState has invalid resource key %q. Ignoring.", k)
continue
}
addr := legacyAddr.AbsResourceInstanceAddr().Resource
compareKey := addr.Resource.String() // compare by resource address, ignoring instance key
if _, exists := inConfig[compareKey]; !exists {
result = append(result, addr)
}
}
return result
}
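Usage sketch (the cfg and state variables are assumptions, standing in for a loaded *configs.Config and a *State): walking the orphans for the root module with the new address type:

// Sketch: report resource instances that exist in state but not in config.
mod := state.ModuleByPath(addrs.RootModuleInstance)
for _, instAddr := range mod.Orphans(cfg.Module) {
	fmt.Printf("orphaned in state: %s\n", instAddr)
}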
// RemovedOutputs returns a list of outputs that are in the State but aren't
// present in the configuration itself.
func (m *ModuleState) RemovedOutputs(c *config.Config) []string {
m.Lock()
defer m.Unlock()
keys := make(map[string]struct{})
for k := range m.Outputs {
keys[k] = struct{}{}
func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue {
if len(outputs) == 0 {
return nil
}
s.Lock()
defer s.Unlock()
if c != nil {
for _, o := range c.Outputs {
delete(keys, o.Name)
var ret []addrs.OutputValue
for n := range s.Outputs {
if _, declared := outputs[n]; !declared {
ret = append(ret, addrs.OutputValue{
Name: n,
})
}
}
result := make([]string, 0, len(keys))
for k := range keys {
result = append(result, k)
}
return result
return ret
}
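And similarly for outputs (same assumed variables as in the sketch above):

// Sketch: report outputs recorded in state that are no longer declared.
for _, out := range mod.RemovedOutputs(cfg.Module.Outputs) {
	fmt.Printf("output %q is no longer declared in configuration\n", out.Name)
}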
// View returns a view with the given resource prefix.
@ -1546,6 +1582,24 @@ func (s *ResourceState) Untaint() {
}
}
// ProviderAddr returns the provider address for the receiver, by parsing the
// string representation saved in state. An error can be returned if the
// value in state is corrupt.
func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) {
var diags tfdiags.Diagnostics
str := s.Provider
traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
diags = diags.Append(travDiags)
if travDiags.HasErrors() {
return addrs.AbsProviderConfig{}, diags.Err()
}
addr, addrDiags := addrs.ParseAbsProviderConfig(traversal)
diags = diags.Append(addrDiags)
return addr, diags.Err()
}
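For reference, a sketch of the sort of provider strings this is intended to re-parse (the example addresses and variable names are assumptions, not taken from the diff):

// The Provider field in state holds a provider config address in string form,
// which ProviderAddr re-parses as an HCL traversal, e.g.:
//
//   provider.aws                  (default aws config in the root module)
//   provider.aws.west             (aliased aws config "west")
//   module.child.provider.aws     (aws config declared inside module.child)
//
rs := &ResourceState{Provider: "module.child.provider.aws"}
if addr, err := rs.ProviderAddr(); err == nil {
	fmt.Println(addr.String()) // expected: module.child.provider.aws
}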
func (s *ResourceState) init() {
s.Lock()
defer s.Unlock()
@ -2190,17 +2244,27 @@ func (s moduleStateSort) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// StateCompatible returns an error if the state is not compatible with the
// current version of terraform.
func CheckStateVersion(state *State) error {
// CheckStateVersion returns error diagnostics if the state is not compatible
// with the current version of Terraform Core.
func CheckStateVersion(state *State, allowFuture bool) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
if state == nil {
return nil
return diags
}
if state.FromFutureTerraform() {
return fmt.Errorf(stateInvalidTerraformVersionErr, state.TFVersion)
if state.FromFutureTerraform() && !allowFuture {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Incompatible Terraform state format",
fmt.Sprintf(
"For safety reasons, Terraform will not run operations against a state that was written by a future Terraform version. Your current version is %s, but the state requires at least %s. To proceed, upgrade Terraform to a suitable version.",
tfversion.String(), state.TFVersion,
),
))
}
return nil
return diags
}
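Callers now consume this as diagnostics rather than a bare error; a short sketch (the state variable is assumed to be in scope):

// Sketch: refuse to operate on state written by a newer Terraform unless the
// caller explicitly opts in via allowFuture.
if diags := CheckStateVersion(state, false); diags.HasErrors() {
	return diags.Err()
}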
const stateValidateErrMultiModule = `
@ -2211,11 +2275,3 @@ in your state file that point to the same module. This will cause Terraform
to behave in unexpected and error prone ways and is invalid. Please back up
and modify your state file manually to resolve this.
`
const stateInvalidTerraformVersionErr = `
Terraform doesn't allow running any operations against a state
that was written by a future Terraform version. The state is
reporting it is written by Terraform '%s'
Please run at least that version of Terraform to continue.
`

View File

@ -91,7 +91,7 @@ func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw i
src := root.(*ModuleState).deepcopy()
// If the target module exists, it is an error
path := append([]string{"root"}, addr.Path...)
path := normalizeModulePath(addr.Path)
if s.ModuleByPath(path) != nil {
return fmt.Errorf("module target is not empty: %s", addr)
}
@ -317,7 +317,7 @@ func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) {
addType := detectAddrAddLoc(addr)
// Get the module
path := append([]string{"root"}, addr.Path...)
path := normalizeModulePath(addr.Path)
exists := true
mod := s.ModuleByPath(path)
if mod == nil {

Some files were not shown because too many files have changed in this diff.