use recorded changes for outputs

We record output changes in the plan, but don't currently use them for
anything other than display. If we have a wholly known output value
stored in the plan, we should prefer that for apply in order to ensure
consistency with the planned values. This also avoids cases where
evaluation during apply cannot happen correctly, like when all resources
are being removed or we are executing a destroy.
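
A minimal sketch of the first point (not part of this commit): during apply,
prefer the recorded planned value when it is wholly known, and only
re-evaluate the expression otherwise. The resolveOutput helper and its
evaluate callback are hypothetical stand-ins for the NodeApplyableOutput
logic shown in the diff below.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// resolveOutput prefers a wholly known recorded value and falls back to
// re-evaluation when the plan only had partial (unknown) knowledge.
func resolveOutput(recorded *cty.Value, evaluate func() cty.Value) cty.Value {
	if recorded != nil && recorded.IsWhollyKnown() {
		return *recorded
	}
	return evaluate()
}

func main() {
	// A wholly known planned value is used as-is.
	known := cty.StringVal("ami-1234")
	fmt.Printf("%#v\n", resolveOutput(&known, nil))

	// An unknown planned value forces re-evaluation.
	unknown := cty.UnknownVal(cty.String)
	fmt.Printf("%#v\n", resolveOutput(&unknown, func() cty.Value {
		return cty.StringVal("evaluated-at-apply")
	}))
}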

We also need to record output Delete changes when the plan is for a
destroy operation. Otherwise, without a change, the apply step will
attempt to evaluate the outputs, causing errors or leaving them in the
state with stale values.
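
The second point, sketched against the plans/addrs/cty calls that appear in
the NodeDestroyableOutput diff below: when planning a destroy, record a
placeholder Delete change for each root output so the apply step removes it
from state rather than evaluating it. The output name "example" is
hypothetical, and changes is assumed to come from ctx.Changes().

addr := addrs.OutputValue{Name: "example"}.Absolute(addrs.RootModuleInstance)

change := &plans.OutputChange{
	Addr: addr,
	Change: plans.Change{
		// Placeholder delete; the prior value is not carried along.
		Action: plans.Delete,
		Before: cty.NullVal(cty.DynamicPseudoType),
		After:  cty.NullVal(cty.DynamicPseudoType),
	},
}

cs, err := change.Encode()
if err != nil {
	// Should never happen for a change we just constructed.
	panic(err)
}

changes.RemoveOutputChange(addr) // drop any existing planned change
changes.AppendOutputChange(cs)   // record the Delete for apply
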
James Bardin 2020-10-09 11:09:36 -04:00
parent eb2a027684
commit d8e6d66362
5 changed files with 145 additions and 32 deletions

View File

@@ -9142,10 +9142,13 @@ func TestContext2Apply_plannedDestroyInterpolatedCount(t *testing.T) {
}
// Applying the plan should now succeed
_, diags = ctx.Apply()
state, diags = ctx.Apply()
if diags.HasErrors() {
t.Fatalf("apply failed: %s", diags.Err())
}
if !state.Empty() {
t.Fatalf("state not empty: %s\n", state)
}
}
func TestContext2Apply_scaleInMultivarRef(t *testing.T) {

View File

@@ -91,7 +91,7 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
&RootVariableTransformer{Config: b.Config},
&ModuleVariableTransformer{Config: b.Config},
&LocalTransformer{Config: b.Config},
&OutputTransformer{Config: b.Config},
&OutputTransformer{Config: b.Config, Changes: b.Changes},
// Creates all the resource instances represented in the diff, along
// with dependency edges against the whole-resource nodes added by

View File

@@ -72,6 +72,11 @@ func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
State: b.State,
},
&OutputTransformer{
Config: b.Config,
Destroy: true,
},
// Attach the state
&AttachStateTransformer{State: b.State},

View File

@@ -11,15 +11,18 @@ import (
"github.com/hashicorp/terraform/lang"
"github.com/hashicorp/terraform/plans"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
)
// nodeExpandOutput is the placeholder for an output that has not yet had
// its module path expanded.
type nodeExpandOutput struct {
Addr addrs.OutputValue
Module addrs.Module
Config *configs.Output
Addr addrs.OutputValue
Module addrs.Module
Config *configs.Output
Changes []*plans.OutputChangeSrc
Destroy bool
}
var (
@@ -39,16 +42,62 @@ func (n *nodeExpandOutput) temporaryValue() bool {
}
func (n *nodeExpandOutput) DynamicExpand(ctx EvalContext) (*Graph, error) {
if n.Destroy {
return n.planDestroyOutputs(ctx)
}
var g Graph
expander := ctx.InstanceExpander()
for _, module := range expander.ExpandModule(n.Module) {
absAddr := n.Addr.Absolute(module)
// Find any recorded change for this output
var change *plans.OutputChangeSrc
for _, c := range n.Changes {
if c.Addr.String() == absAddr.String() {
change = c
break
}
}
o := &NodeApplyableOutput{
Addr: n.Addr.Absolute(module),
Addr: absAddr,
Config: n.Config,
Change: change,
}
log.Printf("[TRACE] Expanding output: adding %s as %T", o.Addr.String(), o)
g.Add(o)
}
return &g, nil
}
// if we're planning a destroy operation, add destroy nodes for all root outputs
// in the state.
func (n *nodeExpandOutput) planDestroyOutputs(ctx EvalContext) (*Graph, error) {
// we only need to plan destroying root outputs
// Other module outputs may be used during destruction by providers that
// need to interpolate values.
if !n.Module.IsRoot() {
return nil, nil
}
state := ctx.State()
if state == nil {
return nil, nil
}
ms := state.Module(addrs.RootModuleInstance)
var g Graph
for _, output := range ms.OutputValues {
o := &NodeDestroyableOutput{
Addr: output.Addr,
Config: n.Config,
}
log.Printf("[TRACE] Expanding output: adding %s as %T", o.Addr.String(), o)
g.Add(o)
}
return &g, nil
}
@@ -108,6 +157,8 @@ func (n *nodeExpandOutput) References() []*addrs.Reference {
type NodeApplyableOutput struct {
Addr addrs.AbsOutputValue
Config *configs.Output // Config is the output in the config
// If this is being evaluated during apply, we may have a change recorded already
Change *plans.OutputChangeSrc
}
var (
@@ -199,27 +250,7 @@ func (n *NodeApplyableOutput) References() []*addrs.Reference {
// GraphNodeExecutable
func (n *NodeApplyableOutput) Execute(ctx EvalContext, op walkOperation) error {
// This has to run before we have a state lock, since evaluation also
// reads the state
val, diags := ctx.EvaluateExpr(n.Config.Expr, cty.DynamicPseudoType, nil)
// We'll handle errors below, after we have loaded the module.
// Outputs don't have a separate mode for validation, so validate
// depends_on expressions here too
diags = diags.Append(validateDependsOn(ctx, n.Config.DependsOn))
// Ensure that non-sensitive outputs don't include sensitive values
_, marks := val.UnmarkDeep()
_, hasSensitive := marks["sensitive"]
if !n.Config.Sensitive && hasSensitive {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Output refers to sensitive values",
Detail: "Expressions used in outputs can only refer to sensitive values if the sensitive attribute is true.",
Subject: n.Config.DeclRange.Ptr(),
})
}
var diags tfdiags.Diagnostics
state := ctx.State()
if state == nil {
return nil
@@ -227,6 +258,40 @@ func (n *NodeApplyableOutput) Execute(ctx EvalContext, op walkOperation) error {
changes := ctx.Changes() // may be nil, if we're not working on a changeset
val := cty.UnknownVal(cty.DynamicPseudoType)
changeRecorded := n.Change != nil
// If we have a change recorded, we don't need to re-evaluate if the value
// was known
if changeRecorded {
var err error
val, err = n.Change.After.Decode(cty.DynamicPseudoType)
diags = diags.Append(err)
}
// If there was no change recorded, or the recorded change was not wholly
// known, then we need to re-evaluate the output
if !changeRecorded || !val.IsWhollyKnown() {
// This has to run before we have a state lock, since evaluation also
// reads the state
val, diags = ctx.EvaluateExpr(n.Config.Expr, cty.DynamicPseudoType, nil)
// We'll handle errors below, after we have loaded the module.
// Outputs don't have a separate mode for validation, so validate
// depends_on expressions here too
diags = diags.Append(validateDependsOn(ctx, n.Config.DependsOn))
// Ensure that non-sensitive outputs don't include sensitive values
_, marks := val.UnmarkDeep()
_, hasSensitive := marks["sensitive"]
if !n.Config.Sensitive && hasSensitive {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Output refers to sensitive values",
Detail: "Expressions used in outputs can only refer to sensitive values if the sensitive attribute is true.",
Subject: n.Config.DeclRange.Ptr(),
})
}
}
// handling the interpolation error
if diags.HasErrors() {
if flagWarnOutputErrors {
@@ -261,7 +326,7 @@ func (n *NodeApplyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
}
}
// NodeDestroyableOutput represents an output that is "destroybale":
// NodeDestroyableOutput represents an output that is "destroyable":
// its application will remove the output from the state.
type NodeDestroyableOutput struct {
Addr addrs.AbsOutputValue
@@ -293,6 +358,32 @@ func (n *NodeDestroyableOutput) Execute(ctx EvalContext, op walkOperation) error {
if state == nil {
return nil
}
changes := ctx.Changes()
if changes != nil {
change := &plans.OutputChange{
Addr: n.Addr,
Change: plans.Change{
// This is just a weird placeholder delete action since
// we don't have an actual prior value to indicate.
// FIXME: Generate real planned changes for output values
// that include the old values.
Action: plans.Delete,
Before: cty.NullVal(cty.DynamicPseudoType),
After: cty.NullVal(cty.DynamicPseudoType),
},
}
cs, err := change.Encode()
if err != nil {
// Should never happen, since we just constructed this right above
panic(fmt.Sprintf("planned change for %s could not be encoded: %s", n.Addr, err))
}
log.Printf("[TRACE] planDestroyOutput: Saving %s change for %s in changeset", change.Action, n.Addr)
changes.RemoveOutputChange(n.Addr) // remove any existing planned change, if present
changes.AppendOutputChange(cs) // add the new planned change
}
state.RemoveOutputValue(n.Addr)
return nil
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/plans"
)
// OutputTransformer is a GraphTransformer that adds all the outputs
@@ -15,7 +16,12 @@ import (
// aren't changing since there is no downside: the state will be available
// even if the dependent items aren't changing.
type OutputTransformer struct {
Config *configs.Config
Config *configs.Config
Changes *plans.Changes
// if this is a planned destroy, root outputs are still in the configuration
// so we need to record that we wish to remove them
Destroy bool
}
func (t *OutputTransformer) Transform(g *Graph) error {
@@ -40,11 +46,19 @@ func (t *OutputTransformer) transform(g *Graph, c *configs.Config) error {
// Add plannable outputs to the graph, which will be dynamically expanded
// into NodeApplyableOutputs to reflect possible expansion
// through the presence of "count" or "for_each" on the modules.
var changes []*plans.OutputChangeSrc
if t.Changes != nil {
changes = t.Changes.Outputs
}
for _, o := range c.Module.Outputs {
node := &nodeExpandOutput{
Addr: addrs.OutputValue{Name: o.Name},
Module: c.Path,
Config: o,
Addr: addrs.OutputValue{Name: o.Name},
Module: c.Path,
Config: o,
Changes: changes,
Destroy: t.Destroy,
}
log.Printf("[TRACE] OutputTransformer: adding %s as %T", o.Name, node)
g.Add(node)