From fa8f8df7b6a0d9188b28968b7e7d592342aa6879 Mon Sep 17 00:00:00 2001
From: James Bardin
Date: Thu, 1 Oct 2020 17:08:25 -0400
Subject: [PATCH 1/8] add ChangesSync.FullDestroy

In order to handle various edge cases during a full destroy, add
FullDestroy to the synchronized changes so we can attempt to deduce if
the plan was created from `terraform destroy`.

It's possible that the plan was created by removing all resources from
the configuration, but in that case the end result is the same. Any of
the edge cases with provider or destroy provisioner configurations
would not apply, since there would not be any configuration references
to resolve.
---
 plans/changes_sync.go | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/plans/changes_sync.go b/plans/changes_sync.go
index 615f85392..5e735c09a 100644
--- a/plans/changes_sync.go
+++ b/plans/changes_sync.go
@@ -21,6 +21,24 @@ type ChangesSync struct {
 	changes *Changes
 }
 
+// FullDestroy returns true if the set of changes indicates we are doing a
+// destroy of all resources.
+func (cs *ChangesSync) FullDestroy() bool {
+	if cs == nil {
+		panic("FullDestroy on nil ChangesSync")
+	}
+	cs.lock.Lock()
+	defer cs.lock.Unlock()
+
+	for _, c := range cs.changes.Resources {
+		if c.Action != Delete {
+			return false
+		}
+	}
+
+	return true
+}
+
 // AppendResourceInstanceChange records the given resource instance change in
 // the set of planned resource changes.
 //

From 07af1c6225647ed3f4f85f574db802b7855f4e78 Mon Sep 17 00:00:00 2001
From: James Bardin
Date: Thu, 1 Oct 2020 17:11:46 -0400
Subject: [PATCH 2/8] destroy time eval of resources pending deletion

Allow the evaluation of resources pending deletion only during a full
destroy. With this change we can ensure deposed instances are not
evaluated under normal circumstances, but can be referenced when
needed. This also allows us to remove the fixup transformer that added
connections so temporary values would evaluate in the correct order
when referencing destroy nodes.

In the majority of cases, we do not want to evaluate resources that are
pending deletion, since configuration references can only refer to
resources that are intended to be managed by the configuration. An
exception to that rule is when Terraform is performing a full `destroy`
operation, and providers need to evaluate existing resources for their
configuration.
---
 terraform/evaluate.go            |  21 +++---
 terraform/graph_builder_apply.go |   4 --
 terraform/transform_reference.go | 120 ------------------------------
 3 files changed, 9 insertions(+), 136 deletions(-)

diff --git a/terraform/evaluate.go b/terraform/evaluate.go
index 5f5c944e6..2b9211288 100644
--- a/terraform/evaluate.go
+++ b/terraform/evaluate.go
@@ -621,6 +621,11 @@ func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.Sourc
 	}
 }
 
+// Try to guess if we have a pending destroy for all resources.
+func (s *evaluationStateData) pendingDestroy() bool { + return true +} + func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics // First we'll consult the configuration to see if an resource of this @@ -695,6 +700,7 @@ func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.Sourc // Decode all instances in the current state instances := map[addrs.InstanceKey]cty.Value{} + pendingDestroy := d.Evaluator.Changes.FullDestroy() for key, is := range rs.Instances { if is == nil || is.Current == nil { // Assume we're dealing with an instance that hasn't been created yet. @@ -711,18 +717,9 @@ func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.Sourc // instances will be in the state, as they are not destroyed until // after their dependants are updated. if change.Action == plans.Delete { - // FIXME: we should not be evaluating resources that are going - // to be destroyed, but this needs to happen always since - // destroy-time provisioners need to reference their self - // value, and providers need to evaluate their configuration - // during a full destroy, even of they depend on resources - // being destroyed. - // - // Since this requires a special transformer to try and fixup - // the order of evaluation when possible, reference it here to - // ensure that we remove the transformer when this is fixed. - _ = GraphTransformer((*applyDestroyNodeReferenceFixupTransformer)(nil)) - // continue + if !pendingDestroy { + continue + } } } diff --git a/terraform/graph_builder_apply.go b/terraform/graph_builder_apply.go index 2221c526c..b4e573142 100644 --- a/terraform/graph_builder_apply.go +++ b/terraform/graph_builder_apply.go @@ -185,10 +185,6 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer { &CloseProviderTransformer{}, &CloseProvisionerTransformer{}, - // Add destroy node reference edges where needed, until we can fix - // full-destroy evaluation. - &applyDestroyNodeReferenceFixupTransformer{}, - // close the root module &CloseRootModuleTransformer{}, diff --git a/terraform/transform_reference.go b/terraform/transform_reference.go index 44aae0fc8..110612e14 100644 --- a/terraform/transform_reference.go +++ b/terraform/transform_reference.go @@ -540,123 +540,3 @@ func modulePrefixList(result []string, prefix string) []string { return result } - -// destroyNodeReferenceFixupTransformer is a GraphTransformer that connects all -// temporary values to any destroy instances of their references. This ensures -// that they are evaluated after the destroy operations of all instances, since -// the evaluator will currently return data from instances that are scheduled -// for deletion. -// -// This breaks the rules that destroy nodes are not referencable, and can cause -// cycles in the current graph structure. The cycles however are usually caused -// by passing through a provider node, and that is the specific case we do not -// want to wait for destroy evaluation since the evaluation result may need to -// be used in the provider for a full destroy operation. -// -// Once the evaluator can again ignore any instances scheduled for deletion, -// this transformer should be removed. -type applyDestroyNodeReferenceFixupTransformer struct{} - -func (t *applyDestroyNodeReferenceFixupTransformer) Transform(g *Graph) error { - // Create mapping of destroy nodes by address. 
- // Because the values which are providing the references won't yet be - // expanded, we need to index these by configuration address, rather than - // absolute. - destroyers := map[string][]dag.Vertex{} - for _, v := range g.Vertices() { - if v, ok := v.(GraphNodeDestroyer); ok { - addr := v.DestroyAddr().ContainingResource().Config().String() - destroyers[addr] = append(destroyers[addr], v) - } - } - _ = destroyers - - // nothing being destroyed - if len(destroyers) == 0 { - return nil - } - - // Now find any temporary values (variables, locals, outputs) that might - // reference the resources with instances being destroyed. - for _, v := range g.Vertices() { - rn, ok := v.(GraphNodeReferencer) - if !ok { - continue - } - - // we only want temporary value referencers - if _, ok := v.(graphNodeTemporaryValue); !ok { - continue - } - - modulePath := rn.ModulePath() - - // If this value is possibly consumed by a provider configuration, we - // must attempt to evaluate early during a full destroy, and cannot - // wait on the resource destruction. This would also likely cause a - // cycle in most configurations. - des, _ := g.Descendents(rn) - providerDescendant := false - for _, v := range des { - if _, ok := v.(GraphNodeProvider); ok { - providerDescendant = true - break - } - } - - if providerDescendant { - log.Printf("[WARN] Value %q has provider descendant, not waiting on referenced destroy instance", dag.VertexName(rn)) - continue - } - - refs := rn.References() - for _, ref := range refs { - - var addr addrs.ConfigResource - // get the configuration level address for this reference, since - // that is how we indexed the destroyers - switch tr := ref.Subject.(type) { - case addrs.Resource: - addr = addrs.ConfigResource{ - Module: modulePath, - Resource: tr, - } - case addrs.ResourceInstance: - addr = addrs.ConfigResource{ - Module: modulePath, - Resource: tr.ContainingResource(), - } - default: - // this is not a resource reference - continue - } - - // see if there are any destroyers registered for this address - for _, dest := range destroyers[addr.String()] { - // check that we are not introducing a cycle, by looking for - // our own node in the ancestors of the destroy node. - // This should theoretically only happen if we had a provider - // descendant which was checked already, but since this edge is - // being added outside the normal rules of the graph, check - // again to be certain. - anc, _ := g.Ancestors(dest) - cycle := false - for _, a := range anc { - if a == rn { - log.Printf("[WARN] Not adding fixup edge %q->%q which introduces a cycle", dag.VertexName(rn), dag.VertexName(dest)) - cycle = true - break - } - } - if cycle { - continue - } - - log.Printf("[DEBUG] adding fixup edge %q->%q to prevent destroy node evaluation", dag.VertexName(rn), dag.VertexName(dest)) - g.Connect(dag.BasicEdge(rn, dest)) - } - } - } - - return nil -} From 43c05252774e7cece81a07443961f07fd64c9bd7 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 2 Oct 2020 08:50:24 -0400 Subject: [PATCH 3/8] fix the test that was supposed to break The test for this behavior did not work, because the old mock diff function does not work correctly. Write a PlanResourceChange function to return a correct plan. 
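
One subtlety in the new mock shown in the diff below: go-cty equality
involving an unknown value yields an unknown boolean rather than false,
which is why the plan function checks `!eq.IsKnown() || eq.False()`
when comparing require_new. A minimal standalone sketch of that cty
behavior (illustrative only, not part of this patch):

    package main

    import (
    	"fmt"

    	"github.com/zclconf/go-cty/cty"
    )

    func main() {
    	known := cty.StringVal("a")
    	unknown := cty.UnknownVal(cty.String)

    	// Comparing a known value with an unknown one returns an unknown
    	// cty.Bool, so IsKnown must be checked before calling False.
    	eq := known.Equals(unknown)
    	fmt.Println(eq.IsKnown()) // false

    	// Two known values compare normally.
    	eq = known.Equals(cty.StringVal("b"))
    	fmt.Println(eq.IsKnown(), eq.False()) // true true
    }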
---
 terraform/context_apply_test.go | 43 +++++++++++++++++++++------------
 1 file changed, 27 insertions(+), 16 deletions(-)

diff --git a/terraform/context_apply_test.go b/terraform/context_apply_test.go
index 4596ae09d..8ce875a63 100644
--- a/terraform/context_apply_test.go
+++ b/terraform/context_apply_test.go
@@ -11389,11 +11389,34 @@ locals {
 		return testApplyFn(info, s, d)
 	}
 
-	p.DiffFn = testDiffFn
+	p.PlanResourceChangeFn = func(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
+		n := r.ProposedNewState.AsValueMap()
+
+		if r.PriorState.IsNull() {
+			n["id"] = cty.UnknownVal(cty.String)
+			resp.PlannedState = cty.ObjectVal(n)
+			return resp
+		}
+
+		p := r.PriorState.AsValueMap()
+
+		priorRN := p["require_new"]
+		newRN := n["require_new"]
+
+		if eq := priorRN.Equals(newRN); !eq.IsKnown() || eq.False() {
+			resp.RequiresReplace = []cty.Path{{cty.GetAttrStep{Name: "require_new"}}}
+			n["id"] = cty.UnknownVal(cty.String)
+		}
+
+		resp.PlannedState = cty.ObjectVal(n)
+		return resp
+	}
+
+	// reduce the count to 1
 	ctx := testContext2(t, &ContextOpts{
 		Variables: InputValues{
 			"ct": &InputValue{
-				Value:      cty.NumberIntVal(0),
+				Value:      cty.NumberIntVal(1),
 				SourceType: ValueFromCaller,
 			},
 		},
@@ -11409,21 +11432,11 @@ locals {
 		t.Fatal(diags.ErrWithWarnings())
 	}
 
-	// if resource b isn't going to apply correctly, we will get an error about
-	// an invalid plan value
 	state, diags = ctx.Apply()
-	errMsg := diags.ErrWithWarnings().Error()
-	if strings.Contains(errMsg, "Cycle") {
-		t.Fatal("test should not produce a cycle:\n", errMsg)
+	if diags.HasErrors() {
+		t.Fatal(diags.ErrWithWarnings())
 	}
 
-	if !diags.HasErrors() {
-		// FIXME: this test is correct, but needs to wait until we no longer
-		// evaluate resourced that are pending destruction.
-		t.Fatal("used to error, but now it's fixed!")
-	}
-	return
-
 	// check the output, as those can't cause an error planning the value
 	out := state.RootModule().OutputValues["out"].Value.AsString()
 	if out != "a0" {
@@ -11450,8 +11463,6 @@ locals {
 		t.Fatal(diags.ErrWithWarnings())
 	}
 
-	// if resource b isn't going to apply correctly, we will get an error about
-	// an invalid plan value
 	state, diags = ctx.Apply()
 	if diags.HasErrors() {
 		t.Fatal(diags.ErrWithWarnings())

From 64c3a8f55091f52fc2eb043fb5fca39ac1f43557 Mon Sep 17 00:00:00 2001
From: James Bardin
Date: Fri, 2 Oct 2020 11:25:21 -0400
Subject: [PATCH 4/8] lang.Scope.EvalSelfBlock

In order to properly evaluate a destroy provisioner, we cannot rely on
the usual evaluation context, because the resource has already been
removed from the state. EvalSelfBlock evaluates an hcl.Body in the
limited scope of a single object as "self", with the added values of
"count.index" and "each.key".
---
 lang/eval.go | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/lang/eval.go b/lang/eval.go
index db5a15a24..381ec4288 100644
--- a/lang/eval.go
+++ b/lang/eval.go
@@ -8,6 +8,7 @@ import (
 	"github.com/hashicorp/hcl/v2/hcldec"
 	"github.com/hashicorp/terraform/addrs"
 	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/instances"
 	"github.com/hashicorp/terraform/lang/blocktoattr"
 	"github.com/hashicorp/terraform/tfdiags"
 	"github.com/zclconf/go-cty/cty"
@@ -69,6 +70,35 @@ func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value,
 	return val, diags
 }
 
+// EvalSelfBlock evaluates the given body only within the scope of the provided
+// object and instance key data.
References to the object must use self, and the +// key data will only contain count.index or each.key. +func (s *Scope) EvalSelfBlock(body hcl.Body, self cty.Value, schema *configschema.Block, keyData instances.RepetitionData) (cty.Value, tfdiags.Diagnostics) { + vals := make(map[string]cty.Value) + vals["self"] = self + + if !keyData.CountIndex.IsNull() { + vals["count"] = cty.ObjectVal(map[string]cty.Value{ + "index": keyData.CountIndex, + }) + } + if !keyData.EachKey.IsNull() { + vals["each"] = cty.ObjectVal(map[string]cty.Value{ + "key": keyData.EachKey, + }) + } + + ctx := &hcl.EvalContext{ + Variables: vals, + Functions: s.Functions(), + } + + var diags tfdiags.Diagnostics + val, decDiags := hcldec.Decode(body, schema.DecoderSpec(), ctx) + diags = diags.Append(decDiags) + return val, diags +} + // EvalExpr evaluates a single expression in the receiving context and returns // the resulting value. The value will be converted to the given type before // it is returned if possible, or else an error diagnostic will be produced From 95197f0324ff718b794faa543e2a97291d625c4a Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 2 Oct 2020 11:54:58 -0400 Subject: [PATCH 5/8] use EvalSelfBlock for destroy provisioners Evaluate destroy provisioner configurations using only the last resource state value, and the static instance key data. --- terraform/eval_apply.go | 70 ++++++++++++++++------- terraform/node_resource_apply_instance.go | 27 +++++---- terraform/node_resource_destroy.go | 1 + 3 files changed, 63 insertions(+), 35 deletions(-) diff --git a/terraform/eval_apply.go b/terraform/eval_apply.go index 90040c84c..14da88ab1 100644 --- a/terraform/eval_apply.go +++ b/terraform/eval_apply.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/plans/objchange" "github.com/hashicorp/terraform/providers" @@ -504,6 +505,9 @@ type EvalApplyProvisioners struct { // When is the type of provisioner to run at this point When configs.ProvisionerWhen + + // We use the stored change to get the previous resource values in the case of a destroy provisioner + Change *plans.ResourceInstanceChange } // TODO: test @@ -600,6 +604,18 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio instanceAddr := n.Addr absAddr := instanceAddr.Absolute(ctx.Path()) + // this self is only used for destroy provisioner evaluation, and must + // refer to the last known value of the resource. + self := n.Change.Before + + var evalScope func(EvalContext, hcl.Body, cty.Value, *configschema.Block) (cty.Value, tfdiags.Diagnostics) + switch n.When { + case configs.ProvisionerWhenDestroy: + evalScope = n.evalDestroyProvisionerConfig + default: + evalScope = n.evalProvisionerConfig + } + // If there's a connection block defined directly inside the resource block // then it'll serve as a base connection configuration for all of the // provisioners. @@ -615,25 +631,8 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio provisioner := ctx.Provisioner(prov.Type) schema := ctx.ProvisionerSchema(prov.Type) - var forEach map[string]cty.Value - - // For a destroy-time provisioner forEach is intentionally nil here, - // which EvalDataForInstanceKey responds to by not populating EachValue - // in its result. That's okay because each.value is prohibited for - // destroy-time provisioners. 
- if n.When != configs.ProvisionerWhenDestroy { - m, forEachDiags := evaluateForEachExpression(n.ResourceConfig.ForEach, ctx) - diags = diags.Append(forEachDiags) - forEach = m - } - - keyData := EvalDataForInstanceKey(instanceAddr.Key, forEach) - - // Evaluate the main provisioner configuration. - config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData) + config, configDiags := evalScope(ctx, prov.Config, self, schema) diags = diags.Append(configDiags) - - // we can't apply the provisioner if the config has errors if diags.HasErrors() { return diags.Err() } @@ -664,11 +663,9 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio if connBody != nil { var connInfoDiags tfdiags.Diagnostics - connInfo, _, connInfoDiags = ctx.EvaluateBlock(connBody, connectionBlockSupersetSchema, instanceAddr, keyData) + connInfo, connInfoDiags = evalScope(ctx, connBody, self, connectionBlockSupersetSchema) diags = diags.Append(connInfoDiags) if diags.HasErrors() { - // "on failure continue" setting only applies to failures of the - // provisioner itself, not to invalid configuration. return diags.Err() } } @@ -728,3 +725,34 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio return diags.ErrWithWarnings() } + +func (n *EvalApplyProvisioners) evalProvisionerConfig(ctx EvalContext, body hcl.Body, self cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + forEach, forEachDiags := evaluateForEachExpression(n.ResourceConfig.ForEach, ctx) + diags = diags.Append(forEachDiags) + + keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) + + config, _, configDiags := ctx.EvaluateBlock(body, schema, n.Addr, keyData) + diags = diags.Append(configDiags) + + return config, diags +} + +// during destroy a provisioner can only evaluate within the scope of the parent resource +func (n *EvalApplyProvisioners) evalDestroyProvisionerConfig(ctx EvalContext, body hcl.Body, self cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // For a destroy-time provisioner forEach is intentionally nil here, + // which EvalDataForInstanceKey responds to by not populating EachValue + // in its result. That's okay because each.value is prohibited for + // destroy-time provisioners. + keyData := EvalDataForInstanceKey(n.Addr.Key, nil) + + evalScope := ctx.EvaluationScope(n.Addr, keyData) + config, evalDiags := evalScope.EvalSelfBlock(body, self, schema, keyData) + diags = diags.Append(evalDiags) + + return config, diags +} diff --git a/terraform/node_resource_apply_instance.go b/terraform/node_resource_apply_instance.go index 0c77cd4e8..5a0dc61e8 100644 --- a/terraform/node_resource_apply_instance.go +++ b/terraform/node_resource_apply_instance.go @@ -352,6 +352,18 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext) return err } + // We clear the change out here so that future nodes don't see a change + // that is already complete. 
+ writeDiff := &EvalWriteDiff{ + Addr: addr, + ProviderSchema: &providerSchema, + Change: nil, + } + _, err = writeDiff.Eval(ctx) + if err != nil { + return err + } + evalMaybeTainted := &EvalMaybeTainted{ Addr: addr, State: &state, @@ -382,6 +394,7 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext) CreateNew: &createNew, Error: &applyError, When: configs.ProvisionerWhenCreate, + Change: diffApply, } _, err = applyProvisioners.Eval(ctx) if err != nil { @@ -423,20 +436,6 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext) } } - // We clear the diff out here so that future nodes don't see a diff that is - // already complete. There is no longer a diff! - if !diff.Action.IsReplace() || !n.CreateBeforeDestroy() { - writeDiff := &EvalWriteDiff{ - Addr: addr, - ProviderSchema: &providerSchema, - Change: nil, - } - _, err := writeDiff.Eval(ctx) - if err != nil { - return err - } - } - applyPost := &EvalApplyPost{ Addr: addr, State: &state, diff --git a/terraform/node_resource_destroy.go b/terraform/node_resource_destroy.go index c54bc0cec..386d0fec2 100644 --- a/terraform/node_resource_destroy.go +++ b/terraform/node_resource_destroy.go @@ -193,6 +193,7 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation) ResourceConfig: n.Config, Error: &provisionerErr, When: configs.ProvisionerWhenDestroy, + Change: changeApply, } _, err := evalApplyProvisioners.Eval(ctx) if err != nil { From a1181adca4804b16b4bbee7a68c78bda96b78afb Mon Sep 17 00:00:00 2001 From: James Bardin Date: Mon, 5 Oct 2020 10:35:31 -0400 Subject: [PATCH 6/8] remove unused method stub --- terraform/evaluate.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/terraform/evaluate.go b/terraform/evaluate.go index 2b9211288..7bf7c9112 100644 --- a/terraform/evaluate.go +++ b/terraform/evaluate.go @@ -621,11 +621,6 @@ func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.Sourc } } -// Try to guess if we have a pending destroy for all resources. -func (s *evaluationStateData) pendingDestroy() bool { - return true -} - func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics // First we'll consult the configuration to see if an resource of this From e35524c7f0590c7c0583f8a94f8bd8b76f3f2140 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Mon, 5 Oct 2020 10:40:14 -0400 Subject: [PATCH 7/8] use existing State rather than Change.Before The change was passed into the provisioner node because the normal NodeApplyableResourceInstance overwrites the prior state with the new state. This however doesn't matter here, because the resource destroy node does not do this. Also, even if the updated state were to be used for some reason with a create provisioner, it would be the correct state to use at that point. 
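
For readers following the destroy-provisioner changes: the self value
taken from the stored state is the only variable a destroy provisioner
can reference, which is what lang.Scope.EvalSelfBlock (patch 4)
enforces. A minimal standalone sketch of evaluating a block in such a
self-only scope, using the public HCL APIs directly (illustrative; the
real EvalSelfBlock also exposes count.index, each.key, and the scope's
functions):

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl/v2"
    	"github.com/hashicorp/hcl/v2/hcldec"
    	"github.com/hashicorp/hcl/v2/hclsyntax"
    	"github.com/zclconf/go-cty/cty"
    )

    func main() {
    	// A connection block for a destroy provisioner may only refer
    	// to self, never to other resources or data sources.
    	src := []byte(`host = self.private_ip`)

    	f, diags := hclsyntax.ParseConfig(src, "conn.tf", hcl.Pos{Line: 1, Column: 1})
    	if diags.HasErrors() {
    		panic(diags)
    	}

    	// The evaluation context holds only the last known value of the
    	// instance, bound to the name "self".
    	evalCtx := &hcl.EvalContext{
    		Variables: map[string]cty.Value{
    			"self": cty.ObjectVal(map[string]cty.Value{
    				"private_ip": cty.StringVal("10.0.0.5"),
    			}),
    		},
    	}

    	spec := hcldec.ObjectSpec{
    		"host": &hcldec.AttrSpec{Name: "host", Type: cty.String, Required: true},
    	}

    	val, diags := hcldec.Decode(f.Body, spec, evalCtx)
    	if diags.HasErrors() {
    		panic(diags)
    	}
    	fmt.Println(val.GetAttr("host").AsString()) // 10.0.0.5
    }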
---
 terraform/eval_apply.go                   | 5 +----
 terraform/node_resource_apply_instance.go | 1 -
 terraform/node_resource_destroy.go        | 1 -
 3 files changed, 1 insertion(+), 6 deletions(-)

diff --git a/terraform/eval_apply.go b/terraform/eval_apply.go
index 14da88ab1..c5b6a4cad 100644
--- a/terraform/eval_apply.go
+++ b/terraform/eval_apply.go
@@ -505,9 +505,6 @@ type EvalApplyProvisioners struct {
 
 	// When is the type of provisioner to run at this point
 	When configs.ProvisionerWhen
-
-	// We use the stored change to get the previous resource values in the case of a destroy provisioner
-	Change *plans.ResourceInstanceChange
 }
 
 // TODO: test
@@ -606,7 +603,7 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio
 
 	// this self is only used for destroy provisioner evaluation, and must
 	// refer to the last known value of the resource.
-	self := n.Change.Before
+	self := (*n.State).Value
 
 	var evalScope func(EvalContext, hcl.Body, cty.Value, *configschema.Block) (cty.Value, tfdiags.Diagnostics)
 	switch n.When {
diff --git a/terraform/node_resource_apply_instance.go b/terraform/node_resource_apply_instance.go
index 5a0dc61e8..a1d35b81a 100644
--- a/terraform/node_resource_apply_instance.go
+++ b/terraform/node_resource_apply_instance.go
@@ -394,7 +394,6 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
 		CreateNew:      &createNew,
 		Error:          &applyError,
 		When:           configs.ProvisionerWhenCreate,
-		Change:         diffApply,
 	}
 	_, err = applyProvisioners.Eval(ctx)
 	if err != nil {
diff --git a/terraform/node_resource_destroy.go b/terraform/node_resource_destroy.go
index 386d0fec2..c54bc0cec 100644
--- a/terraform/node_resource_destroy.go
+++ b/terraform/node_resource_destroy.go
@@ -193,7 +193,6 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation)
 		ResourceConfig: n.Config,
 		Error:          &provisionerErr,
 		When:           configs.ProvisionerWhenDestroy,
-		Change:         changeApply,
 	}
 	_, err := evalApplyProvisioners.Eval(ctx)
 	if err != nil {

From 0c72c6f1448555bc02b99c6eb87bcf2d5e0c1a6a Mon Sep 17 00:00:00 2001
From: James Bardin
Date: Mon, 5 Oct 2020 10:50:25 -0400
Subject: [PATCH 8/8] s/FullDestroy/IsFullDestroy/

---
 plans/changes_sync.go | 4 ++--
 terraform/evaluate.go | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/plans/changes_sync.go b/plans/changes_sync.go
index 5e735c09a..0a69dc215 100644
--- a/plans/changes_sync.go
+++ b/plans/changes_sync.go
@@ -21,9 +21,9 @@ type ChangesSync struct {
 	changes *Changes
 }
 
-// FullDestroy returns true if the set of changes indicates we are doing a
+// IsFullDestroy returns true if the set of changes indicates we are doing a
 // destroy of all resources.
-func (cs *ChangesSync) FullDestroy() bool {
+func (cs *ChangesSync) IsFullDestroy() bool {
 	if cs == nil {
 		panic("FullDestroy on nil ChangesSync")
 	}
diff --git a/terraform/evaluate.go b/terraform/evaluate.go
index 7bf7c9112..0647bb68e 100644
--- a/terraform/evaluate.go
+++ b/terraform/evaluate.go
@@ -695,7 +695,7 @@ func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.Sourc
 
 	// Decode all instances in the current state
 	instances := map[addrs.InstanceKey]cty.Value{}
-	pendingDestroy := d.Evaluator.Changes.FullDestroy()
+	pendingDestroy := d.Evaluator.Changes.IsFullDestroy()
 	for key, is := range rs.Instances {
 		if is == nil || is.Current == nil {
 			// Assume we're dealing with an instance that hasn't been created yet.
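
Taken together, the series hinges on one heuristic: a plan is treated
as a full destroy exactly when every resource change it contains is a
Delete. Note that an empty change set also satisfies this, matching the
"plan created by removing all resources" case described in patch 1. A
standalone sketch of the check, with the types reduced to a bare
minimum (hypothetical names; the real method iterates
cs.changes.Resources under a mutex):

    package main

    import "fmt"

    // Action stands in for plans.Action in this sketch.
    type Action string

    const (
    	Create Action = "create"
    	Update Action = "update"
    	Delete Action = "delete"
    )

    // isFullDestroy reports whether every planned resource change is a
    // delete, mirroring ChangesSync.IsFullDestroy.
    func isFullDestroy(actions []Action) bool {
    	for _, a := range actions {
    		if a != Delete {
    			return false
    		}
    	}
    	return true
    }

    func main() {
    	fmt.Println(isFullDestroy([]Action{Delete, Delete})) // true
    	fmt.Println(isFullDestroy([]Action{Delete, Update})) // false
    	fmt.Println(isFullDestroy(nil))                      // true: an empty plan counts as a full destroy
    }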