Merge pull request #26470 from hashicorp/jbardin/inverse-destroy-references

Allow special-case evaluation of instances pending deletion.
James Bardin, 2020-10-05 16:20:22 -04:00, commit c48af3f18b (committed via GitHub)
8 changed files with 137 additions and 187 deletions

View File

@@ -8,6 +8,7 @@ import (
"github.com/hashicorp/hcl/v2/hcldec"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/instances"
"github.com/hashicorp/terraform/lang/blocktoattr"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
@@ -69,6 +70,35 @@ func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value,
return val, diags
}
// EvalSelfBlock evaluates the given body only within the scope of the provided
// object and instance key data. References to the object must use self, and the
// key data will only contain count.index or each.key.
func (s *Scope) EvalSelfBlock(body hcl.Body, self cty.Value, schema *configschema.Block, keyData instances.RepetitionData) (cty.Value, tfdiags.Diagnostics) {
vals := make(map[string]cty.Value)
vals["self"] = self
if !keyData.CountIndex.IsNull() {
vals["count"] = cty.ObjectVal(map[string]cty.Value{
"index": keyData.CountIndex,
})
}
if !keyData.EachKey.IsNull() {
vals["each"] = cty.ObjectVal(map[string]cty.Value{
"key": keyData.EachKey,
})
}
ctx := &hcl.EvalContext{
Variables: vals,
Functions: s.Functions(),
}
var diags tfdiags.Diagnostics
val, decDiags := hcldec.Decode(body, schema.DecoderSpec(), ctx)
diags = diags.Append(decDiags)
return val, diags
}
// EvalExpr evaluates a single expression in the receiving context and returns
// the resulting value. The value will be converted to the given type before
// it is returned if possible, or else an error diagnostic will be produced
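For illustration only (not part of this commit), here is a minimal standalone sketch of driving the new Scope.EvalSelfBlock helper: the body may refer only to self, count.index, and each.key, and anything else fails to resolve. The source string, schema, and main wiring below are invented for the example; only EvalSelfBlock and instances.RepetitionData come from the change above.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/hashicorp/terraform/configs/configschema"
	"github.com/hashicorp/terraform/instances"
	"github.com/hashicorp/terraform/lang"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A body that only references self and count.index, the way a
	// destroy-time provisioner configuration would.
	src := `command = "shutdown ${self.id} (index ${count.index})"`
	file, parseDiags := hclsyntax.ParseConfig([]byte(src), "example.tf", hcl.Pos{Line: 1, Column: 1})
	if parseDiags.HasErrors() {
		log.Fatal(parseDiags)
	}

	// Invented schema for the example: a single required string attribute.
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"command": {Type: cty.String, Required: true},
		},
	}

	// self is the last known value of the instance; only count.index is
	// populated in the repetition data, so each.key stays null.
	self := cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("i-abc123")})
	keyData := instances.RepetitionData{
		CountIndex: cty.NumberIntVal(0),
		EachKey:    cty.NullVal(cty.String),
	}

	scope := &lang.Scope{}
	val, diags := scope.EvalSelfBlock(file.Body, self, schema, keyData)
	if diags.HasErrors() {
		log.Fatal(diags.Err())
	}
	fmt.Println(val.GetAttr("command").AsString()) // shutdown i-abc123 (index 0)
}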

View File

@@ -21,6 +21,24 @@ type ChangesSync struct {
changes *Changes
}
// IsFullDestroy returns true if the set of changes indicates we are doing a
// destroy of all resources.
func (cs *ChangesSync) IsFullDestroy() bool {
if cs == nil {
panic("IsFullDestroy on nil ChangesSync")
}
cs.lock.Lock()
defer cs.lock.Unlock()
for _, c := range cs.changes.Resources {
if c.Action != Delete {
return false
}
}
return true
}
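A quick illustration of the contract (a hypothetical test sketch, not part of this commit; the resource address, actions, and test name are invented, while NewChanges, SyncWrapper, and AppendResourceInstanceChange are existing plans package APIs): IsFullDestroy stays true only while every recorded change is a Delete.

package plans_test

import (
	"testing"

	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/plans"
)

func TestIsFullDestroySketch(t *testing.T) {
	changes := plans.NewChanges()
	sync := changes.SyncWrapper()

	addr := addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "test_instance",
		Name: "a",
	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)

	// Only Delete actions recorded so far: this is a full destroy.
	sync.AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{
		Addr:      addr,
		ChangeSrc: plans.ChangeSrc{Action: plans.Delete},
	})
	if !sync.IsFullDestroy() {
		t.Fatal("expected IsFullDestroy to be true with only Delete changes")
	}

	// Any non-Delete change means the plan is no longer a full destroy.
	sync.AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{
		Addr:      addr,
		ChangeSrc: plans.ChangeSrc{Action: plans.Update},
	})
	if sync.IsFullDestroy() {
		t.Fatal("expected IsFullDestroy to be false once an Update is recorded")
	}
}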
// AppendResourceInstanceChange records the given resource instance change in
// the set of planned resource changes.
//

View File

@@ -11389,11 +11389,34 @@ locals {
return testApplyFn(info, s, d)
}
p.DiffFn = testDiffFn
p.PlanResourceChangeFn = func(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
n := r.ProposedNewState.AsValueMap()
if r.PriorState.IsNull() {
n["id"] = cty.UnknownVal(cty.String)
resp.PlannedState = cty.ObjectVal(n)
return resp
}
p := r.PriorState.AsValueMap()
priorRN := p["require_new"]
newRN := n["require_new"]
if eq := priorRN.Equals(newRN); !eq.IsKnown() || eq.False() {
resp.RequiresReplace = []cty.Path{{cty.GetAttrStep{Name: "require_new"}}}
n["id"] = cty.UnknownVal(cty.String)
}
resp.PlannedState = cty.ObjectVal(n)
return resp
}
// reduce the count to 1
ctx := testContext2(t, &ContextOpts{
Variables: InputValues{
"ct": &InputValue{
Value: cty.NumberIntVal(0),
Value: cty.NumberIntVal(1),
SourceType: ValueFromCaller,
},
},
@@ -11409,21 +11432,11 @@ locals {
t.Fatal(diags.ErrWithWarnings())
}
// if resource b isn't going to apply correctly, we will get an error about
// an invalid plan value
state, diags = ctx.Apply()
errMsg := diags.ErrWithWarnings().Error()
if strings.Contains(errMsg, "Cycle") {
t.Fatal("test should not produce a cycle:\n", errMsg)
if diags.HasErrors() {
t.Fatal(diags.ErrWithWarnings())
}
if !diags.HasErrors() {
// FIXME: this test is correct, but needs to wait until we no longer
// evaluate resources that are pending destruction.
t.Fatal("used to error, but now it's fixed!")
}
return
// check the output, as those can't cause an error planning the value
out := state.RootModule().OutputValues["out"].Value.AsString()
if out != "a0" {
@@ -11450,8 +11463,6 @@ locals {
t.Fatal(diags.ErrWithWarnings())
}
// if resource b isn't going to apply correctly, we will get an error about
// an invalid plan value
state, diags = ctx.Apply()
if diags.HasErrors() {
t.Fatal(diags.ErrWithWarnings())

View File

@@ -11,6 +11,7 @@ import (
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/plans"
"github.com/hashicorp/terraform/plans/objchange"
"github.com/hashicorp/terraform/providers"
@@ -600,6 +601,18 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio
instanceAddr := n.Addr
absAddr := instanceAddr.Absolute(ctx.Path())
// this self is only used for destroy provisioner evaluation, and must
// refer to the last known value of the resource.
self := (*n.State).Value
var evalScope func(EvalContext, hcl.Body, cty.Value, *configschema.Block) (cty.Value, tfdiags.Diagnostics)
switch n.When {
case configs.ProvisionerWhenDestroy:
evalScope = n.evalDestroyProvisionerConfig
default:
evalScope = n.evalProvisionerConfig
}
// If there's a connection block defined directly inside the resource block
// then it'll serve as a base connection configuration for all of the
// provisioners.
@@ -615,25 +628,8 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio
provisioner := ctx.Provisioner(prov.Type)
schema := ctx.ProvisionerSchema(prov.Type)
var forEach map[string]cty.Value
// For a destroy-time provisioner forEach is intentionally nil here,
// which EvalDataForInstanceKey responds to by not populating EachValue
// in its result. That's okay because each.value is prohibited for
// destroy-time provisioners.
if n.When != configs.ProvisionerWhenDestroy {
m, forEachDiags := evaluateForEachExpression(n.ResourceConfig.ForEach, ctx)
diags = diags.Append(forEachDiags)
forEach = m
}
keyData := EvalDataForInstanceKey(instanceAddr.Key, forEach)
// Evaluate the main provisioner configuration.
config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData)
config, configDiags := evalScope(ctx, prov.Config, self, schema)
diags = diags.Append(configDiags)
// we can't apply the provisioner if the config has errors
if diags.HasErrors() {
return diags.Err()
}
@@ -664,11 +660,9 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio
if connBody != nil {
var connInfoDiags tfdiags.Diagnostics
connInfo, _, connInfoDiags = ctx.EvaluateBlock(connBody, connectionBlockSupersetSchema, instanceAddr, keyData)
connInfo, connInfoDiags = evalScope(ctx, connBody, self, connectionBlockSupersetSchema)
diags = diags.Append(connInfoDiags)
if diags.HasErrors() {
// "on failure continue" setting only applies to failures of the
// provisioner itself, not to invalid configuration.
return diags.Err()
}
}
@@ -728,3 +722,34 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio
return diags.ErrWithWarnings()
}
func (n *EvalApplyProvisioners) evalProvisionerConfig(ctx EvalContext, body hcl.Body, self cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
forEach, forEachDiags := evaluateForEachExpression(n.ResourceConfig.ForEach, ctx)
diags = diags.Append(forEachDiags)
keyData := EvalDataForInstanceKey(n.Addr.Key, forEach)
config, _, configDiags := ctx.EvaluateBlock(body, schema, n.Addr, keyData)
diags = diags.Append(configDiags)
return config, diags
}
// during destroy a provisioner can only evaluate within the scope of the parent resource
func (n *EvalApplyProvisioners) evalDestroyProvisionerConfig(ctx EvalContext, body hcl.Body, self cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
// For a destroy-time provisioner forEach is intentionally nil here,
// which EvalDataForInstanceKey responds to by not populating EachValue
// in its result. That's okay because each.value is prohibited for
// destroy-time provisioners.
keyData := EvalDataForInstanceKey(n.Addr.Key, nil)
evalScope := ctx.EvaluationScope(n.Addr, keyData)
config, evalDiags := evalScope.EvalSelfBlock(body, self, schema, keyData)
diags = diags.Append(evalDiags)
return config, diags
}
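To make the each.value restriction concrete, a small sketch (it would live in the terraform package alongside the code above; the keys "blue" and 2 are invented) of what EvalDataForInstanceKey yields when the for_each map is deliberately passed as nil, as evalDestroyProvisionerConfig does:

// Sketch only: illustrates the comment above. With a nil for_each map the
// returned repetition data never carries each.value, so a destroy-time
// provisioner can still use count.index and each.key but not each.value.
func destroyProvisionerKeyData() {
	forEachInstance := EvalDataForInstanceKey(addrs.StringKey("blue"), nil)
	_ = forEachInstance.EachKey   // cty.StringVal("blue"); each.key still works
	_ = forEachInstance.EachValue // left unpopulated without a for_each map

	countInstance := EvalDataForInstanceKey(addrs.IntKey(2), nil)
	_ = countInstance.CountIndex // cty.NumberIntVal(2); count.index still works
}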

View File

@@ -695,6 +695,7 @@ func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.Sourc
// Decode all instances in the current state
instances := map[addrs.InstanceKey]cty.Value{}
pendingDestroy := d.Evaluator.Changes.IsFullDestroy()
for key, is := range rs.Instances {
if is == nil || is.Current == nil {
// Assume we're dealing with an instance that hasn't been created yet.
@@ -711,18 +712,9 @@ func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.Sourc
// instances will be in the state, as they are not destroyed until
// after their dependants are updated.
if change.Action == plans.Delete {
// FIXME: we should not be evaluating resources that are going
// to be destroyed, but this needs to happen always since
// destroy-time provisioners need to reference their self
// value, and providers need to evaluate their configuration
// during a full destroy, even if they depend on resources
// being destroyed.
//
// Since this requires a special transformer to try and fixup
// the order of evaluation when possible, reference it here to
// ensure that we remove the transformer when this is fixed.
_ = GraphTransformer((*applyDestroyNodeReferenceFixupTransformer)(nil))
// continue
if !pendingDestroy {
continue
}
}
}
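The decision encoded above, distilled into a standalone sketch (a hypothetical helper, not a function in the codebase): instances planned for deletion are normally hidden from expression evaluation, but during a full destroy their last known values must remain visible so destroy-time provisioners and provider configurations can still resolve their references.

// includeInEvaluation is a hypothetical restatement of the rule applied in
// GetResource above.
func includeInEvaluation(action plans.Action, fullDestroy bool) bool {
	if action == plans.Delete && !fullDestroy {
		// Normally an instance pending deletion is skipped, since it is about
		// to go away and its dependants have already been handled.
		return false
	}
	// During a full destroy the value is still needed, e.g. by destroy-time
	// provisioners referencing self and by provider configurations.
	return true
}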

View File

@@ -185,10 +185,6 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
&CloseProviderTransformer{},
&CloseProvisionerTransformer{},
// Add destroy node reference edges where needed, until we can fix
// full-destroy evaluation.
&applyDestroyNodeReferenceFixupTransformer{},
// close the root module
&CloseRootModuleTransformer{},

View File

@@ -352,6 +352,18 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
return err
}
// We clear the change out here so that future nodes don't see a change
// that is already complete.
writeDiff := &EvalWriteDiff{
Addr: addr,
ProviderSchema: &providerSchema,
Change: nil,
}
_, err = writeDiff.Eval(ctx)
if err != nil {
return err
}
evalMaybeTainted := &EvalMaybeTainted{
Addr: addr,
State: &state,
@@ -423,20 +435,6 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
}
}
// We clear the diff out here so that future nodes don't see a diff that is
// already complete. There is no longer a diff!
if !diff.Action.IsReplace() || !n.CreateBeforeDestroy() {
writeDiff := &EvalWriteDiff{
Addr: addr,
ProviderSchema: &providerSchema,
Change: nil,
}
_, err := writeDiff.Eval(ctx)
if err != nil {
return err
}
}
applyPost := &EvalApplyPost{
Addr: addr,
State: &state,

View File

@@ -552,123 +552,3 @@ func modulePrefixList(result []string, prefix string) []string {
return result
}
// destroyNodeReferenceFixupTransformer is a GraphTransformer that connects all
// temporary values to any destroy instances of their references. This ensures
// that they are evaluated after the destroy operations of all instances, since
// the evaluator will currently return data from instances that are scheduled
// for deletion.
//
// This breaks the rules that destroy nodes are not referencable, and can cause
// cycles in the current graph structure. The cycles however are usually caused
// by passing through a provider node, and that is the specific case we do not
// want to wait for destroy evaluation since the evaluation result may need to
// be used in the provider for a full destroy operation.
//
// Once the evaluator can again ignore any instances scheduled for deletion,
// this transformer should be removed.
type applyDestroyNodeReferenceFixupTransformer struct{}
func (t *applyDestroyNodeReferenceFixupTransformer) Transform(g *Graph) error {
// Create mapping of destroy nodes by address.
// Because the values which are providing the references won't yet be
// expanded, we need to index these by configuration address, rather than
// absolute.
destroyers := map[string][]dag.Vertex{}
for _, v := range g.Vertices() {
if v, ok := v.(GraphNodeDestroyer); ok {
addr := v.DestroyAddr().ContainingResource().Config().String()
destroyers[addr] = append(destroyers[addr], v)
}
}
_ = destroyers
// nothing being destroyed
if len(destroyers) == 0 {
return nil
}
// Now find any temporary values (variables, locals, outputs) that might
// reference the resources with instances being destroyed.
for _, v := range g.Vertices() {
rn, ok := v.(GraphNodeReferencer)
if !ok {
continue
}
// we only want temporary value referencers
if _, ok := v.(graphNodeTemporaryValue); !ok {
continue
}
modulePath := rn.ModulePath()
// If this value is possibly consumed by a provider configuration, we
// must attempt to evaluate early during a full destroy, and cannot
// wait on the resource destruction. This would also likely cause a
// cycle in most configurations.
des, _ := g.Descendents(rn)
providerDescendant := false
for _, v := range des {
if _, ok := v.(GraphNodeProvider); ok {
providerDescendant = true
break
}
}
if providerDescendant {
log.Printf("[WARN] Value %q has provider descendant, not waiting on referenced destroy instance", dag.VertexName(rn))
continue
}
refs := rn.References()
for _, ref := range refs {
var addr addrs.ConfigResource
// get the configuration level address for this reference, since
// that is how we indexed the destroyers
switch tr := ref.Subject.(type) {
case addrs.Resource:
addr = addrs.ConfigResource{
Module: modulePath,
Resource: tr,
}
case addrs.ResourceInstance:
addr = addrs.ConfigResource{
Module: modulePath,
Resource: tr.ContainingResource(),
}
default:
// this is not a resource reference
continue
}
// see if there are any destroyers registered for this address
for _, dest := range destroyers[addr.String()] {
// check that we are not introducing a cycle, by looking for
// our own node in the ancestors of the destroy node.
// This should theoretically only happen if we had a provider
// descendant which was checked already, but since this edge is
// being added outside the normal rules of the graph, check
// again to be certain.
anc, _ := g.Ancestors(dest)
cycle := false
for _, a := range anc {
if a == rn {
log.Printf("[WARN] Not adding fixup edge %q->%q which introduces a cycle", dag.VertexName(rn), dag.VertexName(dest))
cycle = true
break
}
}
if cycle {
continue
}
log.Printf("[DEBUG] adding fixup edge %q->%q to prevent destroy node evaluation", dag.VertexName(rn), dag.VertexName(dest))
g.Connect(dag.BasicEdge(rn, dest))
}
}
}
return nil
}