core: -replace to emit only one warning for incomplete address

If the user gives an index-less address for a resource that expects
instance keys, then previously we would've emitted one warning per
declared instance of the resource, which is overwhelming and not
especially helpful.

Instead, we'll perform that check prior to expanding resources into
resource instances, and thus we can report a single warning that covers
all of the instances at once.
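
For illustration only (the resource name and count here are hypothetical,
not part of this change): given a resource "aws_instance.example" declared
with count = 3, a request like

    terraform plan -replace=aws_instance.example

previously produced three copies of the "Incompletely-matched
force-replace resource instance" warning, one per declared instance,
whereas after this change it produces a single warning listing the valid
alternatives:

    -replace="aws_instance.example[0]"
    -replace="aws_instance.example[1]"
    -replace="aws_instance.example[2]"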

This does unfortunately come at the expense of splitting the logic for
dealing with the "force replace" addresses into two places, which will
likely make later maintenance harder. In an attempt to mitigate that,
I've included a comment in each place that mentions the other place, which
future maintainers will hopefully keep up to date if that situation
changes.
Martin Atkins 2021-04-30 16:55:53 -07:00
parent 1d3e34e35e
commit c63c06d3c4
2 changed files with 84 additions and 17 deletions


@@ -824,22 +824,12 @@ func (n *NodeAbstractResourceInstance) plan(
matchedForceReplace = true
break
}
// For "force replace" purposes we require an exact resource instance
// address to match, but just in case a user forgets to include the
// instance key for a multi-instance resource we'll give them a
// warning hint.
if n.Addr.Resource.Key != addrs.NoKey && candidateAddr.Resource.Key == addrs.NoKey {
if n.Addr.Resource.Resource.Equal(candidateAddr.Resource.Resource) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"Incompletely-matched force-replace resource instance",
fmt.Sprintf(
"Your force-replace request for %s didn't match %s because it lacks the instance key.\n\nTo force replacement of this particular instance, use -replace=%q .",
candidateAddr, n.Addr, n.Addr,
),
))
}
}
// address to match. If a user forgets to include the instance key
// for a multi-instance resource then it won't match here, but we
// have an earlier check in NodePlannableResource.Execute that should
// prevent us from getting here in that case.
}
// Unmark for this test for value equality.


@@ -1,7 +1,9 @@
package terraform
import (
"fmt"
"log"
"strings"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/dag"
@@ -194,13 +196,88 @@ func (n *NodePlannableResource) Name() string {
// GraphNodeExecutable
func (n *NodePlannableResource) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
if n.Config == nil {
// Nothing to do, then.
log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", n.Name())
return nil
return diags
}
return n.writeResourceState(ctx, n.Addr)
// writeResourceState is responsible for informing the expander of what
// repetition mode this resource has, which allows expander.ExpandResource
// to work below.
moreDiags := n.writeResourceState(ctx, n.Addr)
diags = diags.Append(moreDiags)
if moreDiags.HasErrors() {
return diags
}
// Before we expand our resource into potentially many resource instances,
// we'll verify that any mention of this resource in n.forceReplace is
// consistent with the repetition mode of the resource. In other words,
// we're aiming to catch a situation where naming a particular resource
// instance would require an instance key but the given address has none.
expander := ctx.InstanceExpander()
instanceAddrs := expander.ExpandResource(n.ResourceAddr().Absolute(ctx.Path()))
// If there's a number of instances other than 1 then we definitely need
// an index.
mustHaveIndex := len(instanceAddrs) != 1
// If there's only one instance then we might still need an index, if the
// instance address has one.
if len(instanceAddrs) == 1 && instanceAddrs[0].Resource.Key != addrs.NoKey {
mustHaveIndex = true
}
if mustHaveIndex {
for _, candidateAddr := range n.forceReplace {
if candidateAddr.Resource.Key == addrs.NoKey {
if n.Addr.Resource.Equal(candidateAddr.Resource.Resource) {
switch {
case len(instanceAddrs) == 0:
// In this case there _are_ no instances to replace, so
// there isn't any alternative address for us to suggest.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"Incompletely-matched force-replace resource instance",
fmt.Sprintf(
"Your force-replace request for %s doesn't match any resource instances because this resource doesn't have any instances.",
candidateAddr,
),
))
case len(instanceAddrs) == 1:
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"Incompletely-matched force-replace resource instance",
fmt.Sprintf(
"Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of the single declared instance, use the following option instead:\n -replace=%q",
candidateAddr, instanceAddrs[0],
),
))
default:
var possibleValidOptions strings.Builder
for _, addr := range instanceAddrs {
fmt.Fprintf(&possibleValidOptions, "\n -replace=%q", addr)
}
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"Incompletely-matched force-replace resource instance",
fmt.Sprintf(
"Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of particular instances, use one or more of the following options instead:%s",
candidateAddr, possibleValidOptions.String(),
),
))
}
}
}
}
}
// NOTE: The actual interpretation of n.forceReplace to produce replace
// actions is in NodeAbstractResourceInstance.plan, because we must do so
// on a per-instance basis rather than for the whole resource.
return diags
}
// GraphNodeDestroyerCBD
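
As a worked example of the new check above (hypothetical configurations,
used only for illustration): a resource declared with count = 1 still
expands to a single instance whose address carries the key [0], so the
second condition sets mustHaveIndex and an index-less -replace address
gets the single-instance warning suggesting the keyed address instead. A
resource with neither count nor for_each expands to one instance with no
key, so mustHaveIndex stays false here and an index-less address simply
matches that instance in NodeAbstractResourceInstance.plan.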