2017-01-19 05:47:56 +01:00
package local
import (
2017-09-01 04:19:06 +02:00
"bytes"
2017-01-19 05:47:56 +01:00
"context"
"fmt"
"log"
"os"
"strings"
2018-03-21 02:43:02 +01:00
"github.com/hashicorp/terraform/tfdiags"
2017-01-19 05:47:56 +01:00
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/command/format"
"github.com/hashicorp/terraform/terraform"
)
func ( b * Local ) opPlan (
2018-02-10 00:10:52 +01:00
stopCtx context . Context ,
cancelCtx context . Context ,
2017-01-19 05:47:56 +01:00
op * backend . Operation ,
runningOp * backend . RunningOperation ) {
2018-03-21 02:43:02 +01:00
2017-01-19 05:47:56 +01:00
log . Printf ( "[INFO] backend/local: starting Plan operation" )
2018-03-21 02:43:02 +01:00
var diags tfdiags . Diagnostics
2017-01-19 05:47:56 +01:00
2018-03-21 02:43:02 +01:00
if b . CLI != nil && op . Plan != nil {
diags = diags . Append ( tfdiags . Sourceless (
tfdiags . Error ,
"Can't re-plan a saved plan" ,
"The plan command was given a saved plan file as its input. This command generates a new plan, and so it requires a configuration directory as its argument." ,
) )
b . ReportResult ( runningOp , diags )
2017-01-30 04:51:54 +01:00
return
}
2018-03-21 02:43:02 +01:00
// Local planning requires a config, unless we're planning to destroy.
if ! op . Destroy && ! op . HasConfig ( ) {
diags = diags . Append ( tfdiags . Sourceless (
tfdiags . Error ,
"No configuration files" ,
"Plan requires configuration to be present. Planning without a configuration would mark everything for destruction, which is normally not what is desired. If you would like to destroy everything, run plan with the -destroy option. Otherwise, create a Terraform configuration file (.tf file) and try again." ,
) )
b . ReportResult ( runningOp , diags )
return
2017-01-30 04:51:54 +01:00
}
2017-01-19 05:47:56 +01:00
// Setup our count hook that keeps track of resource changes
countHook := new ( CountHook )
if b . ContextOpts == nil {
b . ContextOpts = new ( terraform . ContextOpts )
}
old := b . ContextOpts . Hooks
defer func ( ) { b . ContextOpts . Hooks = old } ( )
b . ContextOpts . Hooks = append ( b . ContextOpts . Hooks , countHook )
// Get our context
2017-02-02 00:16:16 +01:00
tfCtx , opState , err := b . context ( op )
2017-01-19 05:47:56 +01:00
if err != nil {
2018-03-21 02:43:02 +01:00
diags = diags . Append ( err )
b . ReportResult ( runningOp , diags )
2017-01-19 05:47:56 +01:00
return
}
// Setup the state
runningOp . State = tfCtx . State ( )
// If we're refreshing before plan, perform that
if op . PlanRefresh {
log . Printf ( "[INFO] backend/local: plan calling Refresh" )
2017-01-19 06:41:59 +01:00
if b . CLI != nil {
b . CLI . Output ( b . Colorize ( ) . Color ( strings . TrimSpace ( planRefreshing ) + "\n" ) )
}
2017-01-19 05:47:56 +01:00
_ , err := tfCtx . Refresh ( )
if err != nil {
2018-03-21 02:43:02 +01:00
diags = diags . Append ( err )
b . ReportResult ( runningOp , diags )
2017-01-19 05:47:56 +01:00
return
}
2017-09-01 04:19:06 +02:00
if b . CLI != nil {
b . CLI . Output ( "\n------------------------------------------------------------------------" )
}
2017-01-19 05:47:56 +01:00
}
2017-12-02 23:31:28 +01:00
// Perform the plan in a goroutine so we can be interrupted
var plan * terraform . Plan
var planErr error
doneCh := make ( chan struct { } )
go func ( ) {
defer close ( doneCh )
log . Printf ( "[INFO] backend/local: plan calling Plan" )
plan , planErr = tfCtx . Plan ( )
} ( )
2018-02-12 17:52:21 +01:00
if b . opWait ( doneCh , stopCtx , cancelCtx , tfCtx , opState ) {
2018-02-10 00:10:52 +01:00
return
2017-01-19 05:47:56 +01:00
}
2017-12-02 23:31:28 +01:00
if planErr != nil {
2018-03-21 02:43:02 +01:00
diags = diags . Append ( planErr )
b . ReportResult ( runningOp , diags )
2017-12-02 23:31:28 +01:00
return
}
2017-01-19 05:47:56 +01:00
// Record state
runningOp . PlanEmpty = plan . Diff . Empty ( )
// Save the plan to disk
if path := op . PlanOutPath ; path != "" {
// Write the backend if we have one
plan . Backend = op . PlanOutBackend
2017-03-20 18:05:24 +01:00
// This works around a bug (#12871) which is no longer possible to
// trigger but will exist for already corrupted upgrades.
if plan . Backend != nil && plan . State != nil {
plan . State . Remote = nil
}
2017-01-19 05:47:56 +01:00
log . Printf ( "[INFO] backend/local: writing plan output to: %s" , path )
f , err := os . Create ( path )
if err == nil {
err = terraform . WritePlan ( plan , f )
}
f . Close ( )
if err != nil {
2018-03-21 02:43:02 +01:00
diags = diags . Append ( tfdiags . Sourceless (
tfdiags . Error ,
"Failed to write plan file" ,
fmt . Sprintf ( "The plan file could not be written: %s." , err ) ,
) )
b . ReportResult ( runningOp , diags )
2017-01-19 05:47:56 +01:00
return
}
}
// Perform some output tasks if we have a CLI to output to.
if b . CLI != nil {
command/format: improve consistency of plan results
Previously the rendered plan output was constructed directly from the
core plan and then annotated with counts derived from the count hook.
At various places we applied little adjustments to deal with the fact that
the user-facing diff model is not identical to the internal diff model,
including the special handling of data source reads and destroys. Since
this logic was just muddled into the rendering code, it behaved
inconsistently with the tally of adds, updates and deletes.
This change reworks the plan formatter so that it happens in two stages:
- First, we produce a specialized Plan object that is tailored for use
in the UI. This applies all the relevant logic to transform the
physical model into the user model.
- Second, we do a straightforward visual rendering of the display-oriented
plan object.
For the moment this is slightly overkill since there's only one rendering
path, but it does give us the benefit of letting the counts be derived
from the same data as the full detailed diff, ensuring that they'll stay
consistent.
Later we may choose to have other UIs for plans, such as a
machine-readable output intended to drive a web UI. In that case, we'd
want the web UI to consume a serialization of the _display-oriented_ plan
so that it doesn't need to re-implement all of these UI special cases.
This introduces to core a new diff action type for "refresh". Currently
this is used _only_ in the UI layer, to represent data source reads.
Later it would be good to use this type for the core diff as well, to
improve consistency, but that is left for another day to keep this change
focused on the UI.
2017-08-24 01:23:02 +02:00
dispPlan := format . NewPlan ( plan )
if dispPlan . Empty ( ) {
2017-09-01 04:19:06 +02:00
b . CLI . Output ( "\n" + b . Colorize ( ) . Color ( strings . TrimSpace ( planNoChanges ) ) )
2017-01-19 05:47:56 +01:00
return
}
2017-09-01 04:19:06 +02:00
b . renderPlan ( dispPlan )
2018-03-21 02:43:02 +01:00
// If we've accumulated any warnings along the way then we'll show them
// here just before we show the summary and next steps. If we encountered
// errors then we would've returned early at some other point above.
b . ShowDiagnostics ( diags )
2017-09-09 02:14:37 +02:00
// Give the user some next-steps, unless we're running in an automation
// tool which is presumed to provide its own UI for further actions.
if ! b . RunningInAutomation {
b . CLI . Output ( "\n------------------------------------------------------------------------" )
if path := op . PlanOutPath ; path == "" {
b . CLI . Output ( fmt . Sprintf (
"\n" + strings . TrimSpace ( planHeaderNoOutput ) + "\n" ,
) )
} else {
b . CLI . Output ( fmt . Sprintf (
"\n" + strings . TrimSpace ( planHeaderYesOutput ) + "\n" ,
path , path ,
) )
}
2017-01-19 05:47:56 +01:00
}
2017-09-01 04:19:06 +02:00
}
}
2017-01-19 05:47:56 +01:00
2017-09-01 04:19:06 +02:00
func ( b * Local ) renderPlan ( dispPlan * format . Plan ) {
2017-01-19 05:47:56 +01:00
2017-09-01 04:19:06 +02:00
headerBuf := & bytes . Buffer { }
fmt . Fprintf ( headerBuf , "\n%s\n" , strings . TrimSpace ( planHeaderIntro ) )
counts := dispPlan . ActionCounts ( )
if counts [ terraform . DiffCreate ] > 0 {
fmt . Fprintf ( headerBuf , "%s create\n" , format . DiffActionSymbol ( terraform . DiffCreate ) )
}
if counts [ terraform . DiffUpdate ] > 0 {
fmt . Fprintf ( headerBuf , "%s update in-place\n" , format . DiffActionSymbol ( terraform . DiffUpdate ) )
}
if counts [ terraform . DiffDestroy ] > 0 {
fmt . Fprintf ( headerBuf , "%s destroy\n" , format . DiffActionSymbol ( terraform . DiffDestroy ) )
2017-01-19 05:47:56 +01:00
}
2017-09-01 04:19:06 +02:00
if counts [ terraform . DiffDestroyCreate ] > 0 {
fmt . Fprintf ( headerBuf , "%s destroy and then create replacement\n" , format . DiffActionSymbol ( terraform . DiffDestroyCreate ) )
}
if counts [ terraform . DiffRefresh ] > 0 {
fmt . Fprintf ( headerBuf , "%s read (data resources)\n" , format . DiffActionSymbol ( terraform . DiffRefresh ) )
}
b . CLI . Output ( b . Colorize ( ) . Color ( headerBuf . String ( ) ) )
b . CLI . Output ( "Terraform will perform the following actions:\n" )
b . CLI . Output ( dispPlan . Format ( b . Colorize ( ) ) )
stats := dispPlan . Stats ( )
b . CLI . Output ( b . Colorize ( ) . Color ( fmt . Sprintf (
"[reset][bold]Plan:[reset] " +
"%d to add, %d to change, %d to destroy." ,
stats . ToAdd , stats . ToChange , stats . ToDestroy ,
) ) )
2017-01-19 05:47:56 +01:00
}
2017-01-30 04:51:54 +01:00
// planErrNoConfig is the legacy no-configuration error text (the tfdiags
// path in opPlan now carries an equivalent message inline).
const planErrNoConfig = `
No configuration files found!

Plan requires configuration to be present. Planning without a configuration
would mark everything for destruction, which is normally not what is desired.
If you would like to destroy everything, please run plan with the "-destroy"
flag or create a single empty configuration file. Otherwise, please create
a Terraform configuration file in the path being executed and try again.
`

// planHeaderIntro introduces the legend of action symbols in renderPlan.
const planHeaderIntro = `
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
`

// planHeaderNoOutput is shown after a plan when no -out file was given.
const planHeaderNoOutput = `
Note: You didn't specify an "-out" parameter to save this plan, so Terraform
can't guarantee that exactly these actions will be performed if
"terraform apply" is subsequently run.
`

// planHeaderYesOutput is shown after a plan saved with -out; it takes two
// arguments, both the saved plan path (%s for display, %q for the command).
const planHeaderYesOutput = `
This plan was saved to: %s

To perform exactly these actions, run the following command to apply:
    terraform apply %q
`

// planNoChanges is shown when the computed plan contains no changes.
// The bracketed tokens are colorize codes consumed by b.Colorize().
const planNoChanges = `
[reset][bold][green]No changes. Infrastructure is up-to-date.[reset][green]

This means that Terraform did not detect any differences between your
configuration and real physical resources that exist. As a result, no
actions need to be performed.
`

// planRefreshing is shown before the pre-plan refresh begins.
const planRefreshing = `
[reset][bold]Refreshing Terraform state in-memory prior to plan...[reset]
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
`