Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refresh during destroy #27408

Merged
merged 4 commits into from
Jan 11, 2021
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
refresh state during a destroy plan
Because the destroy plan only creates the necessary changes for apply to
remove all the resources, it does no reading of resources or data
sources, leading to stale data in the state. In most cases this is not a
problem, but when a provider configuration is using resource values, the
provider may not be able to run correctly during apply. In prior
versions of terraform, the implicit refresh that happened during
`terraform destroy` would update the data sources and remove missing
resources from state as required.

The destroy plan graph has a minimal amount of information, so it is not
feasible to incorporate the reading of resources into the operation without
completely replicating the normal plan graph, and updating the plan
graph and all destroy node implementations is also a considerable amount
of refactoring. Instead, we can run a normal plan which is used to
refresh the state before creating the destroy plan. This brings back
similar behavior to core versions prior to 0.14, and the refresh can
still be skipped using the `-refresh=false` cli flag.
  • Loading branch information
jbardin committed Jan 8, 2021
commit 0b3b84acc1ade2e394fcb7e3b956bf2ea0789eb1
78 changes: 65 additions & 13 deletions terraform/context.go
Original file line number Diff line number Diff line change
Expand Up @@ -547,44 +547,96 @@ The -target option is not for routine use, and is provided only for exceptional
varVals[k] = dv
}

p := &plans.Plan{
plan := &plans.Plan{
VariableValues: varVals,
TargetAddrs: c.targets,
ProviderSHA256s: c.providerSHA256s,
}

operation := walkPlan
graphType := GraphTypePlan
if c.destroy {
operation = walkPlanDestroy
graphType = GraphTypePlanDestroy
switch {
case c.destroy:
diags = diags.Append(c.destroyPlan(plan))
default:
diags = diags.Append(c.plan(plan))
}

graph, graphDiags := c.Graph(graphType, nil)
return plan, diags
}

func (c *Context) plan(plan *plans.Plan) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics

graph, graphDiags := c.Graph(GraphTypePlan, nil)
diags = diags.Append(graphDiags)
if graphDiags.HasErrors() {
return nil, diags
return diags
}

// Do the walk
walker, walkDiags := c.walk(graph, operation)
walker, walkDiags := c.walk(graph, walkPlan)
diags = diags.Append(walker.NonFatalDiagnostics)
diags = diags.Append(walkDiags)
if walkDiags.HasErrors() {
return nil, diags
return diags
}
p.Changes = c.changes
plan.Changes = c.changes

c.refreshState.SyncWrapper().RemovePlannedResourceInstanceObjects()

refreshedState := c.refreshState.DeepCopy()
p.State = refreshedState
plan.State = refreshedState

// replace the working state with the updated state, so that immediate calls
// to Apply work as expected.
c.state = refreshedState

return p, diags
return diags
}

func (c *Context) destroyPlan(destroyPlan *plans.Plan) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
c.changes = plans.NewChanges()

// A destroy plan starts by running Refresh to read any pending data
// sources, and remove missing managed resources. This is required because
// a "destroy plan" is only creating delete changes, and is essentially a
// local operation.
if !c.skipRefresh {
refreshPlan := &plans.Plan{
VariableValues: destroyPlan.VariableValues,
TargetAddrs: c.targets,
ProviderSHA256s: c.providerSHA256s,
}

refreshDiags := c.plan(refreshPlan)

diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags
}

// insert the refreshed state into the destroy plan result, and discard
// the changes recorded from the refresh.
destroyPlan.State = refreshPlan.State
c.changes = plans.NewChanges()
}

graph, graphDiags := c.Graph(GraphTypePlanDestroy, nil)
diags = diags.Append(graphDiags)
if graphDiags.HasErrors() {
return diags
}

// Do the walk
walker, walkDiags := c.walk(graph, walkPlan)
diags = diags.Append(walker.NonFatalDiagnostics)
diags = diags.Append(walkDiags)
if walkDiags.HasErrors() {
return diags
}

destroyPlan.Changes = c.changes
return diags
}

// Refresh goes through all the resources in the state and refreshes them
Expand Down
8 changes: 5 additions & 3 deletions terraform/graph_builder_destroy_plan.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,10 @@ import (
// planning a pure-destroy.
//
// Planning a pure destroy operation is simple because we can ignore most
// ordering configuration and simply reverse the state.
// ordering configuration and simply reverse the state. This graph mainly
// exists for targeting, because we need to walk the destroy dependencies to
// ensure we plan the required resources. Without the requirement for
// targeting, the plan could theoretically be created directly from the state.
type DestroyPlanGraphBuilder struct {
// Config is the configuration tree to build the plan from.
Config *configs.Config
Expand Down Expand Up @@ -72,6 +75,7 @@ func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
State: b.State,
},

// Create the delete changes for root module outputs.
&OutputTransformer{
Config: b.Config,
Destroy: true,
Expand All @@ -93,8 +97,6 @@ func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
Schemas: b.Schemas,
},

// Target. Note we don't set "Destroy: true" here since we already
// created proper destroy ordering.
&TargetsTransformer{Targets: b.Targets},

// Close opened plugin connections
Expand Down
7 changes: 7 additions & 0 deletions terraform/node_resource_abstract_instance.go
Original file line number Diff line number Diff line change
Expand Up @@ -1401,6 +1401,13 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt
return plannedChange, plannedNewState, diags
}

// While this isn't a "diff", continue to call this for data sources.
diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
return h.PreDiff(n.Addr, states.CurrentGen, priorVal, configVal)
}))
if diags.HasErrors() {
return nil, nil, diags
}
// We have a complete configuration with no dependencies to wait on, so we
// can read the data source into the state.
newVal, readDiags := n.readDataSource(ctx, configVal)
Expand Down
4 changes: 1 addition & 3 deletions terraform/testdata/apply-destroy-data-resource/main.tf
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
data "null_data_source" "testing" {
inputs = {
test = "yes"
}
foo = "yes"
}
2 changes: 1 addition & 1 deletion terraform/testdata/plan-module-destroy-gh-1835/b/main.tf
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
variable "a_id" {}

resource "aws_instance" "b" {
command = "echo ${var.a_id}"
foo = "echo ${var.a_id}"
}