Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Unmark config values when planning a data source read #28539

Merged
merged 2 commits into the base branch (branch names not captured)
Apr 28, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
115 changes: 115 additions & 0 deletions terraform/context_plan2_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -515,3 +515,118 @@ output "root" {
t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want)
}
}

// TestContext2Plan_planDataSourceSensitiveNested verifies that planning a
// data source whose configuration references a sensitive (marked) resource
// attribute — here via a nested "foo" block — neither errors nor leaks the
// marks into the provider call: the data read is deferred (plans.Read) and
// the upstream resource still plans as an ordinary update.
func TestContext2Plan_planDataSourceSensitiveNested(t *testing.T) {
	mod := testModuleInline(t, map[string]string{
		"main.tf": `
resource "test_instance" "bar" {
}

data "test_data_source" "foo" {
  foo {
    bar = test_instance.bar.sensitive
  }
}
`,
	})

	prov := &MockProvider{}
	// The planned "sensitive" value is unknown, which forces the dependent
	// data source read to be deferred to the apply phase.
	prov.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
		resp.PlannedState = cty.ObjectVal(map[string]cty.Value{
			"sensitive": cty.UnknownVal(cty.String),
		})
		return resp
	}
	prov.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test_instance": {
				Attributes: map[string]*configschema.Attribute{
					"sensitive": {
						Type:      cty.String,
						Computed:  true,
						Sensitive: true,
					},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"test_data_source": {
				Attributes: map[string]*configschema.Attribute{
					"id": {
						Type:     cty.String,
						Computed: true,
					},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"foo": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"bar": {Type: cty.String, Optional: true},
							},
						},
						Nesting: configschema.NestingSet,
					},
				},
			},
		},
	})

	// Seed prior state for both objects, each carrying sensitivity marks so
	// the plan must handle already-marked prior values.
	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("test_instance.bar").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"sensitive":"old"}`),
			AttrSensitivePaths: []cty.PathValueMarks{
				{
					Path:  cty.GetAttrPath("sensitive"),
					Marks: cty.NewValueMarks("sensitive"),
				},
			},
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`),
	)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("data.test_data_source.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"string":"data_id", "foo":[{"bar":"old"}]}`),
			AttrSensitivePaths: []cty.PathValueMarks{
				{
					Path:  cty.GetAttrPath("foo"),
					Marks: cty.NewValueMarks("sensitive"),
				},
			},
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Config: mod,
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(prov),
		},
		State: state,
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatal(diags.ErrWithWarnings())
	}

	// Every planned change must be one of the two expected actions; anything
	// else (including unexpected addresses) fails the test.
	for _, res := range plan.Changes.Resources {
		addr := res.Addr.String()
		if addr == "test_instance.bar" {
			if res.Action != plans.Update {
				t.Fatalf("unexpected %s change for %s", res.Action, res.Addr)
			}
		} else if addr == "data.test_data_source.foo" {
			if res.Action != plans.Read {
				t.Fatalf("unexpected %s change for %s", res.Action, res.Addr)
			}
		} else {
			t.Fatalf("unexpected %s change for %s", res.Action, res.Addr)
		}
	}
}
45 changes: 28 additions & 17 deletions terraform/node_resource_abstract.go
Original file line number Diff line number Diff line change
Expand Up @@ -347,10 +347,12 @@ func (n *NodeAbstractResource) writeResourceState(ctx EvalContext, addr addrs.Ab

// readResourceInstanceState reads the current object for a specific instance in
// the state.
func (n *NodeAbstractResource) readResourceInstanceState(ctx EvalContext, addr addrs.AbsResourceInstance) (*states.ResourceInstanceObject, error) {
func (n *NodeAbstractResource) readResourceInstanceState(ctx EvalContext, addr addrs.AbsResourceInstance) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
if err != nil {
return nil, err
diags = diags.Append(err)
return nil, diags
}

log.Printf("[TRACE] readResourceInstanceState: reading state for %s", addr)
Expand All @@ -365,36 +367,41 @@ func (n *NodeAbstractResource) readResourceInstanceState(ctx EvalContext, addr a
schema, currentVersion := (providerSchema).SchemaForResourceAddr(addr.Resource.ContainingResource())
if schema == nil {
// Shouldn't happen since we should've failed long ago if no schema is present
return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", addr)
return nil, diags.Append(fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", addr))
}
var diags tfdiags.Diagnostics
src, diags = upgradeResourceState(addr, provider, src, schema, currentVersion)
src, upgradeDiags := upgradeResourceState(addr, provider, src, schema, currentVersion)
if n.Config != nil {
upgradeDiags = upgradeDiags.InConfigBody(n.Config.Config, addr.String())
}
diags = diags.Append(upgradeDiags)
if diags.HasErrors() {
// Note that we don't have any channel to return warnings here. We'll
// accept that for now since warnings during a schema upgrade would
// be pretty weird anyway, since this operation is supposed to seem
// invisible to the user.
return nil, diags.Err()
return nil, diags
}

obj, err := src.Decode(schema.ImpliedType())
if err != nil {
return nil, err
diags = diags.Append(err)
}

return obj, nil
return obj, diags
}

// readResourceInstanceStateDeposed reads the deposed object for a specific
// instance in the state.
func (n *NodeAbstractResource) readResourceInstanceStateDeposed(ctx EvalContext, addr addrs.AbsResourceInstance, key states.DeposedKey) (*states.ResourceInstanceObject, error) {
func (n *NodeAbstractResource) readResourceInstanceStateDeposed(ctx EvalContext, addr addrs.AbsResourceInstance, key states.DeposedKey) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
if err != nil {
return nil, err
diags = diags.Append(err)
return nil, diags
}

if key == states.NotDeposed {
return nil, fmt.Errorf("readResourceInstanceStateDeposed used with no instance key; this is a bug in Terraform and should be reported")
return nil, diags.Append(fmt.Errorf("readResourceInstanceStateDeposed used with no instance key; this is a bug in Terraform and should be reported"))
}

log.Printf("[TRACE] readResourceInstanceStateDeposed: reading state for %s deposed object %s", addr, key)
Expand All @@ -403,31 +410,35 @@ func (n *NodeAbstractResource) readResourceInstanceStateDeposed(ctx EvalContext,
if src == nil {
// Presumably we only have deposed objects, then.
log.Printf("[TRACE] readResourceInstanceStateDeposed: no state present for %s deposed object %s", addr, key)
return nil, nil
return nil, diags
}

schema, currentVersion := (providerSchema).SchemaForResourceAddr(addr.Resource.ContainingResource())
if schema == nil {
// Shouldn't happen since we should've failed long ago if no schema is present
return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", addr)
return nil, diags.Append(fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", addr))

}

src, diags := upgradeResourceState(addr, provider, src, schema, currentVersion)
src, upgradeDiags := upgradeResourceState(addr, provider, src, schema, currentVersion)
if n.Config != nil {
upgradeDiags = upgradeDiags.InConfigBody(n.Config.Config, addr.String())
}
diags = diags.Append(upgradeDiags)
if diags.HasErrors() {
// Note that we don't have any channel to return warnings here. We'll
// accept that for now since warnings during a schema upgrade would
// be pretty weird anyway, since this operation is supposed to seem
// invisible to the user.
return nil, diags.Err()
return nil, diags
}

obj, err := src.Decode(schema.ImpliedType())
if err != nil {
return nil, err
diags = diags.Append(err)
}

return obj, nil
return obj, diags
}

// graphNodesAreResourceInstancesInDifferentInstancesOfSameModule is an
Expand Down
13 changes: 7 additions & 6 deletions terraform/node_resource_abstract_instance.go
Original file line number Diff line number Diff line change
Expand Up @@ -1346,6 +1346,11 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt
return nil, nil, diags
}

unmarkedConfigVal, configMarkPaths := configVal.UnmarkDeepWithPaths()
// We drop marks on the values used here as the result is only
// temporarily used for validation.
unmarkedPriorVal, _ := priorVal.UnmarkDeep()

configKnown := configVal.IsWhollyKnown()
// If our configuration contains any unknown values, or we depend on any
// unknown values then we must defer the read to the apply phase by
Expand All @@ -1358,14 +1363,15 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt
log.Printf("[TRACE] planDataSource: %s configuration not fully known yet, so deferring to apply phase", n.Addr)
}

proposedNewVal := objchange.PlannedDataResourceObject(schema, configVal)
proposedNewVal := objchange.PlannedDataResourceObject(schema, unmarkedConfigVal)

diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
return h.PreDiff(n.Addr, states.CurrentGen, priorVal, proposedNewVal)
}))
if diags.HasErrors() {
return nil, nil, diags
}
proposedNewVal = proposedNewVal.MarkWithPaths(configMarkPaths)

// Apply detects that the data source will need to be read by the After
// value containing unknowns from PlanDataResourceObject.
Expand Down Expand Up @@ -1408,11 +1414,6 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt

// if we have a prior value, we can check for any irregularities in the response
if !priorVal.IsNull() {
// We drop marks on the values used here as the result is only
// temporarily used for validation.
unmarkedConfigVal, _ := configVal.UnmarkDeep()
unmarkedPriorVal, _ := priorVal.UnmarkDeep()

// While we don't propose planned changes for data sources, we can
// generate a proposed value for comparison to ensure the data source
// is returning a result following the rules of the provider contract.
Expand Down
12 changes: 6 additions & 6 deletions terraform/node_resource_abstract_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -160,9 +160,9 @@ func TestNodeAbstractResource_ReadResourceInstanceState(t *testing.T) {
ctx.ProviderSchemaSchema = mockProvider.ProviderSchema()
ctx.ProviderProvider = providers.Interface(mockProvider)

got, err := test.Node.readResourceInstanceState(ctx, test.Node.Addr.Resource.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance))
if err != nil {
t.Fatalf("[%s] Got err: %#v", k, err.Error())
got, readDiags := test.Node.readResourceInstanceState(ctx, test.Node.Addr.Resource.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance))
if readDiags.HasErrors() {
t.Fatalf("[%s] Got err: %#v", k, readDiags.Err())
}

expected := test.ExpectedInstanceId
Expand Down Expand Up @@ -223,9 +223,9 @@ func TestNodeAbstractResource_ReadResourceInstanceStateDeposed(t *testing.T) {

key := states.DeposedKey("00000001") // shim from legacy state assigns 0th deposed index this key

got, err := test.Node.readResourceInstanceStateDeposed(ctx, test.Node.Addr.Resource.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), key)
if err != nil {
t.Fatalf("[%s] Got err: %#v", k, err.Error())
got, readDiags := test.Node.readResourceInstanceStateDeposed(ctx, test.Node.Addr.Resource.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), key)
if readDiags.HasErrors() {
t.Fatalf("[%s] Got err: %#v", k, readDiags.Err())
}

expected := test.ExpectedInstanceId
Expand Down
8 changes: 4 additions & 4 deletions terraform/node_resource_apply_instance.go
Original file line number Diff line number Diff line change
Expand Up @@ -217,8 +217,8 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
log.Printf("[TRACE] managedResourceExecute: prior object for %s now deposed with key %s", n.Addr, deposedKey)
}

state, err = n.readResourceInstanceState(ctx, n.ResourceInstanceAddr())
diags = diags.Append(err)
state, readDiags := n.readResourceInstanceState(ctx, n.ResourceInstanceAddr())
diags = diags.Append(readDiags)
if diags.HasErrors() {
return diags
}
Expand All @@ -244,8 +244,8 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
return diags
}

state, err = n.readResourceInstanceState(ctx, n.ResourceInstanceAddr())
diags = diags.Append(err)
state, readDiags = n.readResourceInstanceState(ctx, n.ResourceInstanceAddr())
diags = diags.Append(readDiags)
if diags.HasErrors() {
return diags
}
Expand Down
4 changes: 2 additions & 2 deletions terraform/node_resource_destroy.go
Original file line number Diff line number Diff line change
Expand Up @@ -177,8 +177,8 @@ func (n *NodeDestroyResourceInstance) managedResourceExecute(ctx EvalContext) (d
return diags
}

state, err = n.readResourceInstanceState(ctx, addr)
diags = diags.Append(err)
state, readDiags := n.readResourceInstanceState(ctx, addr)
diags = diags.Append(readDiags)
if diags.HasErrors() {
return diags
}
Expand Down
7 changes: 4 additions & 3 deletions terraform/node_resource_destroy_deposed.go
Original file line number Diff line number Diff line change
Expand Up @@ -184,9 +184,10 @@ func (n *NodeDestroyDeposedResourceInstanceObject) Execute(ctx EvalContext, op w
// Always write the resource back to the state deposed. If it
// was successfully destroyed it will be pruned. If it was not, it will
// be caught on the next run.
err = n.writeResourceInstanceState(ctx, state)
if err != nil {
return diags.Append(err)
writeDiags := n.writeResourceInstanceState(ctx, state)
diags.Append(writeDiags)
if diags.HasErrors() {
return diags
}

diags = diags.Append(n.postApplyHook(ctx, state, diags.Err()))
Expand Down
10 changes: 4 additions & 6 deletions terraform/node_resource_plan_instance.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,16 +52,15 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (di
addr := n.ResourceInstanceAddr()

var change *plans.ResourceInstanceChange
var state *states.ResourceInstanceObject

_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
diags = diags.Append(err)
if diags.HasErrors() {
return diags
}

state, err = n.readResourceInstanceState(ctx, addr)
diags = diags.Append(err)
state, readDiags := n.readResourceInstanceState(ctx, addr)
diags = diags.Append(readDiags)
if diags.HasErrors() {
return diags
}
Expand Down Expand Up @@ -97,7 +96,6 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
addr := n.ResourceInstanceAddr()

var change *plans.ResourceInstanceChange
var instanceRefreshState *states.ResourceInstanceObject
var instancePlanState *states.ResourceInstanceObject

_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
Expand All @@ -111,8 +109,8 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
return diags
}

instanceRefreshState, err = n.readResourceInstanceState(ctx, addr)
diags = diags.Append(err)
instanceRefreshState, readDiags := n.readResourceInstanceState(ctx, addr)
diags = diags.Append(readDiags)
if diags.HasErrors() {
return diags
}
Expand Down
Loading