summaryrefslogtreecommitdiff
path: root/vendor/github.com/hashicorp/terraform/terraform
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/hashicorp/terraform/terraform')
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context.go1022
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_components.go65
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go32
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_import.go77
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/debug.go523
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/diff.go866
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval.go63
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_apply.go359
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context.go84
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go347
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go208
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_count.go58
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go78
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go25
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_diff.go478
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_error.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_filter.go25
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go49
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_if.go26
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go76
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go24
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_noop.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_output.go119
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_provider.go164
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go47
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go139
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go55
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_resource.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go27
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_state.go324
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_validate.go227
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go74
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_variable.go279
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go119
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph.go172
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder.go77
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go141
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go67
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go76
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go27
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go161
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go132
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go36
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_dot.go9
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_walk.go60
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go157
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go18
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/hook.go137
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/hook_mock.go245
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/hook_stop.go87
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/instancetype.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/interpolate.go790
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go14
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go22
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go198
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go29
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go125
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_output.go76
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go35
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider.go11
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go85
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go44
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go240
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go50
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go357
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go288
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go83
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go53
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go190
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go54
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go100
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go158
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go22
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/path.go24
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/plan.go153
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource.go360
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_address.go301
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provider.go204
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go297
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go54
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go72
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/semantics.go132
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow.go28
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow_components.go273
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow_context.go158
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go815
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go282
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state.go2118
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state_add.go374
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state_filter.go267
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go189
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go142
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state_v1.go145
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/testing.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform.go52
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go80
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go78
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go68
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_config.go135
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go80
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go28
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go168
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go257
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go269
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_diff.go86
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_expand.go48
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go241
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go120
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go110
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go64
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go78
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_output.go59
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_provider.go380
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go50
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go206
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_reference.go321
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go51
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_root.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_state.go65
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_targets.go144
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_variable.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go44
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_input.go26
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_output.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go9
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/util.go93
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/variables.go166
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/version.go31
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/version_required.go69
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go16
143 files changed, 21209 insertions, 0 deletions
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go
new file mode 100644
index 00000000..306128ed
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context.go
@@ -0,0 +1,1022 @@
+package terraform
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/hcl"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/helper/experiment"
+)
+
// InputMode defines what sort of input will be asked for when Input
// is called on Context.
type InputMode byte

const (
	// InputModeVar asks for all variables
	InputModeVar InputMode = 1 << iota

	// InputModeVarUnset asks for variables which are not set yet.
	// InputModeVar must be set for this to have an effect.
	InputModeVarUnset

	// InputModeProvider asks for provider variables
	InputModeProvider

	// InputModeStd is the standard operating mode and asks for both variables
	// and providers. Note it does not include InputModeVarUnset, so all
	// variables are asked for, not only the unset ones.
	InputModeStd = InputModeVar | InputModeProvider
)

var (
	// contextFailOnShadowError will cause Context operations to return
	// errors when shadow operations fail. This is only used for testing.
	contextFailOnShadowError = false

	// contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every
	// Plan operation, effectively testing the Diff DeepCopy whenever
	// a Plan occurs. This is enabled for tests.
	contextTestDeepCopyOnPlan = false
)
+
// ContextOpts are the user-configurable options to create a context with
// NewContext.
type ContextOpts struct {
	Meta               *ContextMeta // metadata about the running context (state environment)
	Destroy            bool         // if true, operations plan/apply a destroy
	Diff               *Diff        // an existing diff to apply (e.g. from a saved Plan)
	Hooks              []Hook       // lifecycle hooks invoked during graph walks
	Module             *module.Tree // root configuration module tree
	Parallelism        int          // max concurrent graph nodes; 0 selects the default (10)
	State              *State       // current state; nil means an empty state is created
	StateFutureAllowed bool         // permit state written by a newer Terraform version
	Providers          map[string]ResourceProviderFactory    // provider factories by name
	Provisioners       map[string]ResourceProvisionerFactory // provisioner factories by name
	Shadow             bool         // enable the experimental shadow graph
	Targets            []string     // optional resource addresses limiting operations
	Variables          map[string]interface{}                // user-supplied variable values

	UIInput UIInput // used to prompt for variable and provider input
}

// ContextMeta is metadata about the running context. This is information
// that this package or structure cannot determine on its own but exposes
// into Terraform in various ways. This must be provided by the Context
// initializer.
type ContextMeta struct {
	Env string // Env is the state environment
}
+
// Context represents all the context that Terraform needs in order to
// perform operations on infrastructure. This structure is built using
// NewContext. See the documentation for that.
//
// Extra functions on Context can be found in context_*.go files.
type Context struct {
	// Maintainer note: Anytime this struct is changed, please verify
	// that newShadowContext still does the right thing. Tests should
	// fail regardless but putting this note here as well.

	components contextComponentFactory // factory for providers/provisioners
	destroy    bool                    // true when this context plans/applies a destroy
	diff       *Diff
	diffLock   sync.RWMutex // guards diff
	hooks      []Hook
	meta       *ContextMeta
	module     *module.Tree
	sh         *stopHook // hook used to interrupt graph walks on Stop
	shadow     bool      // whether the experimental shadow graph runs
	state      *State
	stateLock  sync.RWMutex // guards state
	targets    []string
	uiInput    UIInput
	variables  map[string]interface{}

	l                   sync.Mutex // Lock acquired during any task
	parallelSem         Semaphore  // bounds concurrent graph-node evaluation
	providerInputConfig map[string]map[string]interface{} // provider config gathered via Input
	runLock             sync.Mutex
	runCond             *sync.Cond // non-nil while an operation runs; broadcast on completion
	runContext          context.Context
	runContextCancel    context.CancelFunc // cancels runContext; nil once Stop has fired it
	shadowErr           error // error captured from the shadow operation
}
+
+// NewContext creates a new Context structure.
+//
+// Once a Context is creator, the pointer values within ContextOpts
+// should not be mutated in any way, since the pointers are copied, not
+// the values themselves.
+func NewContext(opts *ContextOpts) (*Context, error) {
+ // Validate the version requirement if it is given
+ if opts.Module != nil {
+ if err := checkRequiredVersion(opts.Module); err != nil {
+ return nil, err
+ }
+ }
+
+ // Copy all the hooks and add our stop hook. We don't append directly
+ // to the Config so that we're not modifying that in-place.
+ sh := new(stopHook)
+ hooks := make([]Hook, len(opts.Hooks)+1)
+ copy(hooks, opts.Hooks)
+ hooks[len(opts.Hooks)] = sh
+
+ state := opts.State
+ if state == nil {
+ state = new(State)
+ state.init()
+ }
+
+ // If our state is from the future, then error. Callers can avoid
+ // this error by explicitly setting `StateFutureAllowed`.
+ if !opts.StateFutureAllowed && state.FromFutureTerraform() {
+ return nil, fmt.Errorf(
+ "Terraform doesn't allow running any operations against a state\n"+
+ "that was written by a future Terraform version. The state is\n"+
+ "reporting it is written by Terraform '%s'.\n\n"+
+ "Please run at least that version of Terraform to continue.",
+ state.TFVersion)
+ }
+
+ // Explicitly reset our state version to our current version so that
+ // any operations we do will write out that our latest version
+ // has run.
+ state.TFVersion = Version
+
+ // Determine parallelism, default to 10. We do this both to limit
+ // CPU pressure but also to have an extra guard against rate throttling
+ // from providers.
+ par := opts.Parallelism
+ if par == 0 {
+ par = 10
+ }
+
+ // Set up the variables in the following sequence:
+ // 0 - Take default values from the configuration
+ // 1 - Take values from TF_VAR_x environment variables
+ // 2 - Take values specified in -var flags, overriding values
+ // set by environment variables if necessary. This includes
+ // values taken from -var-file in addition.
+ variables := make(map[string]interface{})
+
+ if opts.Module != nil {
+ var err error
+ variables, err = Variables(opts.Module, opts.Variables)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ diff := opts.Diff
+ if diff == nil {
+ diff = &Diff{}
+ }
+
+ return &Context{
+ components: &basicComponentFactory{
+ providers: opts.Providers,
+ provisioners: opts.Provisioners,
+ },
+ destroy: opts.Destroy,
+ diff: diff,
+ hooks: hooks,
+ meta: opts.Meta,
+ module: opts.Module,
+ shadow: opts.Shadow,
+ state: state,
+ targets: opts.Targets,
+ uiInput: opts.UIInput,
+ variables: variables,
+
+ parallelSem: NewSemaphore(par),
+ providerInputConfig: make(map[string]map[string]interface{}),
+ sh: sh,
+ }, nil
+}
+
// ContextGraphOpts are options controlling graph construction in
// Context.Graph.
type ContextGraphOpts struct {
	// If true, validates the graph structure (checks for cycles).
	Validate bool

	// Legacy graphs only: won't prune the graph
	Verbose bool
}
+
+// Graph returns the graph used for the given operation type.
+//
+// The most extensive or complex graph type is GraphTypePlan.
+func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
+ if opts == nil {
+ opts = &ContextGraphOpts{Validate: true}
+ }
+
+ log.Printf("[INFO] terraform: building graph: %s", typ)
+ switch typ {
+ case GraphTypeApply:
+ return (&ApplyGraphBuilder{
+ Module: c.module,
+ Diff: c.diff,
+ State: c.state,
+ Providers: c.components.ResourceProviders(),
+ Provisioners: c.components.ResourceProvisioners(),
+ Targets: c.targets,
+ Destroy: c.destroy,
+ Validate: opts.Validate,
+ }).Build(RootModulePath)
+
+ case GraphTypeInput:
+ // The input graph is just a slightly modified plan graph
+ fallthrough
+ case GraphTypeValidate:
+ // The validate graph is just a slightly modified plan graph
+ fallthrough
+ case GraphTypePlan:
+ // Create the plan graph builder
+ p := &PlanGraphBuilder{
+ Module: c.module,
+ State: c.state,
+ Providers: c.components.ResourceProviders(),
+ Targets: c.targets,
+ Validate: opts.Validate,
+ }
+
+ // Some special cases for other graph types shared with plan currently
+ var b GraphBuilder = p
+ switch typ {
+ case GraphTypeInput:
+ b = InputGraphBuilder(p)
+ case GraphTypeValidate:
+ // We need to set the provisioners so those can be validated
+ p.Provisioners = c.components.ResourceProvisioners()
+
+ b = ValidateGraphBuilder(p)
+ }
+
+ return b.Build(RootModulePath)
+
+ case GraphTypePlanDestroy:
+ return (&DestroyPlanGraphBuilder{
+ Module: c.module,
+ State: c.state,
+ Targets: c.targets,
+ Validate: opts.Validate,
+ }).Build(RootModulePath)
+
+ case GraphTypeRefresh:
+ return (&RefreshGraphBuilder{
+ Module: c.module,
+ State: c.state,
+ Providers: c.components.ResourceProviders(),
+ Targets: c.targets,
+ Validate: opts.Validate,
+ }).Build(RootModulePath)
+ }
+
+ return nil, fmt.Errorf("unknown graph type: %s", typ)
+}
+
// ShadowError returns any errors caught during a shadow operation.
//
// A shadow operation is an operation run in parallel to a real operation
// that performs the same tasks using new logic on copied state. The results
// are compared to ensure that the new logic works the same as the old logic.
// The shadow never affects the real operation or return values.
//
// The result of the shadow operation are only available through this function
// call after a real operation is complete.
//
// For API consumers of Context, you can safely ignore this function
// completely if you have no interest in helping report experimental feature
// errors to Terraform maintainers. Otherwise, please call this function
// after every operation and report this to the user.
//
// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
// the real state or result of a real operation. They are purely informational
// to assist in future Terraform versions being more stable. Please message
// this effectively to the end user.
//
// This must be called only when no other operation is running (refresh,
// plan, etc.). The result can be used in parallel to any other operation
// running. (The field is read without holding c.l, which is why the
// caller must ensure no operation is in flight.)
func (c *Context) ShadowError() error {
	return c.shadowErr
}
+
// State returns a copy of the current state associated with this context.
//
// This cannot safely be called in parallel with any other Context function.
// The returned value is produced by DeepCopy, so mutating it does not
// affect the context's internal state.
func (c *Context) State() *State {
	return c.state.DeepCopy()
}
+
// Interpolater returns an Interpolater built on a copy of the state
// that can be used to test interpolation values.
//
// The interpolater receives fresh locks and a deep-copied state, so it is
// isolated from concurrent state changes; note however that it shares
// (does not copy) the context's variable map.
func (c *Context) Interpolater() *Interpolater {
	var varLock sync.Mutex
	var stateLock sync.RWMutex
	return &Interpolater{
		Operation:          walkApply,
		Meta:               c.meta,
		Module:             c.module,
		State:              c.state.DeepCopy(),
		StateLock:          &stateLock,
		VariableValues:     c.variables,
		VariableValuesLock: &varLock,
	}
}
+
+// Input asks for input to fill variables and provider configurations.
+// This modifies the configuration in-place, so asking for Input twice
+// may result in different UI output showing different current values.
+func (c *Context) Input(mode InputMode) error {
+ defer c.acquireRun("input")()
+
+ if mode&InputModeVar != 0 {
+ // Walk the variables first for the root module. We walk them in
+ // alphabetical order for UX reasons.
+ rootConf := c.module.Config()
+ names := make([]string, len(rootConf.Variables))
+ m := make(map[string]*config.Variable)
+ for i, v := range rootConf.Variables {
+ names[i] = v.Name
+ m[v.Name] = v
+ }
+ sort.Strings(names)
+ for _, n := range names {
+ // If we only care about unset variables, then if the variable
+ // is set, continue on.
+ if mode&InputModeVarUnset != 0 {
+ if _, ok := c.variables[n]; ok {
+ continue
+ }
+ }
+
+ var valueType config.VariableType
+
+ v := m[n]
+ switch valueType = v.Type(); valueType {
+ case config.VariableTypeUnknown:
+ continue
+ case config.VariableTypeMap:
+ // OK
+ case config.VariableTypeList:
+ // OK
+ case config.VariableTypeString:
+ // OK
+ default:
+ panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
+ }
+
+ // If the variable is not already set, and the variable defines a
+ // default, use that for the value.
+ if _, ok := c.variables[n]; !ok {
+ if v.Default != nil {
+ c.variables[n] = v.Default.(string)
+ continue
+ }
+ }
+
+ // this should only happen during tests
+ if c.uiInput == nil {
+ log.Println("[WARN] Content.uiInput is nil")
+ continue
+ }
+
+ // Ask the user for a value for this variable
+ var value string
+ retry := 0
+ for {
+ var err error
+ value, err = c.uiInput.Input(&InputOpts{
+ Id: fmt.Sprintf("var.%s", n),
+ Query: fmt.Sprintf("var.%s", n),
+ Description: v.Description,
+ })
+ if err != nil {
+ return fmt.Errorf(
+ "Error asking for %s: %s", n, err)
+ }
+
+ if value == "" && v.Required() {
+ // Redo if it is required, but abort if we keep getting
+ // blank entries
+ if retry > 2 {
+ return fmt.Errorf("missing required value for %q", n)
+ }
+ retry++
+ continue
+ }
+
+ break
+ }
+
+ // no value provided, so don't set the variable at all
+ if value == "" {
+ continue
+ }
+
+ decoded, err := parseVariableAsHCL(n, value, valueType)
+ if err != nil {
+ return err
+ }
+
+ if decoded != nil {
+ c.variables[n] = decoded
+ }
+ }
+ }
+
+ if mode&InputModeProvider != 0 {
+ // Build the graph
+ graph, err := c.Graph(GraphTypeInput, nil)
+ if err != nil {
+ return err
+ }
+
+ // Do the walk
+ if _, err := c.walk(graph, nil, walkInput); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// Apply applies the changes represented by this context and returns
// the resulting state.
//
// Even in the case an error is returned, the state may be returned and will
// potentially be partially updated. In addition to returning the resulting
// state, this context is updated with the latest state.
//
// If the state is required after an error, the caller should call
// Context.State, rather than rely on the return value.
//
// TODO: Apply and Refresh should either always return a state, or rely on the
// State() method. Currently the helper/resource testing framework relies
// on the absence of a returned state to determine if Destroy can be
// called, so that will need to be refactored before this can be changed.
func (c *Context) Apply() (*State, error) {
	defer c.acquireRun("apply")()

	// Copy our own state so the walk mutates a private copy.
	c.state = c.state.DeepCopy()

	// Build the graph.
	graph, err := c.Graph(GraphTypeApply, nil)
	if err != nil {
		return nil, err
	}

	// Determine the operation
	operation := walkApply
	if c.destroy {
		operation = walkDestroy
	}

	// Walk the graph
	// NOTE(review): this assumes walk always returns a non-nil walker,
	// even on error — confirm against Context.walk before relying on it.
	walker, err := c.walk(graph, graph, operation)
	if len(walker.ValidationErrors) > 0 {
		err = multierror.Append(err, walker.ValidationErrors...)
	}

	// Clean out any unused things
	c.state.prune()

	return c.state, err
}
+
// Plan generates an execution plan for the given context.
//
// The execution plan encapsulates the context and can be stored
// in order to reinstantiate a context later for Apply.
//
// Plan also updates the diff of this context to be the diff generated
// by the plan, so Apply can be called after.
func (c *Context) Plan() (*Plan, error) {
	defer c.acquireRun("plan")()

	p := &Plan{
		Module:  c.module,
		Vars:    c.variables,
		State:   c.state,
		Targets: c.targets,
	}

	var operation walkOperation
	if c.destroy {
		operation = walkPlanDestroy
	} else {
		// Set our state to be something temporary. We do this so that
		// the plan can update a fake state so that variables work, then
		// we replace it back with our old state.
		//
		// Note the deferred restore runs after the graph walk below, so
		// all plan-time state mutations are discarded.
		old := c.state
		if old == nil {
			c.state = &State{}
			c.state.init()
		} else {
			c.state = old.DeepCopy()
		}
		defer func() {
			c.state = old
		}()

		operation = walkPlan
	}

	// Setup our diff: the walk below fills this in, and it becomes both
	// the plan's diff and this context's diff for a later Apply.
	c.diffLock.Lock()
	c.diff = new(Diff)
	c.diff.init()
	c.diffLock.Unlock()

	// Build the graph.
	graphType := GraphTypePlan
	if c.destroy {
		graphType = GraphTypePlanDestroy
	}
	graph, err := c.Graph(graphType, nil)
	if err != nil {
		return nil, err
	}

	// Do the walk
	walker, err := c.walk(graph, graph, operation)
	if err != nil {
		return nil, err
	}
	p.Diff = c.diff

	// If this is true, it means we're running unit tests. In this case,
	// we perform a deep copy just to ensure that all context tests also
	// test that a diff is copy-able. This will panic if it fails. This
	// is enabled during unit tests.
	//
	// This should never be true during production usage, but even if it is,
	// it can't do any real harm.
	if contextTestDeepCopyOnPlan {
		p.Diff.DeepCopy()
	}

	/*
		// We don't do the reverification during the new destroy plan because
		// it will use a different apply process.
		if X_legacyGraph {
			// Now that we have a diff, we can build the exact graph that Apply will use
			// and catch any possible cycles during the Plan phase.
			if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
				return nil, err
			}
		}
	*/

	// Surface any validation errors collected during the walk alongside
	// the plan itself.
	var errs error
	if len(walker.ValidationErrors) > 0 {
		errs = multierror.Append(errs, walker.ValidationErrors...)
	}
	return p, errs
}
+
+// Refresh goes through all the resources in the state and refreshes them
+// to their latest state. This will update the state that this context
+// works with, along with returning it.
+//
+// Even in the case an error is returned, the state may be returned and
+// will potentially be partially updated.
+func (c *Context) Refresh() (*State, error) {
+ defer c.acquireRun("refresh")()
+
+ // Copy our own state
+ c.state = c.state.DeepCopy()
+
+ // Build the graph.
+ graph, err := c.Graph(GraphTypeRefresh, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Do the walk
+ if _, err := c.walk(graph, graph, walkRefresh); err != nil {
+ return nil, err
+ }
+
+ // Clean out any unused things
+ c.state.prune()
+
+ return c.state, nil
+}
+
// Stop stops the running task.
//
// Stop will block until the task completes.
func (c *Context) Stop() {
	log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")

	c.l.Lock()
	defer c.l.Unlock()

	// If we're running, then stop
	if c.runContextCancel != nil {
		log.Printf("[WARN] terraform: run context exists, stopping")

		// Tell the hook we want to stop
		c.sh.Stop()

		// Stop the context; nil the cancel func so releaseRun knows the
		// cancellation already happened.
		c.runContextCancel()
		c.runContextCancel = nil
	}

	// Grab the condition var before we exit
	// NOTE(review): Wait is not wrapped in a predicate loop, so a
	// spurious wakeup would return before the run truly finishes —
	// confirm that releaseRun's single Broadcast makes this acceptable.
	if cond := c.runCond; cond != nil {
		cond.Wait()
	}

	log.Printf("[WARN] terraform: stop complete")
}
+
+// Validate validates the configuration and returns any warnings or errors.
+func (c *Context) Validate() ([]string, []error) {
+ defer c.acquireRun("validate")()
+
+ var errs error
+
+ // Validate the configuration itself
+ if err := c.module.Validate(); err != nil {
+ errs = multierror.Append(errs, err)
+ }
+
+ // This only needs to be done for the root module, since inter-module
+ // variables are validated in the module tree.
+ if config := c.module.Config(); config != nil {
+ // Validate the user variables
+ if err := smcUserVariables(config, c.variables); len(err) > 0 {
+ errs = multierror.Append(errs, err...)
+ }
+ }
+
+ // If we have errors at this point, the graphing has no chance,
+ // so just bail early.
+ if errs != nil {
+ return nil, []error{errs}
+ }
+
+ // Build the graph so we can walk it and run Validate on nodes.
+ // We also validate the graph generated here, but this graph doesn't
+ // necessarily match the graph that Plan will generate, so we'll validate the
+ // graph again later after Planning.
+ graph, err := c.Graph(GraphTypeValidate, nil)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ // Walk
+ walker, err := c.walk(graph, graph, walkValidate)
+ if err != nil {
+ return nil, multierror.Append(errs, err).Errors
+ }
+
+ // Return the result
+ rerrs := multierror.Append(errs, walker.ValidationErrors...)
+
+ sort.Strings(walker.ValidationWarnings)
+ sort.Slice(rerrs.Errors, func(i, j int) bool {
+ return rerrs.Errors[i].Error() < rerrs.Errors[j].Error()
+ })
+
+ return walker.ValidationWarnings, rerrs.Errors
+}
+
// Module returns the module tree associated with this context.
// The returned tree is the context's own (not a copy), so callers
// should treat it as read-only.
func (c *Context) Module() *module.Tree {
	return c.module
}
+
// Variables will return the mapping of variables that were defined
// for this Context. If Input was called, this mapping may be different
// than what was given.
//
// The internal map itself is returned, not a copy: mutations by the
// caller are visible to the context.
func (c *Context) Variables() map[string]interface{} {
	return c.variables
}
+
// SetVariable sets a variable after a context has already been built.
// No locking is performed here, so this must not be called while an
// operation is running.
func (c *Context) SetVariable(k string, v interface{}) {
	c.variables[k] = v
}
+
// acquireRun takes the exclusive "run" slot for the named phase,
// preparing the per-run state (debug phase, cancellable context, stop
// hook, shadow errors). It blocks until any in-flight run finishes and
// returns the release function; callers do `defer c.acquireRun("x")()`.
func (c *Context) acquireRun(phase string) func() {
	// With the run lock held, grab the context lock to make changes
	// to the run context.
	c.l.Lock()
	defer c.l.Unlock()

	// Wait until we're no longer running
	for c.runCond != nil {
		c.runCond.Wait()
	}

	// Build our lock; a non-nil runCond marks this context as "running".
	c.runCond = sync.NewCond(&c.l)

	// Setup debugging
	dbug.SetPhase(phase)

	// Create a new run context so Stop() can cancel this run.
	c.runContext, c.runContextCancel = context.WithCancel(context.Background())

	// Reset the stop hook so we're not stopped
	c.sh.Reset()

	// Reset the shadow errors
	c.shadowErr = nil

	return c.releaseRun
}
+
// releaseRun ends the run begun by acquireRun: it cancels the run context,
// clears the per-run state, and wakes every goroutine blocked in
// acquireRun. It must be called exactly once per acquireRun; callers
// defer the closure acquireRun returns.
func (c *Context) releaseRun() {
	// Grab the context lock so that we can make modifications to fields
	c.l.Lock()
	defer c.l.Unlock()

	// setting the phase to "INVALID" lets us easily detect if we have
	// operations happening outside of a run, or we missed setting the proper
	// phase
	dbug.SetPhase("INVALID")

	// End our run. We check if runContextCancel is non-nil because it can
	// be set to nil if it was cancelled via Stop()
	if c.runContextCancel != nil {
		c.runContextCancel()
	}

	// Unlock all waiting our condition
	cond := c.runCond
	c.runCond = nil
	cond.Broadcast()

	// Unset the context
	c.runContext = nil
}
+
// walk performs a graph walk for the given operation, optionally also
// walking a shadow graph to cross-check results. It returns the walker
// (whose ValidationErrors/ValidationWarnings the caller may inspect) and
// the error from the real graph walk. Shadow errors accumulate on
// c.shadowErr and are only merged into the returned error when
// contextFailOnShadowError is set.
func (c *Context) walk(
	graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) {
	// Keep track of the "real" context which is the context that does
	// the real work: talking to real providers, modifying real state, etc.
	realCtx := c

	// If we don't want shadowing, remove it
	if !experiment.Enabled(experiment.X_shadow) {
		shadow = nil
	}

	// Just log this so we can see it in a debug log
	if !c.shadow {
		log.Printf("[WARN] terraform: shadow graph disabled")
		shadow = nil
	}

	// If we have a shadow graph, walk that as well
	var shadowCtx *Context
	var shadowCloser Shadow
	if shadow != nil {
		// Build the shadow context. In the process, override the real context
		// with the one that is wrapped so that the shadow context can verify
		// the results of the real.
		realCtx, shadowCtx, shadowCloser = newShadowContext(c)
	}

	log.Printf("[DEBUG] Starting graph walk: %s", operation.String())

	walker := &ContextGraphWalker{
		Context:     realCtx,
		Operation:   operation,
		StopContext: c.runContext,
	}

	// Watch for a stop so we can call the provider Stop() API.
	watchStop, watchWait := c.watchStop(walker)

	// Walk the real graph, this will block until it completes
	realErr := graph.Walk(walker)

	// Close the channel so the watcher stops, and wait for it to return.
	close(watchStop)
	<-watchWait

	// If we have a shadow graph and we interrupted the real graph, then
	// we just close the shadow and never verify it. It is non-trivial to
	// recreate the exact execution state up until an interruption so this
	// isn't supported with shadows at the moment.
	if shadowCloser != nil && c.sh.Stopped() {
		// Ignore the error result, there is nothing we could care about
		shadowCloser.CloseShadow()

		// Set it to nil so we don't do anything
		shadowCloser = nil
	}

	// If we have a shadow graph, wait for that to complete.
	if shadowCloser != nil {
		// Build the graph walker for the shadow. We also wrap this in
		// a panicwrap so that panics are captured. For the shadow graph,
		// we just want panics to be normal errors rather than to crash
		// Terraform.
		shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{
			Context:   shadowCtx,
			Operation: operation,
		})

		// Kick off the shadow walk. This will block on any operations
		// on the real walk so it is fine to start first.
		log.Printf("[INFO] Starting shadow graph walk: %s", operation.String())
		shadowCh := make(chan error)
		go func() {
			shadowCh <- shadow.Walk(shadowWalker)
		}()

		// Notify the shadow that we're done
		if err := shadowCloser.CloseShadow(); err != nil {
			c.shadowErr = multierror.Append(c.shadowErr, err)
		}

		// Wait for the walk to end
		log.Printf("[DEBUG] Waiting for shadow graph to complete...")
		shadowWalkErr := <-shadowCh

		// Get any shadow errors
		if err := shadowCloser.ShadowError(); err != nil {
			c.shadowErr = multierror.Append(c.shadowErr, err)
		}

		// Verify the contexts (compare)
		if err := shadowContextVerify(realCtx, shadowCtx); err != nil {
			c.shadowErr = multierror.Append(c.shadowErr, err)
		}

		// At this point, if we're supposed to fail on error, then
		// we PANIC. Some tests just verify that there is an error,
		// so simply appending it to realErr and returning could hide
		// shadow problems.
		//
		// This must be done BEFORE appending shadowWalkErr since the
		// shadowWalkErr may include expected errors.
		//
		// We only do this if we don't have a real error. In the case of
		// a real error, we can't guarantee what nodes were and weren't
		// traversed in parallel scenarios so we can't guarantee no
		// shadow errors.
		if c.shadowErr != nil && contextFailOnShadowError && realErr == nil {
			panic(multierror.Prefix(c.shadowErr, "shadow graph:"))
		}

		// Now, if we have a walk error, we append that through
		if shadowWalkErr != nil {
			c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr)
		}

		if c.shadowErr == nil {
			log.Printf("[INFO] Shadow graph success!")
		} else {
			log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr)

			// If we're supposed to fail on shadow errors, then report it
			if contextFailOnShadowError {
				realErr = multierror.Append(realErr, multierror.Prefix(
					c.shadowErr, "shadow graph:"))
			}
		}
	}

	return walker, realErr
}
+
+// watchStop immediately returns a `stop` and a `wait` chan after dispatching
+// the watchStop goroutine. This will watch the runContext for cancellation and
+// stop the providers accordingly. When the watch is no longer needed, the
+// `stop` chan should be closed before waiting on the `wait` chan.
+// The `wait` chan is important, because without synchronizing with the end of
+// the watchStop goroutine, the runContext may also be closed during the select
+// incorrectly causing providers to be stopped. Even if the graph walk is done
+// at that point, stopping a provider permanently cancels its StopContext which
+// can cause later actions to fail.
+func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
+ stop := make(chan struct{})
+ wait := make(chan struct{})
+
+ // get the runContext cancellation channel now, because releaseRun will
+ // write to the runContext field.
+ done := c.runContext.Done()
+
+ go func() {
+ defer close(wait)
+ // Wait for a stop or completion
+ select {
+ case <-done:
+ // done means the context was canceled, so we need to try and stop
+ // providers.
+ case <-stop:
+ // our own stop channel was closed.
+ return
+ }
+
+ // If we're here, we're stopped, trigger the call.
+
+ {
+ // Copy the providers so that a misbehaved blocking Stop doesn't
+ // completely hang Terraform.
+ walker.providerLock.Lock()
+ ps := make([]ResourceProvider, 0, len(walker.providerCache))
+ for _, p := range walker.providerCache {
+ ps = append(ps, p)
+ }
+ defer walker.providerLock.Unlock()
+
+ for _, p := range ps {
+ // We ignore the error for now since there isn't any reasonable
+ // action to take if there is an error here, since the stop is still
+ // advisory: Terraform will exit once the graph node completes.
+ p.Stop()
+ }
+ }
+
+ {
+ // Call stop on all the provisioners
+ walker.provisionerLock.Lock()
+ ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache))
+ for _, p := range walker.provisionerCache {
+ ps = append(ps, p)
+ }
+ defer walker.provisionerLock.Unlock()
+
+ for _, p := range ps {
+ // We ignore the error for now since there isn't any reasonable
+ // action to take if there is an error here, since the stop is still
+ // advisory: Terraform will exit once the graph node completes.
+ p.Stop()
+ }
+ }
+ }()
+
+ return stop, wait
+}
+
+// parseVariableAsHCL parses the value of a single variable as would have been specified
+// on the command line via -var or in an environment variable named TF_VAR_x, where x is
+// the name of the variable. In order to get around the restriction of HCL requiring a
+// top level object, we prepend a sentinel key, decode the user-specified value as its
+// value and pull the value back out of the resulting map.
+func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) {
+ // expecting a string so don't decode anything, just strip quotes
+ if targetType == config.VariableTypeString {
+ return strings.Trim(input, `"`), nil
+ }
+
+ // return empty types
+ if strings.TrimSpace(input) == "" {
+ switch targetType {
+ case config.VariableTypeList:
+ return []interface{}{}, nil
+ case config.VariableTypeMap:
+ return make(map[string]interface{}), nil
+ }
+ }
+
+ const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
+ inputWithSentinal := fmt.Sprintf("%s = %s", sentinelValue, input)
+
+ var decoded map[string]interface{}
+ err := hcl.Decode(&decoded, inputWithSentinal)
+ if err != nil {
+ return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err)
+ }
+
+ if len(decoded) != 1 {
+ return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input)
+ }
+
+ parsedValue, ok := decoded[sentinelValue]
+ if !ok {
+ return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
+ }
+
+ switch targetType {
+ case config.VariableTypeList:
+ return parsedValue, nil
+ case config.VariableTypeMap:
+ if list, ok := parsedValue.([]map[string]interface{}); ok {
+ return list[0], nil
+ }
+
+ return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
+ default:
+ panic(fmt.Errorf("unknown type %s", targetType.Printable()))
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
new file mode 100644
index 00000000..6f507445
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
@@ -0,0 +1,65 @@
+package terraform
+
+import (
+ "fmt"
+)
+
// contextComponentFactory is the interface that Context uses
// to initialize various components such as providers and provisioners.
// This factory gets more information than the raw maps used to initialize
// a Context. This information is used for debugging.
type contextComponentFactory interface {
	// ResourceProvider creates a new ResourceProvider with the given
	// type. The "uid" is a unique identifier for this provider being
	// initialized that can be used for internal tracking.
	ResourceProvider(typ, uid string) (ResourceProvider, error)
	// ResourceProviders lists the names of every registered provider type.
	ResourceProviders() []string

	// ResourceProvisioner creates a new ResourceProvisioner with the
	// given type. The "uid" is a unique identifier for this provisioner
	// being initialized that can be used for internal tracking.
	ResourceProvisioner(typ, uid string) (ResourceProvisioner, error)
	// ResourceProvisioners lists the names of every registered provisioner type.
	ResourceProvisioners() []string
}
+
// basicComponentFactory just calls a factory from a map directly.
type basicComponentFactory struct {
	providers    map[string]ResourceProviderFactory    // provider type name -> factory
	provisioners map[string]ResourceProvisionerFactory // provisioner type name -> factory
}
+
+func (c *basicComponentFactory) ResourceProviders() []string {
+ result := make([]string, len(c.providers))
+ for k, _ := range c.providers {
+ result = append(result, k)
+ }
+
+ return result
+}
+
+func (c *basicComponentFactory) ResourceProvisioners() []string {
+ result := make([]string, len(c.provisioners))
+ for k, _ := range c.provisioners {
+ result = append(result, k)
+ }
+
+ return result
+}
+
+func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) {
+ f, ok := c.providers[typ]
+ if !ok {
+ return nil, fmt.Errorf("unknown provider %q", typ)
+ }
+
+ return f()
+}
+
+func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) {
+ f, ok := c.provisioners[typ]
+ if !ok {
+ return nil, fmt.Errorf("unknown provisioner %q", typ)
+ }
+
+ return f()
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
new file mode 100644
index 00000000..084f0105
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
@@ -0,0 +1,32 @@
+package terraform
+
+//go:generate stringer -type=GraphType context_graph_type.go
+
// GraphType is an enum of the type of graph to create with a Context.
// The values of the constants may change so they shouldn't be depended on;
// always use the constant name.
type GraphType byte

const (
	// GraphTypeInvalid is the zero value, so an unset GraphType is
	// detectably invalid.
	GraphTypeInvalid GraphType = 0
	// GraphTypeLegacy resumes iota at this spec's index (1); the
	// remaining constants continue 2, 3, ... in declaration order.
	GraphTypeLegacy GraphType = iota
	GraphTypeRefresh
	GraphTypePlan
	GraphTypePlanDestroy
	GraphTypeApply
	GraphTypeInput
	GraphTypeValidate
)
+
// GraphTypeMap is a mapping of human-readable string to GraphType. This
// is useful to use as the mechanism for human input for configurable
// graph types.
//
// GraphTypeInvalid is intentionally absent: every entry here is a valid,
// selectable graph type.
var GraphTypeMap = map[string]GraphType{
	"apply":        GraphTypeApply,
	"input":        GraphTypeInput,
	"plan":         GraphTypePlan,
	"plan-destroy": GraphTypePlanDestroy,
	"refresh":      GraphTypeRefresh,
	"legacy":       GraphTypeLegacy,
	"validate":     GraphTypeValidate,
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
new file mode 100644
index 00000000..f1d57760
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
@@ -0,0 +1,77 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+)
+
// ImportOpts are used as the configuration for Import.
type ImportOpts struct {
	// Targets are the targets to import
	Targets []*ImportTarget

	// Module is optional, and specifies a config module that is loaded
	// into the graph and evaluated. The use case for this is to provide
	// provider configuration. If nil, the Context's own module tree is
	// used (see Import).
	Module *module.Tree
}
+
// ImportTarget is a single resource to import.
type ImportTarget struct {
	// Addr is the full resource address of the resource to import.
	// Example: "module.foo.aws_instance.bar"
	Addr string

	// ID is the ID of the resource to import. This is resource-specific.
	ID string

	// Provider optionally names the provider to use for this import;
	// presumably derived from Addr when empty — confirm against the
	// import graph builder.
	Provider string
}
+
+// Import takes already-created external resources and brings them
+// under Terraform management. Import requires the exact type, name, and ID
+// of the resources to import.
+//
+// This operation is idempotent. If the requested resource is already
+// imported, no changes are made to the state.
+//
+// Further, this operation also gracefully handles partial state. If during
+// an import there is a failure, all previously imported resources remain
+// imported.
+func (c *Context) Import(opts *ImportOpts) (*State, error) {
+ // Hold a lock since we can modify our own state here
+ defer c.acquireRun("import")()
+
+ // Copy our own state
+ c.state = c.state.DeepCopy()
+
+ // If no module is given, default to the module configured with
+ // the Context.
+ module := opts.Module
+ if module == nil {
+ module = c.module
+ }
+
+ // Initialize our graph builder
+ builder := &ImportGraphBuilder{
+ ImportTargets: opts.Targets,
+ Module: module,
+ Providers: c.components.ResourceProviders(),
+ }
+
+ // Build the graph!
+ graph, err := builder.Build(RootModulePath)
+ if err != nil {
+ return c.state, err
+ }
+
+ // Walk it
+ if _, err := c.walk(graph, nil, walkImport); err != nil {
+ return c.state, err
+ }
+
+ // Clean the state
+ c.state.prune()
+
+ return c.state, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/debug.go b/vendor/github.com/hashicorp/terraform/terraform/debug.go
new file mode 100644
index 00000000..265339f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/debug.go
@@ -0,0 +1,523 @@
+package terraform
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+)
+
// dbug is the global handler for writing the debug archive. All methods
// are safe to call concurrently. Setting dbug to nil will disable writing
// the debug archive. All methods are safe to call on the nil value.
var dbug *debugInfo
+
+// SetDebugInfo initializes the debug handler with a backing file in the
+// provided directory. This must be called before any other terraform package
+// operations or not at all. Once his is called, CloseDebugInfo should be
+// called before program exit.
+func SetDebugInfo(path string) error {
+ if os.Getenv("TF_DEBUG") == "" {
+ return nil
+ }
+
+ di, err := newDebugInfoFile(path)
+ if err != nil {
+ return err
+ }
+
+ dbug = di
+ return nil
+}
+
// CloseDebugInfo is the exported interface to Close the debug info handler.
// The debug handler needs to be closed before program exit, so we export this
// function to be deferred in the appropriate entrypoint for our executable.
// Safe even when debugging was never enabled: dbug.Close is a no-op on nil.
func CloseDebugInfo() error {
	return dbug.Close()
}
+
+// newDebugInfoFile initializes the global debug handler with a backing file in
+// the provided directory.
+func newDebugInfoFile(dir string) (*debugInfo, error) {
+ err := os.MkdirAll(dir, 0755)
+ if err != nil {
+ return nil, err
+ }
+
+ // FIXME: not guaranteed unique, but good enough for now
+ name := fmt.Sprintf("debug-%s", time.Now().Format("2006-01-02-15-04-05.999999999"))
+ archivePath := filepath.Join(dir, name+".tar.gz")
+
+ f, err := os.OpenFile(archivePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil {
+ return nil, err
+ }
+ return newDebugInfo(name, f)
+}
+
+// newDebugInfo initializes the global debug handler.
+func newDebugInfo(name string, w io.Writer) (*debugInfo, error) {
+ gz := gzip.NewWriter(w)
+
+ d := &debugInfo{
+ name: name,
+ w: w,
+ gz: gz,
+ tar: tar.NewWriter(gz),
+ }
+
+ // create the subdirs we need
+ topHdr := &tar.Header{
+ Name: name,
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ }
+ graphsHdr := &tar.Header{
+ Name: name + "/graphs",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ }
+ err := d.tar.WriteHeader(topHdr)
+ // if the first errors, the second will too
+ err = d.tar.WriteHeader(graphsHdr)
+ if err != nil {
+ return nil, err
+ }
+
+ return d, nil
+}
+
// debugInfo provides various methods for writing debug information to a
// central archive. The debugInfo struct should be initialized once before any
// output is written, and Close should be called before program exit. All
// exported methods on debugInfo will be safe for concurrent use. The exported
// methods are also all safe to call on a nil pointer, so that there is no need
// for conditional blocks before writing debug information.
//
// Each write operation done by the debugInfo will flush the gzip.Writer and
// tar.Writer, and call Sync() or Flush() on the output writer as needed. This
// ensures that as much data as possible is written to storage in the event of
// a crash. The append format of the tar file, and the stream format of the
// gzip writer allow easy recovery of the data in the event that the debugInfo
// is not closed before program exit.
type debugInfo struct {
	sync.Mutex

	// archive root directory name
	name string

	// current operation phase
	phase string

	// step is a monotonic counter for recording the order of operations
	step int

	// flag to protect Close()
	closed bool

	// the debug log output is in a tar.gz format, written to the io.Writer w
	w   io.Writer
	gz  *gzip.Writer
	tar *tar.Writer
}
+
// SetPhase records the name of the current operational phase in the debug
// handler. Each file in the archive will contain the name of the phase in
// which it was created, i.e. "input", "apply", "plan", "refresh", "validate".
// Safe to call on a nil receiver (no-op).
func (d *debugInfo) SetPhase(phase string) {
	if d == nil {
		return
	}
	d.Lock()
	defer d.Unlock()

	d.phase = phase
}
+
+// Close the debugInfo, finalizing the data in storage. This closes the
+// tar.Writer, the gzip.Wrtier, and if the output writer is an io.Closer, it is
+// also closed.
+func (d *debugInfo) Close() error {
+ if d == nil {
+ return nil
+ }
+
+ d.Lock()
+ defer d.Unlock()
+
+ if d.closed {
+ return nil
+ }
+ d.closed = true
+
+ d.tar.Close()
+ d.gz.Close()
+
+ if c, ok := d.w.(io.Closer); ok {
+ return c.Close()
+ }
+ return nil
+}
+
// debugBuffer is an io.WriteCloser that buffers writes in memory and
// writes the accumulated bytes to the debug archive when closed.
type debugBuffer struct {
	debugInfo *debugInfo
	name      string
	buf       bytes.Buffer
}
+
// Write buffers d in memory; nothing reaches the archive until Close.
func (b *debugBuffer) Write(d []byte) (int, error) {
	return b.buf.Write(d)
}
+
// Close flushes the buffered contents to the debug archive as a single
// file entry named b.name.
func (b *debugBuffer) Close() error {
	return b.debugInfo.WriteFile(b.name, b.buf.Bytes())
}
+
// nopWriteCloser is a no-op io.WriteCloser, returned when debugging is
// disabled (ioutil only provides a no-op ReadCloser).
//
// Fixed: Write now reports the full input length as consumed. Returning
// 0 for a non-empty p violates the io.Writer contract and makes wrappers
// such as fmt.Fprintf report io.ErrShortWrite.
type nopWriteCloser struct{}

func (nopWriteCloser) Write(p []byte) (int, error) { return len(p), nil }
func (nopWriteCloser) Close() error                { return nil }
+
// NewFileWriter returns an io.WriteCloser that will be buffered and written
// to the debug archive when closed. On a nil receiver (debugging disabled)
// a no-op writer is returned instead.
func (d *debugInfo) NewFileWriter(name string) io.WriteCloser {
	if d == nil {
		return nopWriteCloser{}
	}

	return &debugBuffer{
		debugInfo: d,
		name:      name,
	}
}
+
// syncer is implemented by output writers (e.g. *os.File) that can force
// their data to stable storage.
type syncer interface {
	Sync() error
}
+
// flusher is implemented by buffered output writers (e.g. *bufio.Writer)
// that can flush pending data downstream.
type flusher interface {
	Flush() error
}
+
// flush pushes buffered data as far toward stable storage as possible: it
// flushes the tar.Writer and the gzip.Writer, and calls Flush() or Sync()
// on the output writer if available. Called with d's mutex held (see
// WriteFile).
func (d *debugInfo) flush() {
	d.tar.Flush()
	d.gz.Flush()

	if f, ok := d.w.(flusher); ok {
		f.Flush()
	}

	if s, ok := d.w.(syncer); ok {
		s.Sync()
	}
}
+
// WriteFile writes data as a single file to the debug archive. Safe for
// concurrent use and on a nil receiver (no-op).
func (d *debugInfo) WriteFile(name string, data []byte) error {
	if d == nil {
		return nil
	}

	d.Lock()
	defer d.Unlock()
	return d.writeFile(name, data)
}
+
+func (d *debugInfo) writeFile(name string, data []byte) error {
+ defer d.flush()
+ path := fmt.Sprintf("%s/%d-%s-%s", d.name, d.step, d.phase, name)
+ d.step++
+
+ hdr := &tar.Header{
+ Name: path,
+ Mode: 0644,
+ Size: int64(len(data)),
+ }
+ err := d.tar.WriteHeader(hdr)
+ if err != nil {
+ return err
+ }
+
+ _, err = d.tar.Write(data)
+ return err
+}
+
// DebugHook implements all methods of the terraform.Hook interface, and writes
// the arguments to a file in the archive. When a suitable format for the
// argument isn't available, the argument is encoded using json.Marshal. If the
// debug handler is nil, all DebugHook methods are noop, so no time is spent in
// marshaling the data structures.
type DebugHook struct{}
+
+func (*DebugHook) PreApply(ii *InstanceInfo, is *InstanceState, id *InstanceDiff) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String() + "\n")
+ }
+
+ idCopy, err := id.Copy()
+ if err != nil {
+ return HookActionContinue, err
+ }
+ js, err := json.MarshalIndent(idCopy, "", " ")
+ if err != nil {
+ return HookActionContinue, err
+ }
+ buf.Write(js)
+
+ dbug.WriteFile("hook-PreApply", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostApply(ii *InstanceInfo, is *InstanceState, err error) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String() + "\n")
+ }
+
+ if err != nil {
+ buf.WriteString(err.Error())
+ }
+
+ dbug.WriteFile("hook-PostApply", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PreDiff(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PreDiff", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostDiff(ii *InstanceInfo, id *InstanceDiff) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ idCopy, err := id.Copy()
+ if err != nil {
+ return HookActionContinue, err
+ }
+ js, err := json.MarshalIndent(idCopy, "", " ")
+ if err != nil {
+ return HookActionContinue, err
+ }
+ buf.Write(js)
+
+ dbug.WriteFile("hook-PostDiff", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PreProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PreProvisionResource", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PostProvisionResource", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PreProvision(ii *InstanceInfo, s string) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+ buf.WriteString(s + "\n")
+
+ dbug.WriteFile("hook-PreProvision", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostProvision(ii *InstanceInfo, s string, err error) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+ buf.WriteString(s + "\n")
+
+ dbug.WriteFile("hook-PostProvision", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) ProvisionOutput(ii *InstanceInfo, s1 string, s2 string) {
+ if dbug == nil {
+ return
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+ buf.WriteString(s1 + "\n")
+ buf.WriteString(s2 + "\n")
+
+ dbug.WriteFile("hook-ProvisionOutput", buf.Bytes())
+}
+
+func (*DebugHook) PreRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PreRefresh", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PostRefresh", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PreImportState(ii *InstanceInfo, s string) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+ buf.WriteString(s + "\n")
+
+ dbug.WriteFile("hook-PreImportState", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostImportState(ii *InstanceInfo, iss []*InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ for _, is := range iss {
+ if is != nil {
+ buf.WriteString(is.String() + "\n")
+ }
+ }
+ dbug.WriteFile("hook-PostImportState", buf.Bytes())
+ return HookActionContinue, nil
+}
+
// PostStateUpdate intentionally records nothing: a full state snapshot
// could be huge, so this hook is a no-op for now.
func (*DebugHook) PostStateUpdate(*State) (HookAction, error) {
	return HookActionContinue, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go
new file mode 100644
index 00000000..a9fae6c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go
@@ -0,0 +1,866 @@
+package terraform
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/mitchellh/copystructure"
+)
+
+// DiffChangeType is an enum with the kind of changes a diff has planned.
+type DiffChangeType byte
+
+const (
+	// DiffInvalid is the zero value; it represents an uninitialized type.
+	DiffInvalid DiffChangeType = iota
+	DiffNone
+	DiffCreate
+	DiffUpdate
+	DiffDestroy
+	DiffDestroyCreate
+)
+
+// multiVal matches the index key to a flatmapped set, list or map
+// (keys ending in ".#" for lists/sets or ".%" for maps).
+var multiVal = regexp.MustCompile(`\.(#|%)$`)
+
+// Diff tracks the changes that are necessary to apply a configuration
+// to an existing infrastructure.
+type Diff struct {
+	// Modules contains all the modules that have a diff
+	Modules []*ModuleDiff
+}
+
+// Prune cleans out unused structures in the diff without affecting
+// the behavior of the diff at all.
+//
+// This is not safe to call concurrently. This is safe to call on a
+// nil Diff.
+func (d *Diff) Prune() {
+	if d == nil {
+		return
+	}
+
+	// Prune all empty modules
+	newModules := make([]*ModuleDiff, 0, len(d.Modules))
+	for _, m := range d.Modules {
+		// If the module isn't empty, we keep it
+		if !m.Empty() {
+			newModules = append(newModules, m)
+		}
+	}
+	// Normalize "no modules" to a nil slice rather than an empty one.
+	if len(newModules) == 0 {
+		newModules = nil
+	}
+	d.Modules = newModules
+}
+
+// AddModule adds the module with the given path to the diff.
+//
+// This should be the preferred method to add module diffs since it
+// allows us to optimize lookups later as well as control sorting.
+func (d *Diff) AddModule(path []string) *ModuleDiff {
+	m := &ModuleDiff{Path: path}
+	m.init()
+	d.Modules = append(d.Modules, m)
+	return m
+}
+
+// ModuleByPath is used to lookup the module diff for the given path.
+// This should be the preferred lookup mechanism as it allows for future
+// lookup optimizations.
+//
+// Returns nil when the receiver is nil or no module matches; panics on a
+// module whose Path was never set, since that indicates a corrupt diff.
+func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
+	if d == nil {
+		return nil
+	}
+	for _, mod := range d.Modules {
+		if mod.Path == nil {
+			panic("missing module path")
+		}
+		if reflect.DeepEqual(mod.Path, path) {
+			return mod
+		}
+	}
+	return nil
+}
+
+// RootModule returns the ModuleState for the root module.
+// It panics if the root module is missing from the diff.
+func (d *Diff) RootModule() *ModuleDiff {
+	root := d.ModuleByPath(rootModulePath)
+	if root == nil {
+		panic("missing root module")
+	}
+	return root
+}
+
+// Empty returns true if the diff has no changes.
+// It is safe to call on a nil Diff.
+func (d *Diff) Empty() bool {
+	if d == nil {
+		return true
+	}
+
+	for _, m := range d.Modules {
+		if !m.Empty() {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Equal compares two diffs for exact equality.
+//
+// This is different from the Same comparison that is supported which
+// checks for operation equality taking into account computed values. Equal
+// instead checks for exact equality.
+//
+// NOTE(review): this sorts both receivers' Modules in place as a side
+// effect, so it is not safe to call concurrently with readers.
+func (d *Diff) Equal(d2 *Diff) bool {
+	// If one is nil, they must both be nil
+	if d == nil || d2 == nil {
+		return d == d2
+	}
+
+	// Sort the modules
+	sort.Sort(moduleDiffSort(d.Modules))
+	sort.Sort(moduleDiffSort(d2.Modules))
+
+	// Copy since we have to modify the module destroy flag to false so
+	// we don't compare that. TODO: delete this when we get rid of the
+	// destroy flag on modules.
+	dCopy := d.DeepCopy()
+	d2Copy := d2.DeepCopy()
+	for _, m := range dCopy.Modules {
+		m.Destroy = false
+	}
+	for _, m := range d2Copy.Modules {
+		m.Destroy = false
+	}
+
+	// Use DeepEqual
+	return reflect.DeepEqual(dCopy, d2Copy)
+}
+
+// DeepCopy performs a deep copy of all parts of the Diff, making the
+// resulting Diff safe to use without modifying this one.
+//
+// It panics if copystructure fails, which should only happen on a
+// programmer error (an uncopyable value inside the diff).
+func (d *Diff) DeepCopy() *Diff {
+	copy, err := copystructure.Config{Lock: true}.Copy(d)
+	if err != nil {
+		panic(err)
+	}
+
+	return copy.(*Diff)
+}
+
+// String renders the diff as human-readable text: the root module's diff
+// first-class, and each child module under a "module.<path>:" header with
+// its contents indented, all sorted by module key.
+func (d *Diff) String() string {
+	var buf bytes.Buffer
+
+	keys := make([]string, 0, len(d.Modules))
+	lookup := make(map[string]*ModuleDiff)
+	for _, m := range d.Modules {
+		// Path[0] is the root marker, so the key is built from Path[1:].
+		key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], "."))
+		keys = append(keys, key)
+		lookup[key] = m
+	}
+	sort.Strings(keys)
+
+	for _, key := range keys {
+		m := lookup[key]
+		mStr := m.String()
+
+		// If we're the root module, we just write the output directly.
+		if reflect.DeepEqual(m.Path, rootModulePath) {
+			buf.WriteString(mStr + "\n")
+			continue
+		}
+
+		buf.WriteString(fmt.Sprintf("%s:\n", key))
+
+		// Indent every line of the child module's diff by two spaces.
+		s := bufio.NewScanner(strings.NewReader(mStr))
+		for s.Scan() {
+			buf.WriteString(fmt.Sprintf("  %s\n", s.Text()))
+		}
+	}
+
+	return strings.TrimSpace(buf.String())
+}
+
+// init ensures the diff has at least a root module entry and that every
+// module diff is itself initialized.
+func (d *Diff) init() {
+	if d.Modules == nil {
+		rootDiff := &ModuleDiff{Path: rootModulePath}
+		d.Modules = []*ModuleDiff{rootDiff}
+	}
+	for _, m := range d.Modules {
+		m.init()
+	}
+}
+
+// ModuleDiff tracks the differences between resources to apply within
+// a single module.
+type ModuleDiff struct {
+	Path      []string
+	Resources map[string]*InstanceDiff
+	Destroy   bool // Set only by the destroy plan
+}
+
+// init lazily allocates the Resources map and initializes each
+// contained InstanceDiff.
+func (d *ModuleDiff) init() {
+	if d.Resources == nil {
+		d.Resources = make(map[string]*InstanceDiff)
+	}
+	for _, r := range d.Resources {
+		r.init()
+	}
+}
+
+// ChangeType returns the type of changes that the diff for this
+// module includes.
+//
+// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or
+// DiffCreate. If an instance within the module has a DiffDestroyCreate
+// then this will register as a DiffCreate for a module.
+func (d *ModuleDiff) ChangeType() DiffChangeType {
+	result := DiffNone
+	for _, r := range d.Resources {
+		change := r.ChangeType()
+		switch change {
+		case DiffCreate, DiffDestroy:
+			// Create/Destroy only "win" if nothing stronger was seen yet.
+			if result == DiffNone {
+				result = change
+			}
+		case DiffDestroyCreate, DiffUpdate:
+			result = DiffUpdate
+		}
+	}
+
+	return result
+}
+
+// Empty returns true if the diff has no changes within this module.
+// A module flagged for destroy is never empty.
+func (d *ModuleDiff) Empty() bool {
+	if d.Destroy {
+		return false
+	}
+
+	if len(d.Resources) == 0 {
+		return true
+	}
+
+	for _, rd := range d.Resources {
+		if !rd.Empty() {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Instances returns the instance diffs for the id given. This can return
+// multiple instance diffs if there are counts within the resource.
+//
+// A key matches when it equals id exactly or begins with "id." (a
+// count-indexed instance). Empty diffs are filtered out.
+func (d *ModuleDiff) Instances(id string) []*InstanceDiff {
+	var result []*InstanceDiff
+	for k, diff := range d.Resources {
+		if k == id || strings.HasPrefix(k, id+".") {
+			if !diff.Empty() {
+				result = append(result, diff)
+			}
+		}
+	}
+
+	return result
+}
+
+// IsRoot says whether or not this module diff is for the root module.
+func (d *ModuleDiff) IsRoot() bool {
+	return reflect.DeepEqual(d.Path, rootModulePath)
+}
+
+// String outputs the diff in a long but command-line friendly output
+// format that users can read to quickly inspect a diff.
+//
+// Each resource is listed alphabetically with a CRUD verb, followed by
+// its attribute changes aligned in a column, e.g.:
+//
+//	UPDATE: aws_instance.foo
+//	  ami: "abc" => "def" (forces new resource)
+func (d *ModuleDiff) String() string {
+	var buf bytes.Buffer
+
+	names := make([]string, 0, len(d.Resources))
+	for name, _ := range d.Resources {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	for _, name := range names {
+		rdiff := d.Resources[name]
+
+		// Derive the CRUD verb from the diff's flags; order matters so
+		// a destroy/create pair is reported before a plain destroy.
+		crud := "UPDATE"
+		switch {
+		case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()):
+			crud = "DESTROY/CREATE"
+		case rdiff.GetDestroy() || rdiff.GetDestroyDeposed():
+			crud = "DESTROY"
+		case rdiff.RequiresNew():
+			crud = "CREATE"
+		}
+
+		extra := ""
+		if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() {
+			extra = " (deposed only)"
+		}
+
+		buf.WriteString(fmt.Sprintf(
+			"%s: %s%s\n",
+			crud,
+			name,
+			extra))
+
+		// keyLen is the widest attribute name, used to pad the column.
+		keyLen := 0
+		rdiffAttrs := rdiff.CopyAttributes()
+		keys := make([]string, 0, len(rdiffAttrs))
+		for key, _ := range rdiffAttrs {
+			// "id" is synthesized from the state, not a real change.
+			if key == "id" {
+				continue
+			}
+
+			keys = append(keys, key)
+			if len(key) > keyLen {
+				keyLen = len(key)
+			}
+		}
+		sort.Strings(keys)
+
+		for _, attrK := range keys {
+			attrDiff, _ := rdiff.GetAttribute(attrK)
+
+			v := attrDiff.New
+			u := attrDiff.Old
+			if attrDiff.NewComputed {
+				v = "<computed>"
+			}
+
+			// Sensitive values are masked in both old and new columns.
+			if attrDiff.Sensitive {
+				u = "<sensitive>"
+				v = "<sensitive>"
+			}
+
+			updateMsg := ""
+			if attrDiff.RequiresNew {
+				updateMsg = " (forces new resource)"
+			} else if attrDiff.Sensitive {
+				updateMsg = " (attribute changed)"
+			}
+
+			buf.WriteString(fmt.Sprintf(
+				"  %s:%s %#v => %#v%s\n",
+				attrK,
+				strings.Repeat(" ", keyLen-len(attrK)),
+				u,
+				v,
+				updateMsg))
+		}
+	}
+
+	return buf.String()
+}
+
+// InstanceDiff is the diff of a resource from some state to another.
+//
+// The mutex guards all fields; use the Get*/Set* accessors (or Lock/
+// Unlock) when touching an InstanceDiff from outside its own methods.
+type InstanceDiff struct {
+	mu             sync.Mutex
+	Attributes     map[string]*ResourceAttrDiff
+	Destroy        bool
+	DestroyDeposed bool
+	DestroyTainted bool
+
+	// Meta is a simple K/V map that is stored in a diff and persisted to
+	// plans but otherwise is completely ignored by Terraform core. It is
+	// meant to be used for additional data a resource may want to pass through.
+	// The value here must only contain Go primitives and collections.
+	Meta map[string]interface{}
+}
+
+// Lock and Unlock expose the internal mutex for callers that need to
+// hold it across several field accesses.
+func (d *InstanceDiff) Lock()   { d.mu.Lock() }
+func (d *InstanceDiff) Unlock() { d.mu.Unlock() }
+
+// ResourceAttrDiff is the diff of a single attribute of a resource.
+type ResourceAttrDiff struct {
+	Old         string      // Old Value
+	New         string      // New Value
+	NewComputed bool        // True if new value is computed (unknown currently)
+	NewRemoved  bool        // True if this attribute is being removed
+	NewExtra    interface{} // Extra information for the provider
+	RequiresNew bool        // True if change requires new resource
+	Sensitive   bool        // True if the data should not be displayed in UI output
+	Type        DiffAttrType
+}
+
+// Empty returns true if the diff for this attr is neutral
+func (d *ResourceAttrDiff) Empty() bool {
+	return d.Old == d.New && !d.NewComputed && !d.NewRemoved
+}
+
+// GoString implements fmt.GoStringer so %#v prints the pointed-to value.
+func (d *ResourceAttrDiff) GoString() string {
+	return fmt.Sprintf("*%#v", *d)
+}
+
+// DiffAttrType is an enum type that says whether a resource attribute
+// diff is an input attribute (comes from the configuration) or an
+// output attribute (comes as a result of applying the configuration). An
+// example input would be "ami" for AWS and an example output would be
+// "private_ip".
+type DiffAttrType byte
+
+const (
+	DiffAttrUnknown DiffAttrType = iota
+	DiffAttrInput
+	DiffAttrOutput
+)
+
+// init lazily allocates the Attributes map.
+func (d *InstanceDiff) init() {
+	if d.Attributes == nil {
+		d.Attributes = make(map[string]*ResourceAttrDiff)
+	}
+}
+
+// NewInstanceDiff returns an InstanceDiff with its Attributes map
+// already allocated.
+func NewInstanceDiff() *InstanceDiff {
+	return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)}
+}
+
+// Copy returns a deep copy of the diff, or (nil, nil) for a nil
+// receiver. Unlike DeepCopy it reports copy failures as an error
+// instead of panicking.
+func (d *InstanceDiff) Copy() (*InstanceDiff, error) {
+	if d == nil {
+		return nil, nil
+	}
+
+	dCopy, err := copystructure.Config{Lock: true}.Copy(d)
+	if err != nil {
+		return nil, err
+	}
+
+	return dCopy.(*InstanceDiff), nil
+}
+
+// ChangeType returns the DiffChangeType represented by the diff
+// for this single instance.
+func (d *InstanceDiff) ChangeType() DiffChangeType {
+	if d.Empty() {
+		return DiffNone
+	}
+
+	if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) {
+		return DiffDestroyCreate
+	}
+
+	if d.GetDestroy() || d.GetDestroyDeposed() {
+		return DiffDestroy
+	}
+
+	if d.RequiresNew() {
+		return DiffCreate
+	}
+
+	return DiffUpdate
+}
+
+// Empty returns true if this diff encapsulates no changes.
+// It is safe to call on a nil receiver.
+func (d *InstanceDiff) Empty() bool {
+	if d == nil {
+		return true
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	return !d.Destroy &&
+		!d.DestroyTainted &&
+		!d.DestroyDeposed &&
+		len(d.Attributes) == 0
+}
+
+// Equal compares two diffs for exact equality.
+//
+// This is different from the Same comparison that is supported which
+// checks for operation equality taking into account computed values. Equal
+// instead checks for exact equality.
+func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool {
+	// If one is nil, they must both be nil
+	if d == nil || d2 == nil {
+		return d == d2
+	}
+
+	// Use DeepEqual
+	return reflect.DeepEqual(d, d2)
+}
+
+// DeepCopy performs a deep copy of all parts of the InstanceDiff.
+// It panics on copy failure; see Copy for the error-returning variant.
+func (d *InstanceDiff) DeepCopy() *InstanceDiff {
+	copy, err := copystructure.Config{Lock: true}.Copy(d)
+	if err != nil {
+		panic(err)
+	}
+
+	return copy.(*InstanceDiff)
+}
+
+// GoString implements fmt.GoStringer. It copies the exported fields into
+// a fresh value so the mutex (and Meta) are left out of the output.
+func (d *InstanceDiff) GoString() string {
+	return fmt.Sprintf("*%#v", InstanceDiff{
+		Attributes:     d.Attributes,
+		Destroy:        d.Destroy,
+		DestroyTainted: d.DestroyTainted,
+		DestroyDeposed: d.DestroyDeposed,
+	})
+}
+
+// RequiresNew returns true if the diff requires the creation of a new
+// resource (implying the destruction of the old).
+func (d *InstanceDiff) RequiresNew() bool {
+	if d == nil {
+		return false
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return d.requiresNew()
+}
+
+// requiresNew is the lock-free implementation of RequiresNew; callers
+// must already hold d.mu (or own the diff exclusively).
+func (d *InstanceDiff) requiresNew() bool {
+	if d == nil {
+		return false
+	}
+
+	// A tainted instance is always replaced.
+	if d.DestroyTainted {
+		return true
+	}
+
+	// Any single attribute forcing a new resource forces replacement.
+	for _, rd := range d.Attributes {
+		if rd != nil && rd.RequiresNew {
+			return true
+		}
+	}
+
+	return false
+}
+
+// GetDestroyDeposed returns the DestroyDeposed flag under lock.
+func (d *InstanceDiff) GetDestroyDeposed() bool {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return d.DestroyDeposed
+}
+
+// SetDestroyDeposed sets the DestroyDeposed flag under lock.
+func (d *InstanceDiff) SetDestroyDeposed(b bool) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	d.DestroyDeposed = b
+}
+
+// These methods are properly locked, for use outside other InstanceDiff
+// methods but anywhere else within the terraform package.
+// TODO refactor the locking scheme
+func (d *InstanceDiff) SetTainted(b bool) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	d.DestroyTainted = b
+}
+
+// GetDestroyTainted returns the DestroyTainted flag under lock.
+func (d *InstanceDiff) GetDestroyTainted() bool {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return d.DestroyTainted
+}
+
+// SetDestroy sets the Destroy flag under lock.
+func (d *InstanceDiff) SetDestroy(b bool) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	d.Destroy = b
+}
+
+// GetDestroy returns the Destroy flag under lock.
+func (d *InstanceDiff) GetDestroy() bool {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return d.Destroy
+}
+
+// SetAttribute stores the attribute diff for key under lock.
+func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	d.Attributes[key] = attr
+}
+
+// DelAttribute removes the attribute diff for key under lock.
+func (d *InstanceDiff) DelAttribute(key string) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	delete(d.Attributes, key)
+}
+
+// GetAttribute returns the attribute diff for key and whether it exists,
+// under lock.
+func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	attr, ok := d.Attributes[key]
+	return attr, ok
+}
+
+// GetAttributesLen returns the number of attribute diffs under lock.
+func (d *InstanceDiff) GetAttributesLen() int {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return len(d.Attributes)
+}
+
+// CopyAttributes safely copies the Attributes map. Note this is a
+// shallow copy: the *ResourceAttrDiff values are shared with the diff.
+func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	attrs := make(map[string]*ResourceAttrDiff)
+	for k, v := range d.Attributes {
+		attrs[k] = v
+	}
+
+	return attrs
+}
+
+// Same checks whether or not two InstanceDiff's are the "same". When
+// we say "same", it is not necessarily exactly equal. Instead, it is
+// just checking that the same attributes are changing, a destroy
+// isn't suddenly happening, etc.
+//
+// It is used to verify that the diff computed at apply time still
+// matches the planned diff. On mismatch the second return value is a
+// human-readable reason.
+func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
+	// we can safely compare the pointers without a lock
+	switch {
+	case d == nil && d2 == nil:
+		return true, ""
+	case d == nil || d2 == nil:
+		return false, "one nil"
+	case d == d2:
+		return true, ""
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	// If we're going from requiring new to NOT requiring new, then we have
+	// to see if all required news were computed. If so, it is allowed since
+	// computed may also mean "same value and therefore not new".
+	oldNew := d.requiresNew()
+	newNew := d2.RequiresNew()
+	if oldNew && !newNew {
+		oldNew = false
+
+		// This section builds a list of ignorable attributes for requiresNew
+		// by removing off any elements of collections going to zero elements.
+		// For collections going to zero, they may not exist at all in the
+		// new diff (and hence RequiresNew == false).
+		ignoreAttrs := make(map[string]struct{})
+		for k, diffOld := range d.Attributes {
+			// Only count keys (".%" for maps, ".#" for lists/sets) matter here.
+			if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") {
+				continue
+			}
+
+			// This case is in here as a protection measure. The bug that this
+			// code originally fixed (GH-11349) didn't have to deal with computed
+			// so I'm not 100% sure what the correct behavior is. Best to leave
+			// the old behavior.
+			if diffOld.NewComputed {
+				continue
+			}
+
+			// We're looking for the case a map goes to exactly 0.
+			if diffOld.New != "0" {
+				continue
+			}
+
+			// Found it! Ignore all of these. The prefix here is stripping
+			// off the "%" so it is just "k."
+			prefix := k[:len(k)-1]
+			for k2, _ := range d.Attributes {
+				if strings.HasPrefix(k2, prefix) {
+					ignoreAttrs[k2] = struct{}{}
+				}
+			}
+		}
+
+		for k, rd := range d.Attributes {
+			if _, ok := ignoreAttrs[k]; ok {
+				continue
+			}
+
+			// If the field is requires new and NOT computed, then what
+			// we have is a diff mismatch for sure. We set that the old
+			// diff does REQUIRE a ForceNew.
+			if rd != nil && rd.RequiresNew && !rd.NewComputed {
+				oldNew = true
+				break
+			}
+		}
+	}
+
+	if oldNew != newNew {
+		return false, fmt.Sprintf(
+			"diff RequiresNew; old: %t, new: %t", oldNew, newNew)
+	}
+
+	// Verify that destroy matches. The second boolean here allows us to
+	// have mismatching Destroy if we're moving from RequiresNew true
+	// to false above. Therefore, the second boolean will only pass if
+	// we're moving from Destroy: true to false as well.
+	if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew {
+		return false, fmt.Sprintf(
+			"diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy())
+	}
+
+	// Go through the old diff and make sure the new diff has all the
+	// same attributes. To start, build up the check map to be all the keys.
+	checkOld := make(map[string]struct{})
+	checkNew := make(map[string]struct{})
+	for k, _ := range d.Attributes {
+		checkOld[k] = struct{}{}
+	}
+	for k, _ := range d2.CopyAttributes() {
+		checkNew[k] = struct{}{}
+	}
+
+	// Make an ordered list so we are sure the approximated hashes are left
+	// to process at the end of the loop
+	keys := make([]string, 0, len(d.Attributes))
+	for k, _ := range d.Attributes {
+		keys = append(keys, k)
+	}
+	sort.StringSlice(keys).Sort()
+
+	for _, k := range keys {
+		diffOld := d.Attributes[k]
+
+		if _, ok := checkOld[k]; !ok {
+			// We're not checking this key for whatever reason (see where
+			// check is modified).
+			continue
+		}
+
+		// Remove this key since we'll never hit it again
+		delete(checkOld, k)
+		delete(checkNew, k)
+
+		_, ok := d2.GetAttribute(k)
+		if !ok {
+			// If there's no new attribute, and the old diff expected the attribute
+			// to be removed, that's just fine.
+			if diffOld.NewRemoved {
+				continue
+			}
+
+			// If the last diff was a computed value then the absence of
+			// that value is allowed since it may mean the value ended up
+			// being the same.
+			if diffOld.NewComputed {
+				ok = true
+			}
+
+			// No exact match, but maybe this is a set containing computed
+			// values. So check if there is an approximate hash in the key
+			// and if so, try to match the key.
+			if strings.Contains(k, "~") {
+				parts := strings.Split(k, ".")
+				parts2 := append([]string(nil), parts...)
+
+				re := regexp.MustCompile(`^~\d+$`)
+				for i, part := range parts {
+					if re.MatchString(part) {
+						// we're going to consider this the base of a
+						// computed hash, and remove all longer matching fields
+						ok = true
+
+						parts2[i] = `\d+`
+						parts2 = parts2[:i+1]
+						break
+					}
+				}
+
+				re, err := regexp.Compile("^" + strings.Join(parts2, `\.`))
+				if err != nil {
+					return false, fmt.Sprintf("regexp failed to compile; err: %#v", err)
+				}
+
+				// Drop every new-diff key that matches the approximate
+				// hash pattern so it isn't reported as an extra below.
+				for k2, _ := range checkNew {
+					if re.MatchString(k2) {
+						delete(checkNew, k2)
+					}
+				}
+			}
+
+			// This is a little tricky, but when a diff contains a computed
+			// list, set, or map that can only be interpolated after the apply
+			// command has created the dependent resources, it could turn out
+			// that the result is actually the same as the existing state which
+			// would remove the key from the diff.
+			if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+				ok = true
+			}
+
+			// Similarly, in a RequiresNew scenario, a list that shows up in the plan
+			// diff can disappear from the apply diff, which is calculated from an
+			// empty state.
+			if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+				ok = true
+			}
+
+			if !ok {
+				return false, fmt.Sprintf("attribute mismatch: %s", k)
+			}
+		}
+
+		// search for the suffix of the base of a [computed] map, list or set.
+		match := multiVal.FindStringSubmatch(k)
+
+		if diffOld.NewComputed && len(match) == 2 {
+			matchLen := len(match[1])
+
+			// This is a computed list, set, or map, so remove any keys with
+			// this prefix from the check list.
+			kprefix := k[:len(k)-matchLen]
+			for k2, _ := range checkOld {
+				if strings.HasPrefix(k2, kprefix) {
+					delete(checkOld, k2)
+				}
+			}
+			for k2, _ := range checkNew {
+				if strings.HasPrefix(k2, kprefix) {
+					delete(checkNew, k2)
+				}
+			}
+		}
+
+		// TODO: check for the same value if not computed
+	}
+
+	// Check for leftover attributes
+	if len(checkNew) > 0 {
+		extras := make([]string, 0, len(checkNew))
+		for attr, _ := range checkNew {
+			extras = append(extras, attr)
+		}
+		return false,
+			fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", "))
+	}
+
+	return true, ""
+}
+
+// moduleDiffSort implements sort.Interface to sort module diffs by path:
+// shorter paths first, ties broken lexically on the joined path.
+type moduleDiffSort []*ModuleDiff
+
+func (s moduleDiffSort) Len() int      { return len(s) }
+func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s moduleDiffSort) Less(i, j int) bool {
+	a := s[i]
+	b := s[j]
+
+	// If the lengths are different, then the shorter one always wins
+	if len(a.Path) != len(b.Path) {
+		return len(a.Path) < len(b.Path)
+	}
+
+	// Otherwise, compare lexically
+	return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
new file mode 100644
index 00000000..bc9d638a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
@@ -0,0 +1,17 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// DestroyEdge is an edge that represents a standard "destroy" relationship:
+// Target depends on Source because Source is destroying.
+type DestroyEdge struct {
+	S, T dag.Vertex
+}
+
+// Hashcode identifies the edge by the pointer identity of both endpoints.
+func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) }
+func (e *DestroyEdge) Source() dag.Vertex    { return e.S }
+func (e *DestroyEdge) Target() dag.Vertex    { return e.T }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
new file mode 100644
index 00000000..3cb088a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -0,0 +1,63 @@
+package terraform
+
+import (
+ "log"
+ "strings"
+)
+
+// EvalNode is the interface that must be implemented by graph nodes to
+// evaluate/execute.
+type EvalNode interface {
+	// Eval evaluates this node with the given context. The second parameter
+	// are the argument values. These will match in order and 1-1 with the
+	// results of the Args() return value.
+	Eval(EvalContext) (interface{}, error)
+}
+
+// GraphNodeEvalable is the interface that graph nodes must implement
+// to enable evaluation.
+type GraphNodeEvalable interface {
+	EvalTree() EvalNode
+}
+
+// EvalEarlyExitError is a special error return value that can be returned
+// by eval nodes that does an early exit.
+type EvalEarlyExitError struct{}
+
+func (EvalEarlyExitError) Error() string { return "early exit" }
+
+// Eval evaluates the given EvalNode with the given context, properly
+// evaluating all args in the correct order.
+//
+// Unlike EvalRaw, an EvalEarlyExitError is swallowed and reported as a
+// normal (nil, nil) result.
+func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
+	// Call the lower level eval which doesn't understand early exit,
+	// and if we early exit, it isn't an error.
+	result, err := EvalRaw(n, ctx)
+	if err != nil {
+		if _, ok := err.(EvalEarlyExitError); ok {
+			return nil, nil
+		}
+	}
+
+	return result, err
+}
+
+// EvalRaw is like Eval except that it returns all errors, even if they
+// signal something normal such as EvalEarlyExitError.
+func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
+	// Prefix all log lines with the current module path for traceability.
+	path := "unknown"
+	if ctx != nil {
+		path = strings.Join(ctx.Path(), ".")
+	}
+
+	log.Printf("[DEBUG] %s: eval: %T", path, n)
+	output, err := n.Eval(ctx)
+	if err != nil {
+		// Early exits are expected control flow, so log at DEBUG not ERROR.
+		if _, ok := err.(EvalEarlyExitError); ok {
+			log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err)
+		} else {
+			log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
+		}
+	}
+
+	return output, err
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
new file mode 100644
index 00000000..2f6a4973
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -0,0 +1,359 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strconv"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalApply is an EvalNode implementation that calls the provider's Apply
+// with the instance diff and records the resulting state in Output
+// (and any apply error in Error).
+type EvalApply struct {
+	Info      *InstanceInfo
+	State     **InstanceState
+	Diff      **InstanceDiff
+	Provider  *ResourceProvider
+	Output    **InstanceState
+	CreateNew *bool
+	Error     *error
+}
+
+// TODO: test
+func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
+	diff := *n.Diff
+	provider := *n.Provider
+	state := *n.State
+
+	// If we have no diff, we have nothing to do!
+	if diff.Empty() {
+		log.Printf(
+			"[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id)
+		return nil, nil
+	}
+
+	// Remove any output values from the diff
+	for k, ad := range diff.CopyAttributes() {
+		if ad.Type == DiffAttrOutput {
+			diff.DelAttribute(k)
+		}
+	}
+
+	// If the state is nil, make it non-nil
+	if state == nil {
+		state = new(InstanceState)
+	}
+	state.init()
+
+	// Flag if we're creating a new instance.
+	// NOTE(review): && binds tighter than ||, so this reads as
+	// (no ID and not destroying) OR requires-new — presumably intended.
+	if n.CreateNew != nil {
+		*n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew()
+	}
+
+	// With the completed diff, apply!
+	log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id)
+	state, err := provider.Apply(n.Info, state, diff)
+	// Even on error, normalize to a non-nil, initialized state.
+	if state == nil {
+		state = new(InstanceState)
+	}
+	state.init()
+
+	// Force the "id" attribute to be our ID
+	if state.ID != "" {
+		state.Attributes["id"] = state.ID
+	}
+
+	// If the value is the unknown variable value, then it is an error.
+	// In this case we record the error and remove it from the state
+	for ak, av := range state.Attributes {
+		if av == config.UnknownVariableValue {
+			err = multierror.Append(err, fmt.Errorf(
+				"Attribute with unknown value: %s", ak))
+			delete(state.Attributes, ak)
+		}
+	}
+
+	// Write the final state
+	if n.Output != nil {
+		*n.Output = state
+	}
+
+	// If an error occurred, append it to the shared output error if we
+	// have one, otherwise just return it.
+	if err != nil {
+		if n.Error != nil {
+			helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error())
+			*n.Error = multierror.Append(*n.Error, helpfulErr)
+		} else {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
+
+// EvalApplyPre is an EvalNode implementation that does the pre-Apply work:
+// it normalizes a nil state and invokes the PreApply hook.
+type EvalApplyPre struct {
+	Info  *InstanceInfo
+	State **InstanceState
+	Diff  **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
+	state := *n.State
+	diff := *n.Diff
+
+	// If the state is nil, make it non-nil
+	if state == nil {
+		state = new(InstanceState)
+	}
+	state.init()
+
+	{
+		// Call pre-apply hook
+		err := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PreApply(n.Info, state, diff)
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
+
+// EvalApplyPost is an EvalNode implementation that does the post-Apply work:
+// it invokes the PostApply hook and then surfaces any accumulated apply
+// error as the eval result.
+type EvalApplyPost struct {
+	Info  *InstanceInfo
+	State **InstanceState
+	Error *error
+}
+
+// TODO: test
+func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
+	state := *n.State
+
+	{
+		// Call post-apply hook
+		err := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PostApply(n.Info, state, *n.Error)
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Propagate the apply error (if any) as this node's error.
+	return nil, *n.Error
+}
+
+// EvalApplyProvisioners is an EvalNode implementation that executes
+// the provisioners for a resource.
+//
+// TODO(mitchellh): This should probably be split up into a more fine-grained
+// ApplyProvisioner (single) that is looped over.
+type EvalApplyProvisioners struct {
+	Info           *InstanceInfo
+	State          **InstanceState
+	Resource       *config.Resource
+	InterpResource *Resource
+	CreateNew      *bool
+	Error          *error
+
+	// When is the type of provisioner to run at this point
+	When config.ProvisionerWhen
+}
+
+// TODO: test
+func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
+	state := *n.State
+
+	if n.CreateNew != nil && !*n.CreateNew {
+		// If we're not creating a new resource, then don't run provisioners
+		return nil, nil
+	}
+
+	provs := n.filterProvisioners()
+	if len(provs) == 0 {
+		// We have no provisioners, so don't do anything
+		return nil, nil
+	}
+
+	// taint tells us whether to enable tainting: only creation-time
+	// provisioner failures taint the resource.
+	taint := n.When == config.ProvisionerWhenCreate
+
+	// A prior error means apply already failed; taint (if applicable)
+	// and skip running provisioners entirely.
+	if n.Error != nil && *n.Error != nil {
+		if taint {
+			state.Tainted = true
+		}
+
+		// We're already tainted, so just return out
+		return nil, nil
+	}
+
+	{
+		// Call pre hook
+		err := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PreProvisionResource(n.Info, state)
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Run the provisioners; on failure, taint (if applicable) and append
+	// to the shared output error if we have one, otherwise return it.
+	err := n.apply(ctx, provs)
+	if err != nil {
+		if taint {
+			state.Tainted = true
+		}
+
+		if n.Error != nil {
+			*n.Error = multierror.Append(*n.Error, err)
+		} else {
+			return nil, err
+		}
+	}
+
+	{
+		// Call post hook
+		err := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PostProvisionResource(n.Info, state)
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
+
+// filterProvisioners filters the provisioners on the resource to only
+// the provisioners specified by the "when" option.
+func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
+	// Fast path the zero case
+	if n.Resource == nil {
+		return nil
+	}
+
+	if len(n.Resource.Provisioners) == 0 {
+		return nil
+	}
+
+	result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners))
+	for _, p := range n.Resource.Provisioners {
+		if p.When == n.When {
+			result = append(result, p)
+		}
+	}
+
+	return result
+}
+
+// apply runs each provisioner in order: interpolating its config and
+// connection info, firing the pre/post hooks, and invoking the
+// provisioner with a UI output that streams through the hook system.
+// It stops at the first failing provisioner unless its on_failure is
+// set to continue.
+func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error {
+	state := *n.State
+
+	// Store the original connection info, restore later
+	origConnInfo := state.Ephemeral.ConnInfo
+	defer func() {
+		state.Ephemeral.ConnInfo = origConnInfo
+	}()
+
+	for _, prov := range provs {
+		// Get the provisioner
+		provisioner := ctx.Provisioner(prov.Type)
+
+		// Interpolate the provisioner config
+		provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource)
+		if err != nil {
+			return err
+		}
+
+		// Interpolate the conn info, since it may contain variables
+		connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource)
+		if err != nil {
+			return err
+		}
+
+		// Merge the connection information: start from the resource's
+		// original conn info, then override with the provisioner's own,
+		// stringifying each value since ConnInfo is map[string]string.
+		overlay := make(map[string]string)
+		if origConnInfo != nil {
+			for k, v := range origConnInfo {
+				overlay[k] = v
+			}
+		}
+		for k, v := range connInfo.Config {
+			switch vt := v.(type) {
+			case string:
+				overlay[k] = vt
+			case int64:
+				overlay[k] = strconv.FormatInt(vt, 10)
+			case int32:
+				overlay[k] = strconv.FormatInt(int64(vt), 10)
+			case int:
+				overlay[k] = strconv.FormatInt(int64(vt), 10)
+			case float32:
+				overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32)
+			case float64:
+				overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64)
+			case bool:
+				overlay[k] = strconv.FormatBool(vt)
+			default:
+				overlay[k] = fmt.Sprintf("%v", vt)
+			}
+		}
+		state.Ephemeral.ConnInfo = overlay
+
+		{
+			// Call pre hook
+			err := ctx.Hook(func(h Hook) (HookAction, error) {
+				return h.PreProvision(n.Info, prov.Type)
+			})
+			if err != nil {
+				return err
+			}
+		}
+
+		// The output function: forward provisioner output lines through
+		// every registered hook. Hook errors are intentionally ignored
+		// here since output is best-effort.
+		outputFn := func(msg string) {
+			ctx.Hook(func(h Hook) (HookAction, error) {
+				h.ProvisionOutput(n.Info, prov.Type, msg)
+				return HookActionContinue, nil
+			})
+		}
+
+		// Invoke the Provisioner
+		output := CallbackUIOutput{OutputFn: outputFn}
+		applyErr := provisioner.Apply(&output, state, provConfig)
+
+		// Call post hook
+		hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PostProvision(n.Info, prov.Type, applyErr)
+		})
+
+		// Handle the error before we deal with the hook
+		if applyErr != nil {
+			// Determine failure behavior
+			switch prov.OnFailure {
+			case config.ProvisionerOnFailureContinue:
+				log.Printf(
+					"[INFO] apply: %s [%s]: error during provision, continue requested",
+					n.Info.Id, prov.Type)
+
+			case config.ProvisionerOnFailureFail:
+				return applyErr
+			}
+		}
+
+		// Deal with the hook
+		if hookErr != nil {
+			return hookErr
+		}
+	}
+
+	return nil
+
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
new file mode 100644
index 00000000..715e79e1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
@@ -0,0 +1,38 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalCheckPreventDestroy is an EvalNode implementation that returns an
+// error if a resource has PreventDestroy configured and the diff
+// would destroy the resource.
+type EvalCheckPreventDestroy struct {
+ Resource *config.Resource
+ ResourceId string
+ Diff **InstanceDiff
+}
+
+func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
+ if n.Diff == nil || *n.Diff == nil || n.Resource == nil {
+ return nil, nil
+ }
+
+ diff := *n.Diff
+ preventDestroy := n.Resource.Lifecycle.PreventDestroy
+
+ if diff.GetDestroy() && preventDestroy {
+ resourceId := n.ResourceId
+ if resourceId == "" {
+ resourceId = n.Resource.Id()
+ }
+
+ return nil, fmt.Errorf(preventDestroyErrStr, resourceId)
+ }
+
+ return nil, nil
+}
+
+const preventDestroyErrStr = `%s: the plan would destroy this resource, but it currently has lifecycle.prevent_destroy set to true. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or adjust the scope of the plan using the -target flag.`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
new file mode 100644
index 00000000..a1f815b7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
@@ -0,0 +1,84 @@
+package terraform
+
+import (
+ "sync"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalContext is the interface that is given to eval nodes to execute.
+type EvalContext interface {
+ // Stopped returns a channel that is closed when evaluation is stopped
+ // via Terraform.Context.Stop()
+ Stopped() <-chan struct{}
+
+ // Path is the current module path.
+ Path() []string
+
+ // Hook is used to call hook methods. The callback is called for each
+ // hook and should return the hook action to take and the error.
+ Hook(func(Hook) (HookAction, error)) error
+
+ // Input is the UIInput object for interacting with the UI.
+ Input() UIInput
+
+ // InitProvider initializes the provider with the given name and
+ // returns the implementation of the resource provider or an error.
+ //
+ // It is an error to initialize the same provider more than once.
+ InitProvider(string) (ResourceProvider, error)
+
+ // Provider gets the provider instance with the given name (already
+ // initialized) or returns nil if the provider isn't initialized.
+ Provider(string) ResourceProvider
+
+ // CloseProvider closes provider connections that aren't needed anymore.
+ CloseProvider(string) error
+
+ // ConfigureProvider configures the provider with the given
+ // configuration. This is a separate context call because this call
+ // is used to store the provider configuration for inheritance lookups
+ // with ParentProviderConfig().
+ ConfigureProvider(string, *ResourceConfig) error
+ SetProviderConfig(string, *ResourceConfig) error
+ ParentProviderConfig(string) *ResourceConfig
+
+ // ProviderInput and SetProviderInput are used to configure providers
+ // from user input.
+ ProviderInput(string) map[string]interface{}
+ SetProviderInput(string, map[string]interface{})
+
+ // InitProvisioner initializes the provisioner with the given name and
+ // returns the implementation of the resource provisioner or an error.
+ //
+ // It is an error to initialize the same provisioner more than once.
+ InitProvisioner(string) (ResourceProvisioner, error)
+
+ // Provisioner gets the provisioner instance with the given name (already
+ // initialized) or returns nil if the provisioner isn't initialized.
+ Provisioner(string) ResourceProvisioner
+
+ // CloseProvisioner closes provisioner connections that aren't needed
+ // anymore.
+ CloseProvisioner(string) error
+
+ // Interpolate takes the given raw configuration and completes
+ // the interpolations, returning the processed ResourceConfig.
+ //
+ // The resource argument is optional. If given, it is the resource
+ // that is currently being acted upon.
+ Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error)
+
+ // SetVariables sets the variables for the module within
+ // this context with the name n. This function call is additive:
+ // the second parameter is merged with any previous call.
+ SetVariables(string, map[string]interface{})
+
+ // Diff returns the global diff as well as the lock that should
+ // be used to modify that diff.
+ Diff() (*Diff, *sync.RWMutex)
+
+ // State returns the global state as well as the lock that should
+ // be used to modify that state.
+ State() (*State, *sync.RWMutex)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
new file mode 100644
index 00000000..3dcfb227
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
@@ -0,0 +1,347 @@
+package terraform
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// BuiltinEvalContext is an EvalContext implementation that is used by
+// Terraform by default.
+type BuiltinEvalContext struct {
+ // StopContext is the context used to track whether we're complete
+ StopContext context.Context
+
+ // PathValue is the Path that this context is operating within.
+ PathValue []string
+
+ // Interpolater setting below affect the interpolation of variables.
+ //
+ // The InterpolaterVars are the exact value for ${var.foo} values.
+ // The map is shared between all contexts and is a mapping of
+ // PATH to KEY to VALUE. Because it is shared by all contexts as well
+ // as the Interpolater itself, it is protected by InterpolaterVarLock
+ // which must be locked during any access to the map.
+ Interpolater *Interpolater
+ InterpolaterVars map[string]map[string]interface{}
+ InterpolaterVarLock *sync.Mutex
+
+ Components contextComponentFactory
+ Hooks []Hook
+ InputValue UIInput
+ ProviderCache map[string]ResourceProvider
+ ProviderConfigCache map[string]*ResourceConfig
+ ProviderInputConfig map[string]map[string]interface{}
+ ProviderLock *sync.Mutex
+ ProvisionerCache map[string]ResourceProvisioner
+ ProvisionerLock *sync.Mutex
+ DiffValue *Diff
+ DiffLock *sync.RWMutex
+ StateValue *State
+ StateLock *sync.RWMutex
+
+ once sync.Once
+}
+
+func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} {
+ // This can happen during tests. During tests, we just block forever.
+ if ctx.StopContext == nil {
+ return nil
+ }
+
+ return ctx.StopContext.Done()
+}
+
+func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
+ for _, h := range ctx.Hooks {
+ action, err := fn(h)
+ if err != nil {
+ return err
+ }
+
+ switch action {
+ case HookActionContinue:
+ continue
+ case HookActionHalt:
+ // Return an early exit error to trigger an early exit
+ log.Printf("[WARN] Early exit triggered by hook: %T", h)
+ return EvalEarlyExitError{}
+ }
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) Input() UIInput {
+ return ctx.InputValue
+}
+
+func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) {
+ ctx.once.Do(ctx.init)
+
+ // If we already initialized, it is an error
+ if p := ctx.Provider(n); p != nil {
+ return nil, fmt.Errorf("Provider '%s' already initialized", n)
+ }
+
+ // Warning: make sure to acquire these locks AFTER the call to Provider
+ // above, since it also acquires locks.
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+ key := PathCacheKey(providerPath)
+
+ typeName := strings.SplitN(n, ".", 2)[0]
+ p, err := ctx.Components.ResourceProvider(typeName, key)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx.ProviderCache[key] = p
+ return p, nil
+}
+
+func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider {
+ ctx.once.Do(ctx.init)
+
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+
+ return ctx.ProviderCache[PathCacheKey(providerPath)]
+}
+
+func (ctx *BuiltinEvalContext) CloseProvider(n string) error {
+ ctx.once.Do(ctx.init)
+
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+
+ var provider interface{}
+ provider = ctx.ProviderCache[PathCacheKey(providerPath)]
+ if provider != nil {
+ if p, ok := provider.(ResourceProviderCloser); ok {
+ delete(ctx.ProviderCache, PathCacheKey(providerPath))
+ return p.Close()
+ }
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) ConfigureProvider(
+	n string, cfg *ResourceConfig) error {
+	p := ctx.Provider(n)
+	if p == nil {
+		return fmt.Errorf("Provider '%s' not initialized", n)
+	}
+
+	// Propagate the error: returning nil here would silently drop it.
+	if err := ctx.SetProviderConfig(n, cfg); err != nil {
+		return err
+	}
+	return p.Configure(cfg)
+}
+
+func (ctx *BuiltinEvalContext) SetProviderConfig(
+ n string, cfg *ResourceConfig) error {
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+
+ // Save the configuration
+ ctx.ProviderLock.Lock()
+ ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg
+ ctx.ProviderLock.Unlock()
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} {
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ // Make a copy of the path so we can safely edit it
+ path := ctx.Path()
+ pathCopy := make([]string, len(path)+1)
+ copy(pathCopy, path)
+
+ // Go up the tree.
+ for i := len(path) - 1; i >= 0; i-- {
+ pathCopy[i+1] = n
+ k := PathCacheKey(pathCopy[:i+2])
+ if v, ok := ctx.ProviderInputConfig[k]; ok {
+ return v
+ }
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) {
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+
+ // Save the configuration
+ ctx.ProviderLock.Lock()
+ ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c
+ ctx.ProviderLock.Unlock()
+}
+
+func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig {
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ // Make a copy of the path so we can safely edit it
+ path := ctx.Path()
+ pathCopy := make([]string, len(path)+1)
+ copy(pathCopy, path)
+
+ // Go up the tree.
+ for i := len(path) - 1; i >= 0; i-- {
+ pathCopy[i+1] = n
+ k := PathCacheKey(pathCopy[:i+2])
+ if v, ok := ctx.ProviderConfigCache[k]; ok {
+ return v
+ }
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) InitProvisioner(
+ n string) (ResourceProvisioner, error) {
+ ctx.once.Do(ctx.init)
+
+ // If we already initialized, it is an error
+ if p := ctx.Provisioner(n); p != nil {
+ return nil, fmt.Errorf("Provisioner '%s' already initialized", n)
+ }
+
+ // Warning: make sure to acquire these locks AFTER the call to Provisioner
+ // above, since it also acquires locks.
+ ctx.ProvisionerLock.Lock()
+ defer ctx.ProvisionerLock.Unlock()
+
+ provPath := make([]string, len(ctx.Path())+1)
+ copy(provPath, ctx.Path())
+ provPath[len(provPath)-1] = n
+ key := PathCacheKey(provPath)
+
+ p, err := ctx.Components.ResourceProvisioner(n, key)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx.ProvisionerCache[key] = p
+ return p, nil
+}
+
+func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner {
+ ctx.once.Do(ctx.init)
+
+ ctx.ProvisionerLock.Lock()
+ defer ctx.ProvisionerLock.Unlock()
+
+ provPath := make([]string, len(ctx.Path())+1)
+ copy(provPath, ctx.Path())
+ provPath[len(provPath)-1] = n
+
+ return ctx.ProvisionerCache[PathCacheKey(provPath)]
+}
+
+func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
+ ctx.once.Do(ctx.init)
+
+ ctx.ProvisionerLock.Lock()
+ defer ctx.ProvisionerLock.Unlock()
+
+ provPath := make([]string, len(ctx.Path())+1)
+ copy(provPath, ctx.Path())
+ provPath[len(provPath)-1] = n
+
+ var prov interface{}
+ prov = ctx.ProvisionerCache[PathCacheKey(provPath)]
+ if prov != nil {
+ if p, ok := prov.(ResourceProvisionerCloser); ok {
+ delete(ctx.ProvisionerCache, PathCacheKey(provPath))
+ return p.Close()
+ }
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) Interpolate(
+ cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) {
+ if cfg != nil {
+ scope := &InterpolationScope{
+ Path: ctx.Path(),
+ Resource: r,
+ }
+
+ vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
+ if err != nil {
+ return nil, err
+ }
+
+ // Do the interpolation
+ if err := cfg.Interpolate(vs); err != nil {
+ return nil, err
+ }
+ }
+
+ result := NewResourceConfig(cfg)
+ result.interpolateForce()
+ return result, nil
+}
+
+func (ctx *BuiltinEvalContext) Path() []string {
+ return ctx.PathValue
+}
+
+func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) {
+ ctx.InterpolaterVarLock.Lock()
+ defer ctx.InterpolaterVarLock.Unlock()
+
+ path := make([]string, len(ctx.Path())+1)
+ copy(path, ctx.Path())
+ path[len(path)-1] = n
+ key := PathCacheKey(path)
+
+ vars := ctx.InterpolaterVars[key]
+ if vars == nil {
+ vars = make(map[string]interface{})
+ ctx.InterpolaterVars[key] = vars
+ }
+
+ for k, v := range vs {
+ vars[k] = v
+ }
+}
+
+func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) {
+ return ctx.DiffValue, ctx.DiffLock
+}
+
+func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) {
+ return ctx.StateValue, ctx.StateLock
+}
+
+func (ctx *BuiltinEvalContext) init() {
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
new file mode 100644
index 00000000..4f90d5b1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
@@ -0,0 +1,208 @@
+package terraform
+
+import (
+ "sync"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// MockEvalContext is a mock version of EvalContext that can be used
+// for tests.
+type MockEvalContext struct {
+ StoppedCalled bool
+ StoppedValue <-chan struct{}
+
+ HookCalled bool
+ HookHook Hook
+ HookError error
+
+ InputCalled bool
+ InputInput UIInput
+
+ InitProviderCalled bool
+ InitProviderName string
+ InitProviderProvider ResourceProvider
+ InitProviderError error
+
+ ProviderCalled bool
+ ProviderName string
+ ProviderProvider ResourceProvider
+
+ CloseProviderCalled bool
+ CloseProviderName string
+ CloseProviderProvider ResourceProvider
+
+ ProviderInputCalled bool
+ ProviderInputName string
+ ProviderInputConfig map[string]interface{}
+
+ SetProviderInputCalled bool
+ SetProviderInputName string
+ SetProviderInputConfig map[string]interface{}
+
+ ConfigureProviderCalled bool
+ ConfigureProviderName string
+ ConfigureProviderConfig *ResourceConfig
+ ConfigureProviderError error
+
+ SetProviderConfigCalled bool
+ SetProviderConfigName string
+ SetProviderConfigConfig *ResourceConfig
+
+ ParentProviderConfigCalled bool
+ ParentProviderConfigName string
+ ParentProviderConfigConfig *ResourceConfig
+
+ InitProvisionerCalled bool
+ InitProvisionerName string
+ InitProvisionerProvisioner ResourceProvisioner
+ InitProvisionerError error
+
+ ProvisionerCalled bool
+ ProvisionerName string
+ ProvisionerProvisioner ResourceProvisioner
+
+ CloseProvisionerCalled bool
+ CloseProvisionerName string
+ CloseProvisionerProvisioner ResourceProvisioner
+
+ InterpolateCalled bool
+ InterpolateConfig *config.RawConfig
+ InterpolateResource *Resource
+ InterpolateConfigResult *ResourceConfig
+ InterpolateError error
+
+ PathCalled bool
+ PathPath []string
+
+ SetVariablesCalled bool
+ SetVariablesModule string
+ SetVariablesVariables map[string]interface{}
+
+ DiffCalled bool
+ DiffDiff *Diff
+ DiffLock *sync.RWMutex
+
+ StateCalled bool
+ StateState *State
+ StateLock *sync.RWMutex
+}
+
+func (c *MockEvalContext) Stopped() <-chan struct{} {
+ c.StoppedCalled = true
+ return c.StoppedValue
+}
+
+func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
+ c.HookCalled = true
+ if c.HookHook != nil {
+ if _, err := fn(c.HookHook); err != nil {
+ return err
+ }
+ }
+
+ return c.HookError
+}
+
+func (c *MockEvalContext) Input() UIInput {
+ c.InputCalled = true
+ return c.InputInput
+}
+
+func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) {
+ c.InitProviderCalled = true
+ c.InitProviderName = n
+ return c.InitProviderProvider, c.InitProviderError
+}
+
+func (c *MockEvalContext) Provider(n string) ResourceProvider {
+ c.ProviderCalled = true
+ c.ProviderName = n
+ return c.ProviderProvider
+}
+
+func (c *MockEvalContext) CloseProvider(n string) error {
+ c.CloseProviderCalled = true
+ c.CloseProviderName = n
+ return nil
+}
+
+func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error {
+ c.ConfigureProviderCalled = true
+ c.ConfigureProviderName = n
+ c.ConfigureProviderConfig = cfg
+ return c.ConfigureProviderError
+}
+
+func (c *MockEvalContext) SetProviderConfig(
+ n string, cfg *ResourceConfig) error {
+ c.SetProviderConfigCalled = true
+ c.SetProviderConfigName = n
+ c.SetProviderConfigConfig = cfg
+ return nil
+}
+
+func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig {
+ c.ParentProviderConfigCalled = true
+ c.ParentProviderConfigName = n
+ return c.ParentProviderConfigConfig
+}
+
+func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} {
+ c.ProviderInputCalled = true
+ c.ProviderInputName = n
+ return c.ProviderInputConfig
+}
+
+func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) {
+ c.SetProviderInputCalled = true
+ c.SetProviderInputName = n
+ c.SetProviderInputConfig = cfg
+}
+
+func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) {
+ c.InitProvisionerCalled = true
+ c.InitProvisionerName = n
+ return c.InitProvisionerProvisioner, c.InitProvisionerError
+}
+
+func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner {
+ c.ProvisionerCalled = true
+ c.ProvisionerName = n
+ return c.ProvisionerProvisioner
+}
+
+func (c *MockEvalContext) CloseProvisioner(n string) error {
+ c.CloseProvisionerCalled = true
+ c.CloseProvisionerName = n
+ return nil
+}
+
+func (c *MockEvalContext) Interpolate(
+ config *config.RawConfig, resource *Resource) (*ResourceConfig, error) {
+ c.InterpolateCalled = true
+ c.InterpolateConfig = config
+ c.InterpolateResource = resource
+ return c.InterpolateConfigResult, c.InterpolateError
+}
+
+func (c *MockEvalContext) Path() []string {
+ c.PathCalled = true
+ return c.PathPath
+}
+
+func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) {
+ c.SetVariablesCalled = true
+ c.SetVariablesModule = n
+ c.SetVariablesVariables = vs
+}
+
+func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) {
+ c.DiffCalled = true
+ return c.DiffDiff, c.DiffLock
+}
+
+func (c *MockEvalContext) State() (*State, *sync.RWMutex) {
+ c.StateCalled = true
+ return c.StateState, c.StateLock
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
new file mode 100644
index 00000000..2ae56a75
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
@@ -0,0 +1,58 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state
+// when there is a resource count with zero/one boundary, i.e. fixing
+// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
+type EvalCountFixZeroOneBoundary struct {
+ Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) {
+ // Get the count, important for knowing whether we're supposed to
+ // be adding the zero, or trimming it.
+ count, err := n.Resource.Count()
+ if err != nil {
+ return nil, err
+ }
+
+ // Figure what to look for and what to replace it with
+ hunt := n.Resource.Id()
+ replace := hunt + ".0"
+ if count < 2 {
+ hunt, replace = replace, hunt
+ }
+
+ state, lock := ctx.State()
+
+ // Get a lock so we can access this instance and potentially make
+ // changes to it.
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs, ok := mod.Resources[hunt]
+ if !ok {
+ return nil, nil
+ }
+
+ // If the replacement key exists, we just keep both
+ if _, ok := mod.Resources[replace]; ok {
+ return nil, nil
+ }
+
+ mod.Resources[replace] = rs
+ delete(mod.Resources, hunt)
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
new file mode 100644
index 00000000..91e2b904
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
@@ -0,0 +1,78 @@
+package terraform
+
+import (
+ "log"
+)
+
+// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state
+// when there is a resource count with zero/one boundary, i.e. fixing
+// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
+//
+// This works on the global state.
+type EvalCountFixZeroOneBoundaryGlobal struct{}
+
+// TODO: test
+func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) {
+ // Get the state and lock it since we'll potentially modify it
+ state, lock := ctx.State()
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Prune the state since we require a clean state to work
+ state.prune()
+
+ // Go through each modules since the boundaries are restricted to a
+ // module scope.
+ for _, m := range state.Modules {
+ if err := n.fixModule(m); err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error {
+	// Counts keeps track of keys and their counts
+	counts := make(map[string]int)
+	for k := range m.Resources {
+		// Parse the key
+		key, err := ParseResourceStateKey(k)
+		if err != nil {
+			return err
+		}
+
+		// Set the index to -1 so that we can keep count
+		key.Index = -1
+
+		// Increment
+		counts[key.String()]++
+	}
+
+	// Go through the counts and do the fixup for each resource
+	for raw, count := range counts {
+		// Search and replace this resource
+		search := raw
+		replace := raw + ".0"
+		if count < 2 {
+			search, replace = replace, search
+		}
+		log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace)
+
+		// Look for the resource state. If we don't have one, then it is okay.
+		rs, ok := m.Resources[search]
+		if !ok {
+			continue
+		}
+
+		// If the replacement key exists, we just keep both
+		if _, ok := m.Resources[replace]; ok {
+			continue
+		}
+
+		m.Resources[replace] = rs
+		delete(m.Resources, search)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
new file mode 100644
index 00000000..54a8333e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
@@ -0,0 +1,25 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalCountCheckComputed is an EvalNode that checks if a resource count
+// is computed and errors if so. This can possibly happen across a
+// module boundary and we don't yet support this.
+type EvalCountCheckComputed struct {
+ Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalCountCheckComputed) Eval(ctx EvalContext) (interface{}, error) {
+ if n.Resource.RawCount.Value() == unknownValue() {
+ return nil, fmt.Errorf(
+ "%s: value of 'count' cannot be computed",
+ n.Resource.Id())
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
new file mode 100644
index 00000000..6f09526a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -0,0 +1,478 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalCompareDiff is an EvalNode implementation that compares two diffs
+// and errors if the diffs are not equal.
+type EvalCompareDiff struct {
+ Info *InstanceInfo
+ One, Two **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
+ one, two := *n.One, *n.Two
+
+ // If either are nil, let them be empty
+ if one == nil {
+ one = new(InstanceDiff)
+ one.init()
+ }
+ if two == nil {
+ two = new(InstanceDiff)
+ two.init()
+ }
+ oneId, _ := one.GetAttribute("id")
+ twoId, _ := two.GetAttribute("id")
+ one.DelAttribute("id")
+ two.DelAttribute("id")
+ defer func() {
+ if oneId != nil {
+ one.SetAttribute("id", oneId)
+ }
+ if twoId != nil {
+ two.SetAttribute("id", twoId)
+ }
+ }()
+
+ if same, reason := one.Same(two); !same {
+ log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id)
+ log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason)
+ log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one)
+ log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two)
+ return nil, fmt.Errorf(
+ "%s: diffs didn't match during apply. This is a bug with "+
+ "Terraform and should be reported as a GitHub Issue.\n"+
+ "\n"+
+ "Please include the following information in your report:\n"+
+ "\n"+
+ " Terraform Version: %s\n"+
+ " Resource ID: %s\n"+
+ " Mismatch reason: %s\n"+
+ " Diff One (usually from plan): %#v\n"+
+ " Diff Two (usually from apply): %#v\n"+
+ "\n"+
+ "Also include as much context as you can about your config, state, "+
+ "and the steps you performed to trigger this error.\n",
+ n.Info.Id, Version, n.Info.Id, reason, one, two)
+ }
+
+ return nil, nil
+}
+
+// EvalDiff is an EvalNode implementation that does a refresh for
+// a resource.
+type EvalDiff struct {
+ Name string
+ Info *InstanceInfo
+ Config **ResourceConfig
+ Provider *ResourceProvider
+ Diff **InstanceDiff
+ State **InstanceState
+ OutputDiff **InstanceDiff
+ OutputState **InstanceState
+
+ // Resource is needed to fetch the ignore_changes list so we can
+ // filter user-requested ignored attributes from the diff.
+ Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+ config := *n.Config
+ provider := *n.Provider
+
+ // Call pre-diff hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreDiff(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // The state for the diff must never be nil
+ diffState := state
+ if diffState == nil {
+ diffState = new(InstanceState)
+ }
+ diffState.init()
+
+ // Diff!
+ diff, err := provider.Diff(n.Info, diffState, config)
+ if err != nil {
+ return nil, err
+ }
+ if diff == nil {
+ diff = new(InstanceDiff)
+ }
+
+ // Set DestroyDeposed if we have deposed instances
+ _, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) {
+ if len(rs.Deposed) > 0 {
+ diff.DestroyDeposed = true
+ }
+
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Preserve the DestroyTainted flag
+ if n.Diff != nil {
+ diff.SetTainted((*n.Diff).GetDestroyTainted())
+ }
+
+ // Require a destroy if there is an ID and it requires new.
+ if diff.RequiresNew() && state != nil && state.ID != "" {
+ diff.SetDestroy(true)
+ }
+
+ // If we're creating a new resource, compute its ID
+ if diff.RequiresNew() || state == nil || state.ID == "" {
+ var oldID string
+ if state != nil {
+ oldID = state.Attributes["id"]
+ }
+
+ // Add diff to compute new ID
+ diff.init()
+ diff.SetAttribute("id", &ResourceAttrDiff{
+ Old: oldID,
+ NewComputed: true,
+ RequiresNew: true,
+ Type: DiffAttrOutput,
+ })
+ }
+
+ // filter out ignored resources
+ if err := n.processIgnoreChanges(diff); err != nil {
+ return nil, err
+ }
+
+ // Call post-refresh hook
+ err = ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostDiff(n.Info, diff)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Update our output
+ *n.OutputDiff = diff
+
+ // Update the state if we care
+ if n.OutputState != nil {
+ *n.OutputState = state
+
+ // Merge our state so that the state is updated with our plan
+ if !diff.Empty() && n.OutputState != nil {
+ *n.OutputState = state.MergeDiff(diff)
+ }
+ }
+
+ return nil, nil
+}
+
+func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
+ if diff == nil || n.Resource == nil || n.Resource.Id() == "" {
+ return nil
+ }
+ ignoreChanges := n.Resource.Lifecycle.IgnoreChanges
+
+ if len(ignoreChanges) == 0 {
+ return nil
+ }
+
+ // If we're just creating the resource, we shouldn't alter the
+ // Diff at all
+ if diff.ChangeType() == DiffCreate {
+ return nil
+ }
+
+ // If the resource has been tainted then we don't process ignore changes
+ // since we MUST recreate the entire resource.
+ if diff.GetDestroyTainted() {
+ return nil
+ }
+
+ attrs := diff.CopyAttributes()
+
+ // get the complete set of keys we want to ignore
+ ignorableAttrKeys := make(map[string]bool)
+ for _, ignoredKey := range ignoreChanges {
+ for k := range attrs {
+ if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) {
+ ignorableAttrKeys[k] = true
+ }
+ }
+ }
+
+ // If the resource was being destroyed, check to see if we can ignore the
+ // reason for it being destroyed.
+ if diff.GetDestroy() {
+ for k, v := range attrs {
+ if k == "id" {
+ // id will always be changed if we intended to replace this instance
+ continue
+ }
+ if v.Empty() || v.NewComputed {
+ continue
+ }
+
+ // If any RequiresNew attribute isn't ignored, we need to keep the diff
+ // as-is to be able to replace the resource.
+ if v.RequiresNew && !ignorableAttrKeys[k] {
+ return nil
+ }
+ }
+
+ // Now that we know that we aren't replacing the instance, we can filter
+ // out all the empty and computed attributes. There may be a bunch of
+ // extraneous attribute diffs for the other non-requires-new attributes
+ // going from "" -> "configval" or "" -> "<computed>".
+ // We must make sure any flatmapped containers are filterred (or not) as a
+ // whole.
+ containers := groupContainers(diff)
+ keep := map[string]bool{}
+ for _, v := range containers {
+ if v.keepDiff() {
+ // At least one key has changes, so list all the sibling keys
+ // to keep in the diff.
+ for k := range v {
+ keep[k] = true
+ }
+ }
+ }
+
+ for k, v := range attrs {
+ if (v.Empty() || v.NewComputed) && !keep[k] {
+ ignorableAttrKeys[k] = true
+ }
+ }
+ }
+
+ // Here we undo the two reactions to RequireNew in EvalDiff - the "id"
+ // attribute diff and the Destroy boolean field
+ log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " +
+ "because after ignore_changes, this diff no longer requires replacement")
+ diff.DelAttribute("id")
+ diff.SetDestroy(false)
+
+ // If we didn't hit any of our early exit conditions, we can filter the diff.
+ for k := range ignorableAttrKeys {
+ log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s",
+ n.Resource.Id(), k)
+ diff.DelAttribute(k)
+ }
+
+ return nil
+}
+
+// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
+type flatAttrDiff map[string]*ResourceAttrDiff
+
+// we need to keep all keys if any of them have a diff
+func (f flatAttrDiff) keepDiff() bool {
+ for _, v := range f {
+ if !v.Empty() && !v.NewComputed {
+ return true
+ }
+ }
+ return false
+}
+
+// sets, lists and maps need to be compared for diff inclusion as a whole, so
+// group the flatmapped keys together for easier comparison.
+func groupContainers(d *InstanceDiff) map[string]flatAttrDiff {
+ // multiVal is declared elsewhere in this package; it presumably matches
+ // flatmap count keys (".#"/".%") that mark a container root — TODO confirm.
+ isIndex := multiVal.MatchString
+ containers := map[string]flatAttrDiff{}
+ attrs := d.CopyAttributes()
+ // we need to loop once to find the index key
+ for k := range attrs {
+ if isIndex(k) {
+ // add the key, always including the final dot to fully qualify it
+ containers[k[:len(k)-1]] = flatAttrDiff{}
+ }
+ }
+
+ // loop again to find all the sub keys
+ for prefix, values := range containers {
+ for k, attrDiff := range attrs {
+ // we include the index value as well, since it could be part of the diff
+ if strings.HasPrefix(k, prefix) {
+ values[k] = attrDiff
+ }
+ }
+ }
+
+ return containers
+}
+
+// EvalDiffDestroy is an EvalNode implementation that returns a plain
+// destroy diff.
+type EvalDiffDestroy struct {
+	Info   *InstanceInfo
+	State  **InstanceState
+	Output **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
+	state := *n.State
+
+	// Nothing to destroy if there is no state or no recorded ID.
+	if state == nil || state.ID == "" {
+		return nil, nil
+	}
+
+	// Notify hooks that we're about to compute a diff.
+	if err := ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PreDiff(n.Info, state)
+	}); err != nil {
+		return nil, err
+	}
+
+	// A destroy diff is just the Destroy flag, no attribute changes.
+	diff := &InstanceDiff{Destroy: true}
+
+	// Notify hooks of the computed diff.
+	if err := ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PostDiff(n.Info, diff)
+	}); err != nil {
+		return nil, err
+	}
+
+	*n.Output = diff
+	return nil, nil
+}
+
+// EvalDiffDestroyModule is an EvalNode implementation that marks a whole
+// module in the full diff as being destroyed.
+type EvalDiffDestroyModule struct {
+	Path []string
+}
+
+// TODO: test
+func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) {
+	diff, lock := ctx.Diff()
+
+	// Hold the diff lock for the whole mutation so concurrent walkers
+	// cannot interleave with us.
+	lock.Lock()
+	defer lock.Unlock()
+
+	// Find the module diff, lazily creating it, and flag it destroyed.
+	modDiff := diff.ModuleByPath(n.Path)
+	if modDiff == nil {
+		modDiff = diff.AddModule(n.Path)
+	}
+	modDiff.Destroy = true
+
+	return nil, nil
+}
+
+// EvalFilterDiff is an EvalNode implementation that copies a filtered
+// subset of the input diff into the output.
+type EvalFilterDiff struct {
+	// Input and output
+	Diff   **InstanceDiff
+	Output **InstanceDiff
+
+	// Destroy, if true, will only include a destroy diff if it is set.
+	Destroy bool
+}
+
+func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) {
+	if *n.Diff == nil {
+		return nil, nil
+	}
+
+	input := *n.Diff
+	result := new(InstanceDiff)
+
+	// In destroy mode the only thing carried over is the destroy flag,
+	// which is also implied by a replacement (RequiresNew).
+	if n.Destroy && (input.GetDestroy() || input.RequiresNew()) {
+		result.SetDestroy(true)
+	}
+
+	if n.Output != nil {
+		*n.Output = result
+	}
+
+	return nil, nil
+}
+
+// EvalReadDiff is an EvalNode implementation that reads a resource's
+// diff out of the full diff and stores it in Diff. (The previous comment
+// said "writes"; this node only reads.)
+type EvalReadDiff struct {
+ Name string
+ Diff **InstanceDiff
+}
+
+func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {
+ diff, lock := ctx.Diff()
+
+ // Acquire the lock so that we can do this safely concurrently
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Read the resource diff out of the module diff, if any.
+ // NOTE(review): when the module has no diff, *n.Diff is left untouched
+ // rather than reset to nil — confirm callers expect that.
+ modDiff := diff.ModuleByPath(ctx.Path())
+ if modDiff == nil {
+ return nil, nil
+ }
+
+ *n.Diff = modDiff.Resources[n.Name]
+
+ return nil, nil
+}
+
+// EvalWriteDiff is an EvalNode implementation that writes the diff to
+// the full diff.
+type EvalWriteDiff struct {
+	Name string
+	Diff **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {
+	diff, lock := ctx.Diff()
+
+	// Normalize the diff to write: an empty diff is stored as nil so the
+	// resource entry is removed below.
+	var diffVal *InstanceDiff
+	if n.Diff != nil {
+		diffVal = *n.Diff
+	}
+	if diffVal.Empty() {
+		diffVal = nil
+	}
+
+	// Hold the lock while mutating the shared diff.
+	lock.Lock()
+	defer lock.Unlock()
+
+	modDiff := diff.ModuleByPath(ctx.Path())
+	if modDiff == nil {
+		modDiff = diff.AddModule(ctx.Path())
+	}
+	if diffVal == nil {
+		delete(modDiff.Resources, n.Name)
+	} else {
+		modDiff.Resources[n.Name] = diffVal
+	}
+
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
new file mode 100644
index 00000000..470f798b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
@@ -0,0 +1,20 @@
+package terraform
+
+// EvalReturnError is an EvalNode implementation that returns an
+// error if it is present.
+//
+// This is useful for scenarios where an error has been captured by
+// another EvalNode (like EvalApply) for special EvalTree-based error
+// handling, and that handling has completed, so the error should be
+// returned normally.
+type EvalReturnError struct {
+	Error *error
+}
+
+func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) {
+	// Surface the captured error, if one was recorded.
+	if n.Error != nil {
+		return nil, *n.Error
+	}
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
new file mode 100644
index 00000000..711c625c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
@@ -0,0 +1,25 @@
+package terraform
+
+// EvalNodeFilterFunc is the callback used to replace a node with
+// another node. To not do the replacement, just return the input node.
+type EvalNodeFilterFunc func(EvalNode) EvalNode
+
+// EvalNodeFilterable is an interface that can be implemented by
+// EvalNodes to allow filtering of sub-elements. Note that this isn't
+// a common thing to implement and you probably don't need it.
+type EvalNodeFilterable interface {
+ EvalNode
+ Filter(EvalNodeFilterFunc)
+}
+
+// EvalFilter runs the filter on the given node and returns the
+// final filtered value. This should be called rather than checking
+// the EvalNode directly since this will properly handle EvalNodeFilterables.
+func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode {
+	// Non-filterable nodes are replaced directly by the filter.
+	f, ok := node.(EvalNodeFilterable)
+	if !ok {
+		return fn(node)
+	}
+	// Filterable nodes apply the filter to their children and are
+	// returned unchanged themselves.
+	f.Filter(fn)
+	return node
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
new file mode 100644
index 00000000..1a55f024
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
@@ -0,0 +1,49 @@
+package terraform
+
+// EvalNodeOpFilterable is an interface that EvalNodes can implement
+// to be filterable by the operation that is being run on Terraform.
+// IncludeInOp reports whether the node should execute during the given
+// walk operation.
+type EvalNodeOpFilterable interface {
+ IncludeInOp(walkOperation) bool
+}
+
+// EvalNodeFilterOp returns a filter function that filters nodes that
+// include themselves in specific operations.
+func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc {
+	return func(n EvalNode) EvalNode {
+		// Nodes that don't implement the interface are always included.
+		of, ok := n.(EvalNodeOpFilterable)
+		if !ok || of.IncludeInOp(op) {
+			return n
+		}
+		// Excluded nodes are replaced with a no-op.
+		return EvalNoop{}
+	}
+}
+
+// EvalOpFilter is an EvalNode implementation that is a proxy to
+// another node but filters based on the operation.
+type EvalOpFilter struct {
+	// Ops is the list of operations to include this node in.
+	Ops []walkOperation
+
+	// Node is the node to execute
+	Node EvalNode
+}
+
+// TODO: test
+func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) {
+	// Simply delegate evaluation to the wrapped node.
+	return EvalRaw(n.Node, ctx)
+}
+
+// IncludeInOp implements EvalNodeOpFilterable: it reports whether op is
+// among the operations this node participates in.
+func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool {
+	for _, candidate := range n.Ops {
+		if candidate == op {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
new file mode 100644
index 00000000..d6b46a1f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
@@ -0,0 +1,26 @@
+package terraform
+
+// EvalIf is an EvalNode that is a conditional.
+type EvalIf struct {
+	// If chooses the branch to evaluate; an error from it aborts this node.
+	If func(EvalContext) (bool, error)
+	// Then is evaluated when If yields true.
+	Then EvalNode
+	// Else may be nil; it is evaluated when If yields false.
+	Else EvalNode
+}
+
+// TODO: test
+func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) {
+	yes, err := n.If(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// Guard-clause form: the true branch always returns, so the former
+	// else-nesting was redundant. Behavior is unchanged.
+	if yes {
+		return EvalRaw(n.Then, ctx)
+	}
+	if n.Else != nil {
+		return EvalRaw(n.Else, ctx)
+	}
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
new file mode 100644
index 00000000..62cc581f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
@@ -0,0 +1,76 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// EvalImportState is an EvalNode implementation that performs an
+// ImportState operation on a provider. This will return the imported
+// states but won't modify any actual state.
+type EvalImportState struct {
+ Provider *ResourceProvider
+ Info *InstanceInfo
+ Id string
+ Output *[]*InstanceState
+}
+
+// TODO: test
+func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) {
+ provider := *n.Provider
+
+ {
+ // Call pre-import hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreImportState(n.Info, n.Id)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Import! Note the provider may return multiple instance states for a
+ // single ID (Output is a slice).
+ state, err := provider.ImportState(n.Info, n.Id)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err)
+ }
+
+ if n.Output != nil {
+ *n.Output = state
+ }
+
+ {
+ // Call post-import hook with everything that was imported
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostImportState(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalImportStateVerify verifies the state after ImportState and
+// after the refresh to make sure it is non-nil and valid.
+type EvalImportStateVerify struct {
+ Info *InstanceInfo
+ Id string
+ State **InstanceState
+}
+
+// TODO: test
+func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) {
+ // An empty state after refresh means no resource with the supplied ID
+ // could be found, so the import is rejected with guidance.
+ state := *n.State
+ if state.Empty() {
+ return nil, fmt.Errorf(
+ "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+
+ "exist. Please verify the ID is correct. You cannot import non-existent\n"+
+ "resources using Terraform import.",
+ n.Info.HumanId(),
+ n.Id)
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
new file mode 100644
index 00000000..6825ff59
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
@@ -0,0 +1,24 @@
+package terraform
+
+import "github.com/hashicorp/terraform/config"
+
+// EvalInterpolate is an EvalNode implementation that takes a raw
+// configuration and interpolates it.
+type EvalInterpolate struct {
+	Config   *config.RawConfig
+	Resource *Resource
+	Output   **ResourceConfig
+}
+
+func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) {
+	// Interpolate the raw configuration within the current context.
+	rc, err := ctx.Interpolate(n.Config, n.Resource)
+	if err != nil {
+		return nil, err
+	}
+	// Store the result only when an output destination was provided.
+	if out := n.Output; out != nil {
+		*out = rc
+	}
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
new file mode 100644
index 00000000..f4bc8225
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
@@ -0,0 +1,8 @@
+package terraform
+
+// EvalNoop is an EvalNode that does nothing.
+type EvalNoop struct{}
+
+// Eval implements EvalNode; it always succeeds with no result.
+func (EvalNoop) Eval(EvalContext) (interface{}, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
new file mode 100644
index 00000000..cf61781e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
@@ -0,0 +1,119 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalDeleteOutput is an EvalNode implementation that deletes an output
+// from the state.
+type EvalDeleteOutput struct {
+	Name string
+}
+
+// TODO: test
+func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) {
+	state, lock := ctx.State()
+	if state == nil {
+		return nil, nil
+	}
+
+	// Hold the write lock while mutating the shared state.
+	lock.Lock()
+	defer lock.Unlock()
+
+	// With no module state there is no output to remove.
+	mod := state.ModuleByPath(ctx.Path())
+	if mod == nil {
+		return nil, nil
+	}
+
+	delete(mod.Outputs, n.Name)
+
+	return nil, nil
+}
+
+// EvalWriteOutput is an EvalNode implementation that writes the output
+// for the given name to the current state.
+type EvalWriteOutput struct {
+ Name string
+ // Sensitive is copied into the stored OutputState.
+ Sensitive bool
+ Value *config.RawConfig
+}
+
+// TODO: test
+func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
+ cfg, err := ctx.Interpolate(n.Value, nil)
+ if err != nil {
+ // Log error but continue anyway; cfg stays nil and the output
+ // falls back to the unknown-value placeholder below.
+ log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err)
+ }
+
+ state, lock := ctx.State()
+ if state == nil {
+ return nil, fmt.Errorf("cannot write state to nil state")
+ }
+
+ // Get a write lock so we can access this instance
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, create it.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ mod = state.AddModule(ctx.Path())
+ }
+
+ // Get the value from the config; a missing "value" key becomes "" and
+ // a computed one becomes the unknown-value placeholder.
+ var valueRaw interface{} = config.UnknownVariableValue
+ if cfg != nil {
+ var ok bool
+ valueRaw, ok = cfg.Get("value")
+ if !ok {
+ valueRaw = ""
+ }
+ if cfg.IsComputed("value") {
+ valueRaw = config.UnknownVariableValue
+ }
+ }
+
+ // Store the output in state, tagged with its dynamic type.
+ switch valueTyped := valueRaw.(type) {
+ case string:
+ mod.Outputs[n.Name] = &OutputState{
+ Type: "string",
+ Sensitive: n.Sensitive,
+ Value: valueTyped,
+ }
+ case []interface{}:
+ mod.Outputs[n.Name] = &OutputState{
+ Type: "list",
+ Sensitive: n.Sensitive,
+ Value: valueTyped,
+ }
+ case map[string]interface{}:
+ mod.Outputs[n.Name] = &OutputState{
+ Type: "map",
+ Sensitive: n.Sensitive,
+ Value: valueTyped,
+ }
+ case []map[string]interface{}:
+ // an HCL map is multi-valued, so if this was read out of a config the
+ // map may still be in a slice.
+ if len(valueTyped) == 1 {
+ mod.Outputs[n.Name] = &OutputState{
+ Type: "map",
+ Sensitive: n.Sensitive,
+ Value: valueTyped[0],
+ }
+ break
+ }
+ return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map",
+ n.Name, valueTyped, len(valueTyped))
+ default:
+ // NOTE(review): this error string ends with "\n", unlike the others
+ // in this switch — kept as-is since callers may match on it.
+ return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped)
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
new file mode 100644
index 00000000..092fd18d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
@@ -0,0 +1,164 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalSetProviderConfig sets the parent configuration for a provider
+// without configuring that provider, validating it, etc.
+type EvalSetProviderConfig struct {
+ Provider string
+ Config **ResourceConfig
+}
+
+// Eval stores *n.Config in the context as the provider's configuration.
+func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
+ return nil, ctx.SetProviderConfig(n.Provider, *n.Config)
+}
+
+// EvalBuildProviderConfig outputs a *ResourceConfig that is properly
+// merged with parents and inputs on top of what is configured in the file.
+type EvalBuildProviderConfig struct {
+ Provider string
+ Config **ResourceConfig
+ Output **ResourceConfig
+}
+
+func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
+ cfg := *n.Config
+
+ // If we have a configuration set, then merge that in
+ if input := ctx.ProviderInput(n.Provider); input != nil {
+ // "input" is a map of the subset of config values that were known
+ // during the input walk, set by EvalInputProvider. Note that
+ // in particular it does *not* include attributes that had
+ // computed values at input time; those appear *only* in
+ // "cfg" here.
+ rc, err := config.NewRawConfig(input)
+ if err != nil {
+ return nil, err
+ }
+
+ // NOTE(review): precedence between cfg and the merged argument is
+ // defined by config.RawConfig.Merge — confirm which side wins.
+ merged := cfg.raw.Merge(rc)
+ cfg = NewResourceConfig(merged)
+ }
+
+ // Get the parent configuration if there is one
+ if parent := ctx.ParentProviderConfig(n.Provider); parent != nil {
+ merged := cfg.raw.Merge(parent.raw)
+ cfg = NewResourceConfig(merged)
+ }
+
+ *n.Output = cfg
+ return nil, nil
+}
+
+// EvalConfigProvider is an EvalNode implementation that configures
+// a provider that is already initialized and retrieved.
+type EvalConfigProvider struct {
+ Provider string
+ Config **ResourceConfig
+}
+
+// Eval applies *n.Config to the named provider via the context.
+func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
+ return nil, ctx.ConfigureProvider(n.Provider, *n.Config)
+}
+
+// EvalInitProvider is an EvalNode implementation that initializes a provider
+// and returns nothing. The provider can be retrieved again with the
+// EvalGetProvider node.
+type EvalInitProvider struct {
+ Name string
+}
+
+// Eval delegates initialization of the named provider to the context.
+func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) {
+ return ctx.InitProvider(n.Name)
+}
+
+// EvalCloseProvider is an EvalNode implementation that closes provider
+// connections that aren't needed anymore.
+type EvalCloseProvider struct {
+ Name string
+}
+
+// Eval closes the named provider; the close is best-effort and any
+// result from CloseProvider is discarded, so this node always succeeds.
+func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) {
+ ctx.CloseProvider(n.Name)
+ return nil, nil
+}
+
+// EvalGetProvider is an EvalNode implementation that retrieves an already
+// initialized provider instance for the given name.
+type EvalGetProvider struct {
+	Name   string
+	Output *ResourceProvider
+}
+
+func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) {
+	// The provider must have been set up earlier (see EvalInitProvider).
+	result := ctx.Provider(n.Name)
+	if result == nil {
+		return nil, fmt.Errorf("provider %s not initialized", n.Name)
+	}
+
+	if out := n.Output; out != nil {
+		*out = result
+	}
+
+	return nil, nil
+}
+
+// EvalInputProvider is an EvalNode implementation that asks for input
+// for the given provider configurations.
+type EvalInputProvider struct {
+ Name string
+ Provider *ResourceProvider
+ Config **ResourceConfig
+}
+
+func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
+ // If we already configured this provider, then don't do this again
+ if v := ctx.ProviderInput(n.Name); v != nil {
+ return nil, nil
+ }
+
+ rc := *n.Config
+
+ // Wrap the input into a namespace so prompts are scoped to this provider
+ input := &PrefixUIInput{
+ IdPrefix: fmt.Sprintf("provider.%s", n.Name),
+ QueryPrefix: fmt.Sprintf("provider.%s.", n.Name),
+ UIInput: ctx.Input(),
+ }
+
+ // Go through each provider and capture the input necessary
+ // to satisfy it.
+ config, err := (*n.Provider).Input(input, rc)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error configuring %s: %s", n.Name, err)
+ }
+
+ // Set the input that we received so that child modules don't attempt
+ // to ask for input again.
+ if config != nil && len(config.Config) > 0 {
+ // This repository of provider input results on the context doesn't
+ // retain config.ComputedKeys, so we need to filter those out here
+ // in order that later users of this data won't try to use the unknown
+ // value placeholder as if it were a literal value. This map is just
+ // of known values we've been able to complete so far; dynamic stuff
+ // will be merged in by EvalBuildProviderConfig on subsequent
+ // (post-input) walks.
+ // NOTE(review): confMap aliases config.Config, so the deletes below
+ // mutate the map returned by the provider's Input call.
+ confMap := config.Config
+ if config.ComputedKeys != nil {
+ for _, key := range config.ComputedKeys {
+ delete(confMap, key)
+ }
+ }
+
+ ctx.SetProviderInput(n.Name, confMap)
+ } else {
+ // Record an empty (non-nil) map so the guard above short-circuits
+ // on later walks.
+ ctx.SetProviderInput(n.Name, map[string]interface{}{})
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
new file mode 100644
index 00000000..89579c05
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
@@ -0,0 +1,47 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner
+// and returns nothing. The provisioner can be retrieved again with the
+// EvalGetProvisioner node.
+type EvalInitProvisioner struct {
+ Name string
+}
+
+// Eval delegates initialization of the named provisioner to the context.
+func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+ return ctx.InitProvisioner(n.Name)
+}
+
+// EvalCloseProvisioner is an EvalNode implementation that closes provisioner
+// connections that aren't needed anymore.
+type EvalCloseProvisioner struct {
+ Name string
+}
+
+// Eval closes the named provisioner; any result from CloseProvisioner is
+// discarded, so this node always succeeds.
+func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+ ctx.CloseProvisioner(n.Name)
+ return nil, nil
+}
+
+// EvalGetProvisioner is an EvalNode implementation that retrieves an already
+// initialized provisioner instance for the given name.
+type EvalGetProvisioner struct {
+	Name   string
+	Output *ResourceProvisioner
+}
+
+func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+	// The provisioner must have been set up earlier (see EvalInitProvisioner).
+	result := ctx.Provisioner(n.Name)
+	if result == nil {
+		return nil, fmt.Errorf("provisioner %s not initialized", n.Name)
+	}
+
+	if out := n.Output; out != nil {
+		*out = result
+	}
+
+	// The provisioner is also returned as the eval result.
+	return result, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
new file mode 100644
index 00000000..fb85a284
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
@@ -0,0 +1,139 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// EvalReadDataDiff is an EvalNode implementation that executes a data
+// resource's ReadDataDiff method to discover what attributes it exports.
+type EvalReadDataDiff struct {
+	Provider    *ResourceProvider
+	Output      **InstanceDiff
+	OutputState **InstanceState
+	Config      **ResourceConfig
+	Info        *InstanceInfo
+
+	// Set Previous when re-evaluating diff during apply, to ensure that
+	// the "Destroy" flag is preserved.
+	Previous **InstanceDiff
+}
+
+func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) {
+	// TODO: test
+
+	err := ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PreDiff(n.Info, nil)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var diff *InstanceDiff
+
+	if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() {
+		// If we're re-diffing for a diff that was already planning to
+		// destroy, then we'll just continue with that plan.
+		diff = &InstanceDiff{Destroy: true}
+	} else {
+		provider := *n.Provider
+		config := *n.Config
+
+		var err error
+		diff, err = provider.ReadDataDiff(n.Info, config)
+		if err != nil {
+			return nil, err
+		}
+		if diff == nil {
+			diff = new(InstanceDiff)
+		}
+
+		// if id isn't explicitly set then it's always computed, because we're
+		// always "creating a new resource".
+		diff.init()
+		if _, ok := diff.Attributes["id"]; !ok {
+			diff.SetAttribute("id", &ResourceAttrDiff{
+				Old:         "",
+				NewComputed: true,
+				RequiresNew: true,
+				Type:        DiffAttrOutput,
+			})
+		}
+	}
+
+	err = ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PostDiff(n.Info, diff)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	*n.Output = diff
+
+	if n.OutputState != nil {
+		state := &InstanceState{}
+		*n.OutputState = state
+
+		// Apply the diff to the returned state, so the state includes
+		// any attribute values that are not computed. (The former inner
+		// "n.OutputState != nil" re-check here was redundant: this code
+		// is already inside that guard.)
+		if !diff.Empty() {
+			*n.OutputState = state.MergeDiff(diff)
+		}
+	}
+
+	return nil, nil
+}
+
+// EvalReadDataApply is an EvalNode implementation that executes a data
+// resource's ReadDataApply method to read data from the data source.
+type EvalReadDataApply struct {
+ Provider *ResourceProvider
+ Output **InstanceState
+ Diff **InstanceDiff
+ Info *InstanceInfo
+}
+
+func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
+ // TODO: test
+ provider := *n.Provider
+ diff := *n.Diff
+
+ // If the diff is for *destroying* this resource then we'll
+ // just drop its state and move on, since data resources don't
+ // support an actual "destroy" action.
+ if diff != nil && diff.GetDestroy() {
+ if n.Output != nil {
+ *n.Output = nil
+ }
+ return nil, nil
+ }
+
+ // For the purpose of external hooks we present a data apply as a
+ // "Refresh" rather than an "Apply" because creating a data source
+ // is presented to users/callers as a "read" operation.
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ // We don't have a state yet, so we'll just give the hook an
+ // empty one to work with.
+ return h.PreRefresh(n.Info, &InstanceState{})
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Actually read the data; errors are tagged with the instance id.
+ state, err := provider.ReadDataApply(n.Info, diff)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %s", n.Info.Id, err)
+ }
+
+ // Notify hooks of the state we read.
+ err = ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostRefresh(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if n.Output != nil {
+ *n.Output = state
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
new file mode 100644
index 00000000..fa2b8126
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
@@ -0,0 +1,55 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+)
+
+// EvalRefresh is an EvalNode implementation that does a refresh for
+// a resource.
+type EvalRefresh struct {
+	Provider *ResourceProvider
+	State    **InstanceState
+	Info     *InstanceInfo
+	Output   **InstanceState
+}
+
+// TODO: test
+func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
+	provider := *n.Provider
+	state := *n.State
+
+	// With no prior state there is nothing to refresh.
+	if state == nil {
+		log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id)
+		return nil, nil
+	}
+
+	// Let hooks know a refresh is starting.
+	if err := ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PreRefresh(n.Info, state)
+	}); err != nil {
+		return nil, err
+	}
+
+	// Ask the provider for the current remote state.
+	state, err := provider.Refresh(n.Info, state)
+	if err != nil {
+		return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error())
+	}
+
+	// Let hooks know what the refresh found.
+	if err := ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PostRefresh(n.Info, state)
+	}); err != nil {
+		return nil, err
+	}
+
+	if n.Output != nil {
+		*n.Output = state
+	}
+
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
new file mode 100644
index 00000000..5eca6782
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
@@ -0,0 +1,13 @@
+package terraform
+
+// EvalInstanceInfo is an EvalNode implementation that fills in the
+// InstanceInfo as much as it can.
+type EvalInstanceInfo struct {
+ Info *InstanceInfo
+}
+
+// TODO: test
+// Eval records the current module path on the InstanceInfo; no other
+// fields are populated here.
+func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) {
+ n.Info.ModulePath = ctx.Path()
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
new file mode 100644
index 00000000..82d81782
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
@@ -0,0 +1,27 @@
+package terraform
+
+// EvalSequence is an EvalNode that evaluates in sequence.
+type EvalSequence struct {
+	Nodes []EvalNode
+}
+
+// Eval runs each non-nil child in order, stopping at the first error.
+func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) {
+	for _, node := range n.Nodes {
+		// Nil entries are permitted and simply skipped.
+		if node == nil {
+			continue
+		}
+		if _, err := EvalRaw(node, ctx); err != nil {
+			return nil, err
+		}
+	}
+	return nil, nil
+}
+
+// Filter implements EvalNodeFilterable by rewriting each child in place.
+func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) {
+	for i := range n.Nodes {
+		n.Nodes[i] = fn(n.Nodes[i])
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
new file mode 100644
index 00000000..126a0e63
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
@@ -0,0 +1,324 @@
+package terraform
+
+import "fmt"
+
+// EvalReadState is an EvalNode implementation that reads the
+// primary InstanceState for a specific resource out of the state.
+type EvalReadState struct {
+ Name string
+ Output **InstanceState
+}
+
+func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) {
+ return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
+ return rs.Primary, nil
+ })
+}
+
+// EvalReadStateDeposed is an EvalNode implementation that reads the
+// deposed InstanceState for a specific resource out of the state
+type EvalReadStateDeposed struct {
+ Name string
+ Output **InstanceState
+ // Index indicates which instance in the Deposed list to target, or -1 for
+ // the last item.
+ Index int
+}
+
+func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
+ return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
+ // Get the index. If it is negative, then we get the last one
+ idx := n.Index
+ if idx < 0 {
+ idx = len(rs.Deposed) - 1
+ }
+ if idx >= 0 && idx < len(rs.Deposed) {
+ return rs.Deposed[idx], nil
+ } else {
+ return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs)
+ }
+ })
+}
+
+// Does the bulk of the work for the various flavors of ReadState eval nodes.
+// Each node just provides a reader function to get from the ResourceState to the
+// InstanceState, and this takes care of all the plumbing.
+func readInstanceFromState(
+ ctx EvalContext,
+ resourceName string,
+ output **InstanceState,
+ readerFn func(*ResourceState) (*InstanceState, error),
+) (*InstanceState, error) {
+ state, lock := ctx.State()
+
+ // Get a read lock so we can access this instance
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs := mod.Resources[resourceName]
+ if rs == nil {
+ return nil, nil
+ }
+
+ // Use the delegate function to get the instance state from the resource state
+ is, err := readerFn(rs)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write the result to the output pointer
+ if output != nil {
+ *output = is
+ }
+
+ return is, nil
+}
+
+// EvalRequireState is an EvalNode implementation that early exits
+// if the state doesn't have an ID.
+type EvalRequireState struct {
+ State **InstanceState
+}
+
+func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
+ if n.State == nil {
+ return nil, EvalEarlyExitError{}
+ }
+
+ state := *n.State
+ if state == nil || state.ID == "" {
+ return nil, EvalEarlyExitError{}
+ }
+
+ return nil, nil
+}
+
+// EvalUpdateStateHook is an EvalNode implementation that calls the
+// PostStateUpdate hook with the current state.
+type EvalUpdateStateHook struct{}
+
+func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+
+ // Get a full lock. Even calling something like WriteState can modify
+ // (prune) the state, so we need the full lock.
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Call the hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostStateUpdate(state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// EvalWriteState is an EvalNode implementation that writes the
+// primary InstanceState for a specific resource into the state.
+type EvalWriteState struct {
+ Name string
+ ResourceType string
+ Provider string
+ Dependencies []string
+ State **InstanceState
+}
+
+func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
+ return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
+ func(rs *ResourceState) error {
+ rs.Primary = *n.State
+ return nil
+ },
+ )
+}
+
+// EvalWriteStateDeposed is an EvalNode implementation that writes
+// an InstanceState out to the Deposed list of a resource in the state.
+type EvalWriteStateDeposed struct {
+ Name string
+ ResourceType string
+ Provider string
+ Dependencies []string
+ State **InstanceState
+ // Index indicates which instance in the Deposed list to target, or -1 to append.
+ Index int
+}
+
+func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
+ return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
+ func(rs *ResourceState) error {
+ if n.Index == -1 {
+ rs.Deposed = append(rs.Deposed, *n.State)
+ } else {
+ rs.Deposed[n.Index] = *n.State
+ }
+ return nil
+ },
+ )
+}
+
+// Pulls together the common tasks of the EvalWriteState nodes. All the args
+// are passed directly down from the EvalNode along with a `writer` function
+// which is yielded the *ResourceState and is responsible for writing an
+// InstanceState to the proper field in the ResourceState.
+func writeInstanceToState(
+ ctx EvalContext,
+ resourceName string,
+ resourceType string,
+ provider string,
+ dependencies []string,
+ writerFn func(*ResourceState) error,
+) (*InstanceState, error) {
+ state, lock := ctx.State()
+ if state == nil {
+ return nil, fmt.Errorf("cannot write state to nil state")
+ }
+
+ // Get a write lock so we can access this instance
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, create it.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ mod = state.AddModule(ctx.Path())
+ }
+
+ // Look for the resource state.
+ rs := mod.Resources[resourceName]
+ if rs == nil {
+ rs = &ResourceState{}
+ rs.init()
+ mod.Resources[resourceName] = rs
+ }
+ rs.Type = resourceType
+ rs.Dependencies = dependencies
+ rs.Provider = provider
+
+ if err := writerFn(rs); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// EvalClearPrimaryState is an EvalNode implementation that clears the primary
+// instance from a resource state.
+type EvalClearPrimaryState struct {
+ Name string
+}
+
+func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+
+ // Get a read lock so we can access this instance
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs := mod.Resources[n.Name]
+ if rs == nil {
+ return nil, nil
+ }
+
+ // Clear primary from the resource state
+ rs.Primary = nil
+
+ return nil, nil
+}
+
+// EvalDeposeState is an EvalNode implementation that takes the primary
+// out of a state and makes it Deposed. This is done at the beginning of
+// create-before-destroy calls so that the create can create while preserving
+// the old state of the to-be-destroyed resource.
+type EvalDeposeState struct {
+ Name string
+}
+
+// TODO: test
+func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+
+ // Get a read lock so we can access this instance
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs := mod.Resources[n.Name]
+ if rs == nil {
+ return nil, nil
+ }
+
+ // If we don't have a primary, we have nothing to depose
+ if rs.Primary == nil {
+ return nil, nil
+ }
+
+ // Depose
+ rs.Deposed = append(rs.Deposed, rs.Primary)
+ rs.Primary = nil
+
+ return nil, nil
+}
+
+// EvalUndeposeState is an EvalNode implementation that reads the
+// InstanceState for a specific resource out of the state.
+type EvalUndeposeState struct {
+ Name string
+ State **InstanceState
+}
+
+// TODO: test
+func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+
+ // Get a read lock so we can access this instance
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs := mod.Resources[n.Name]
+ if rs == nil {
+ return nil, nil
+ }
+
+ // If we don't have any desposed resource, then we don't have anything to do
+ if len(rs.Deposed) == 0 {
+ return nil, nil
+ }
+
+ // Undepose
+ idx := len(rs.Deposed) - 1
+ rs.Primary = rs.Deposed[idx]
+ rs.Deposed[idx] = *n.State
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
new file mode 100644
index 00000000..478aa640
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
@@ -0,0 +1,227 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/mitchellh/mapstructure"
+)
+
// EvalValidateError is the error structure returned if there were
// validation errors.
type EvalValidateError struct {
	Warnings []string
	Errors   []error
}

// Error flattens the collected warnings and errors into a single message.
func (e *EvalValidateError) Error() string {
	return "Warnings: " + fmt.Sprint(e.Warnings) + ". Errors: " + fmt.Sprint(e.Errors)
}
+
+// EvalValidateCount is an EvalNode implementation that validates
+// the count of a resource.
+type EvalValidateCount struct {
+ Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
+ var count int
+ var errs []error
+ var err error
+ if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil {
+ errs = append(errs, fmt.Errorf(
+ "Failed to interpolate count: %s", err))
+ goto RETURN
+ }
+
+ count, err = n.Resource.Count()
+ if err != nil {
+ // If we can't get the count during validation, then
+ // just replace it with the number 1.
+ c := n.Resource.RawCount.Config()
+ c[n.Resource.RawCount.Key] = "1"
+ count = 1
+ }
+ err = nil
+
+ if count < 0 {
+ errs = append(errs, fmt.Errorf(
+ "Count is less than zero: %d", count))
+ }
+
+RETURN:
+ if len(errs) != 0 {
+ err = &EvalValidateError{
+ Errors: errs,
+ }
+ }
+ return nil, err
+}
+
+// EvalValidateProvider is an EvalNode implementation that validates
+// the configuration of a resource.
+type EvalValidateProvider struct {
+ Provider *ResourceProvider
+ Config **ResourceConfig
+}
+
+func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
+ provider := *n.Provider
+ config := *n.Config
+
+ warns, errs := provider.Validate(config)
+ if len(warns) == 0 && len(errs) == 0 {
+ return nil, nil
+ }
+
+ return nil, &EvalValidateError{
+ Warnings: warns,
+ Errors: errs,
+ }
+}
+
+// EvalValidateProvisioner is an EvalNode implementation that validates
+// the configuration of a resource.
+type EvalValidateProvisioner struct {
+ Provisioner *ResourceProvisioner
+ Config **ResourceConfig
+ ConnConfig **ResourceConfig
+}
+
+func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+ provisioner := *n.Provisioner
+ config := *n.Config
+ var warns []string
+ var errs []error
+
+ {
+ // Validate the provisioner's own config first
+ w, e := provisioner.Validate(config)
+ warns = append(warns, w...)
+ errs = append(errs, e...)
+ }
+
+ {
+ // Now validate the connection config, which might either be from
+ // the provisioner block itself or inherited from the resource's
+ // shared connection info.
+ w, e := n.validateConnConfig(*n.ConnConfig)
+ warns = append(warns, w...)
+ errs = append(errs, e...)
+ }
+
+ if len(warns) == 0 && len(errs) == 0 {
+ return nil, nil
+ }
+
+ return nil, &EvalValidateError{
+ Warnings: warns,
+ Errors: errs,
+ }
+}
+
// validateConnConfig checks a provisioner's "connection" block against the
// superset of attributes accepted by any communicator, so that typos in
// attribute names are reported at validate time rather than at apply time.
func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) {
	// We can't comprehensively validate the connection config since its
	// final structure is decided by the communicator and we can't instantiate
	// that until we have a complete instance state. However, we *can* catch
	// configuration keys that are not valid for *any* communicator, catching
	// typos early rather than waiting until we actually try to run one of
	// the resource's provisioners.

	type connConfigSuperset struct {
		// All attribute types are interface{} here because at this point we
		// may still have unresolved interpolation expressions, which will
		// appear as strings regardless of the final goal type.

		Type interface{} `mapstructure:"type"`
		User interface{} `mapstructure:"user"`
		Password interface{} `mapstructure:"password"`
		Host interface{} `mapstructure:"host"`
		Port interface{} `mapstructure:"port"`
		Timeout interface{} `mapstructure:"timeout"`
		ScriptPath interface{} `mapstructure:"script_path"`

		// For type=ssh only (enforced in ssh communicator)
		PrivateKey interface{} `mapstructure:"private_key"`
		Agent interface{} `mapstructure:"agent"`
		BastionHost interface{} `mapstructure:"bastion_host"`
		BastionPort interface{} `mapstructure:"bastion_port"`
		BastionUser interface{} `mapstructure:"bastion_user"`
		BastionPassword interface{} `mapstructure:"bastion_password"`
		BastionPrivateKey interface{} `mapstructure:"bastion_private_key"`

		// For type=winrm only (enforced in winrm communicator)
		HTTPS interface{} `mapstructure:"https"`
		Insecure interface{} `mapstructure:"insecure"`
		CACert interface{} `mapstructure:"cacert"`
	}

	// The decoder records every key it did not consume in metadata.Unused,
	// which is the only output we actually inspect below.
	var metadata mapstructure.Metadata
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Metadata: &metadata,
		Result: &connConfigSuperset{}, // result is disregarded; we only care about unused keys
	})
	if err != nil {
		// should never happen: the decoder config above is static
		errs = append(errs, err)
		return
	}

	if err := decoder.Decode(connConfig.Config); err != nil {
		errs = append(errs, err)
		return
	}

	// Any leftover key is not valid for any communicator: report each one.
	for _, attrName := range metadata.Unused {
		errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName))
	}
	return
}
+
+// EvalValidateResource is an EvalNode implementation that validates
+// the configuration of a resource.
+type EvalValidateResource struct {
+ Provider *ResourceProvider
+ Config **ResourceConfig
+ ResourceName string
+ ResourceType string
+ ResourceMode config.ResourceMode
+
+ // IgnoreWarnings means that warnings will not be passed through. This allows
+ // "just-in-time" passes of validation to continue execution through warnings.
+ IgnoreWarnings bool
+}
+
+func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
+ provider := *n.Provider
+ cfg := *n.Config
+ var warns []string
+ var errs []error
+ // Provider entry point varies depending on resource mode, because
+ // managed resources and data resources are two distinct concepts
+ // in the provider abstraction.
+ switch n.ResourceMode {
+ case config.ManagedResourceMode:
+ warns, errs = provider.ValidateResource(n.ResourceType, cfg)
+ case config.DataResourceMode:
+ warns, errs = provider.ValidateDataSource(n.ResourceType, cfg)
+ }
+
+ // If the resource name doesn't match the name regular
+ // expression, show an error.
+ if !config.NameRegexp.Match([]byte(n.ResourceName)) {
+ errs = append(errs, fmt.Errorf(
+ "%s: resource name can only contain letters, numbers, "+
+ "dashes, and underscores.", n.ResourceName))
+ }
+
+ if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 {
+ return nil, nil
+ }
+
+ return nil, &EvalValidateError{
+ Warnings: warns,
+ Errors: errs,
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
new file mode 100644
index 00000000..ae4436a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
@@ -0,0 +1,74 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
// EvalValidateResourceSelfRef is an EvalNode implementation that validates that
// a configuration doesn't contain a reference to the resource itself.
//
// This must be done prior to interpolating configuration in order to avoid
// any infinite loop scenarios.
type EvalValidateResourceSelfRef struct {
	Addr **ResourceAddress
	Config **config.RawConfig
}

// Eval scans the configuration's interpolation variables for resource
// references whose address matches Addr, reporting each one as a
// validation error wrapped in EvalValidateError.
func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) {
	addr := *n.Addr
	conf := *n.Config

	// Go through the variables and find self references
	var errs []error
	for k, raw := range conf.Variables {
		rv, ok := raw.(*config.ResourceVariable)
		if !ok {
			continue
		}

		// Build an address from the variable
		varAddr := &ResourceAddress{
			Path: addr.Path,
			Mode: rv.Mode,
			Type: rv.Type,
			Name: rv.Name,
			Index: rv.Index,
			InstanceType: TypePrimary,
		}

		// If the variable access is a multi-access (*), then we just
		// match the index so that we'll match our own addr if everything
		// else matches.
		if rv.Multi && rv.Index == -1 {
			varAddr.Index = addr.Index
		}

		// This is a weird thing where ResourceAddress has index "-1" when
		// index isn't set at all. This means index "0" for resource access.
		// So, if we have this scenario, just set our varAddr to -1 so it
		// matches.
		if addr.Index == -1 && varAddr.Index == 0 {
			varAddr.Index = -1
		}

		// If the addresses match, then this is a self reference
		if varAddr.Equals(addr) && varAddr.Index == addr.Index {
			errs = append(errs, fmt.Errorf(
				"%s: self reference not allowed: %q",
				addr, k))
		}
	}

	// If no errors, no errors!
	if len(errs) == 0 {
		return nil, nil
	}

	// Wrap the errors in the proper wrapper so we can handle validation
	// formatting properly upstream.
	return nil, &EvalValidateError{
		Errors: errs,
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
new file mode 100644
index 00000000..e39a33c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
@@ -0,0 +1,279 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/helper/hilmapstructure"
+)
+
// EvalTypeCheckVariable is an EvalNode which ensures that the variable
// values which are assigned as inputs to a module (including the root)
// match the types which are either declared for the variables explicitly
// or inferred from the default values.
//
// In order to achieve this three things are required:
//     - a map of the proposed variable values
//     - the configuration tree of the module in which the variable is
//       declared
//     - the path to the module (so we know which part of the tree to
//       compare the values against).
type EvalTypeCheckVariable struct {
	// Variables maps variable names to their proposed values.
	Variables map[string]interface{}
	// ModulePath locates the module whose variables are checked; element 0
	// is the root module name.
	ModulePath []string
	// ModuleTree is the configuration tree used to look up declared types.
	ModuleTree *module.Tree
}
+
+func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) {
+ currentTree := n.ModuleTree
+ for _, pathComponent := range n.ModulePath[1:] {
+ currentTree = currentTree.Children()[pathComponent]
+ }
+ targetConfig := currentTree.Config()
+
+ prototypes := make(map[string]config.VariableType)
+ for _, variable := range targetConfig.Variables {
+ prototypes[variable.Name] = variable.Type()
+ }
+
+ // Only display a module in an error message if we are not in the root module
+ modulePathDescription := fmt.Sprintf(" in module %s", strings.Join(n.ModulePath[1:], "."))
+ if len(n.ModulePath) == 1 {
+ modulePathDescription = ""
+ }
+
+ for name, declaredType := range prototypes {
+ proposedValue, ok := n.Variables[name]
+ if !ok {
+ // This means the default value should be used as no overriding value
+ // has been set. Therefore we should continue as no check is necessary.
+ continue
+ }
+
+ if proposedValue == config.UnknownVariableValue {
+ continue
+ }
+
+ switch declaredType {
+ case config.VariableTypeString:
+ switch proposedValue.(type) {
+ case string:
+ continue
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+ name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+ }
+ case config.VariableTypeMap:
+ switch proposedValue.(type) {
+ case map[string]interface{}:
+ continue
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+ name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+ }
+ case config.VariableTypeList:
+ switch proposedValue.(type) {
+ case []interface{}:
+ continue
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+ name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+ }
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got type string",
+ name, modulePathDescription, declaredType.Printable())
+ }
+ }
+
+ return nil, nil
+}
+
// EvalSetVariables is an EvalNode implementation that sets the variables
// explicitly for interpolation later.
type EvalSetVariables struct {
	// Module is the name of the module whose variables are being set.
	// NOTE(review): it is dereferenced unconditionally in Eval, so a nil
	// Module would panic — callers appear expected to always set it.
	Module *string
	Variables map[string]interface{}
}

// Eval passes the variable map through to the context for the named module.
func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) {
	ctx.SetVariables(*n.Module, n.Variables)
	return nil, nil
}
+
// EvalVariableBlock is an EvalNode implementation that evaluates the
// given configuration, and uses the final values as a way to set the
// mapping.
type EvalVariableBlock struct {
	// Config is the evaluated configuration whose values are copied into
	// VariableValues.
	Config **ResourceConfig
	// VariableValues is cleared and repopulated on every Eval.
	VariableValues map[string]interface{}
}
+
+func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) {
+ // Clear out the existing mapping
+ for k, _ := range n.VariableValues {
+ delete(n.VariableValues, k)
+ }
+
+ // Get our configuration
+ rc := *n.Config
+ for k, v := range rc.Config {
+ vKind := reflect.ValueOf(v).Type().Kind()
+
+ switch vKind {
+ case reflect.Slice:
+ var vSlice []interface{}
+ if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil {
+ n.VariableValues[k] = vSlice
+ continue
+ }
+ case reflect.Map:
+ var vMap map[string]interface{}
+ if err := hilmapstructure.WeakDecode(v, &vMap); err == nil {
+ n.VariableValues[k] = vMap
+ continue
+ }
+ default:
+ var vString string
+ if err := hilmapstructure.WeakDecode(v, &vString); err == nil {
+ n.VariableValues[k] = vString
+ continue
+ }
+ }
+
+ return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k)
+ }
+
+ for _, path := range rc.ComputedKeys {
+ log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path)
+ err := n.setUnknownVariableValueForPath(path)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error {
+ pathComponents := strings.Split(path, ".")
+
+ if len(pathComponents) < 1 {
+ return fmt.Errorf("No path comoponents in %s", path)
+ }
+
+ if len(pathComponents) == 1 {
+ // Special case the "top level" since we know the type
+ if _, ok := n.VariableValues[pathComponents[0]]; !ok {
+ n.VariableValues[pathComponents[0]] = config.UnknownVariableValue
+ }
+ return nil
+ }
+
+ // Otherwise find the correct point in the tree and then set to unknown
+ var current interface{} = n.VariableValues[pathComponents[0]]
+ for i := 1; i < len(pathComponents); i++ {
+ switch tCurrent := current.(type) {
+ case []interface{}:
+ index, err := strconv.Atoi(pathComponents[i])
+ if err != nil {
+ return fmt.Errorf("Cannot convert %s to slice index in path %s",
+ pathComponents[i], path)
+ }
+ current = tCurrent[index]
+ case []map[string]interface{}:
+ index, err := strconv.Atoi(pathComponents[i])
+ if err != nil {
+ return fmt.Errorf("Cannot convert %s to slice index in path %s",
+ pathComponents[i], path)
+ }
+ current = tCurrent[index]
+ case map[string]interface{}:
+ if val, hasVal := tCurrent[pathComponents[i]]; hasVal {
+ current = val
+ continue
+ }
+
+ tCurrent[pathComponents[i]] = config.UnknownVariableValue
+ break
+ }
+ }
+
+ return nil
+}
+
+// EvalCoerceMapVariable is an EvalNode implementation that recognizes a
+// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a
+// bare map literal is indistinguishable from a list of maps w/ one element.
+//
+// We take all the same inputs as EvalTypeCheckVariable above, since we need
+// both the target type and the proposed value in order to properly coerce.
+type EvalCoerceMapVariable struct {
+ Variables map[string]interface{}
+ ModulePath []string
+ ModuleTree *module.Tree
+}
+
+// Eval implements the EvalNode interface. See EvalCoerceMapVariable for
+// details.
+func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) {
+ currentTree := n.ModuleTree
+ for _, pathComponent := range n.ModulePath[1:] {
+ currentTree = currentTree.Children()[pathComponent]
+ }
+ targetConfig := currentTree.Config()
+
+ prototypes := make(map[string]config.VariableType)
+ for _, variable := range targetConfig.Variables {
+ prototypes[variable.Name] = variable.Type()
+ }
+
+ for name, declaredType := range prototypes {
+ if declaredType != config.VariableTypeMap {
+ continue
+ }
+
+ proposedValue, ok := n.Variables[name]
+ if !ok {
+ continue
+ }
+
+ if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 {
+ if m, ok := list[0].(map[string]interface{}); ok {
+ log.Printf("[DEBUG] EvalCoerceMapVariable: "+
+ "Coercing single element list into map: %#v", m)
+ n.Variables[name] = m
+ }
+ }
+ }
+
+ return nil, nil
+}
+
// hclTypeName returns the name of the type that would represent this value in
// a config file, or falls back to the Go type name if there's no corresponding
// HCL type. This is used for formatted output, not for comparing types.
func hclTypeName(i interface{}) string {
	// Pointers are followed so that e.g. *int reports as "number".
	kind := reflect.Indirect(reflect.ValueOf(i)).Kind()

	switch kind {
	case reflect.Bool:
		return "boolean"
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
		return "number"
	case reflect.Array, reflect.Slice:
		return "list"
	case reflect.Map:
		return "map"
	case reflect.String:
		return "string"
	}

	// No HCL equivalent: report the raw Go kind instead.
	return kind.String()
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
new file mode 100644
index 00000000..00392efe
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
@@ -0,0 +1,119 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config"
+)
+
// ProviderEvalTree returns the evaluation tree for initializing and
// configuring providers.
//
// Note that the config parameter shadows the imported config package for
// the duration of this function.
func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
	// provider and resourceConfig are shared between the sub-sequences
	// below: earlier nodes fill them in via pointer, later nodes read them.
	var provider ResourceProvider
	var resourceConfig *ResourceConfig

	seq := make([]EvalNode, 0, 5)
	seq = append(seq, &EvalInitProvider{Name: n})

	// Input stuff
	seq = append(seq, &EvalOpFilter{
		Ops: []walkOperation{walkInput, walkImport},
		Node: &EvalSequence{
			Nodes: []EvalNode{
				&EvalGetProvider{
					Name: n,
					Output: &provider,
				},
				&EvalInterpolate{
					Config: config,
					Output: &resourceConfig,
				},
				&EvalBuildProviderConfig{
					Provider: n,
					Config: &resourceConfig,
					Output: &resourceConfig,
				},
				&EvalInputProvider{
					Name: n,
					Provider: &provider,
					Config: &resourceConfig,
				},
			},
		},
	})

	// Validation: also stores the built config via EvalSetProviderConfig.
	seq = append(seq, &EvalOpFilter{
		Ops: []walkOperation{walkValidate},
		Node: &EvalSequence{
			Nodes: []EvalNode{
				&EvalGetProvider{
					Name: n,
					Output: &provider,
				},
				&EvalInterpolate{
					Config: config,
					Output: &resourceConfig,
				},
				&EvalBuildProviderConfig{
					Provider: n,
					Config: &resourceConfig,
					Output: &resourceConfig,
				},
				&EvalValidateProvider{
					Provider: &provider,
					Config: &resourceConfig,
				},
				&EvalSetProviderConfig{
					Provider: n,
					Config: &resourceConfig,
				},
			},
		},
	})

	// Apply stuff
	seq = append(seq, &EvalOpFilter{
		Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
		Node: &EvalSequence{
			Nodes: []EvalNode{
				&EvalGetProvider{
					Name: n,
					Output: &provider,
				},
				&EvalInterpolate{
					Config: config,
					Output: &resourceConfig,
				},
				&EvalBuildProviderConfig{
					Provider: n,
					Config: &resourceConfig,
					Output: &resourceConfig,
				},
				&EvalSetProviderConfig{
					Provider: n,
					Config: &resourceConfig,
				},
			},
		},
	})

	// We configure on everything but validate, since validate may
	// not have access to all the variables.
	seq = append(seq, &EvalOpFilter{
		Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
		Node: &EvalSequence{
			Nodes: []EvalNode{
				&EvalConfigProvider{
					Provider: n,
					Config: &resourceConfig,
				},
			},
		},
	})

	return &EvalSequence{Nodes: seq}
}
+
+// CloseProviderEvalTree returns the evaluation tree for closing
+// provider connections that aren't needed anymore.
+func CloseProviderEvalTree(n string) EvalNode {
+ return &EvalCloseProvider{Name: n}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go
new file mode 100644
index 00000000..48ce6a33
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go
@@ -0,0 +1,172 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "runtime/debug"
+ "strings"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
// RootModuleName is the name given to the root module implicitly.
const RootModuleName = "root"

// RootModulePath is the path for the root module: a single-element list
// containing only RootModuleName.
var RootModulePath = []string{RootModuleName}
+
// Graph represents the graph that Terraform uses to represent resources
// and their dependencies.
type Graph struct {
	// Graph is the actual DAG. This is embedded so you can call the DAG
	// methods directly.
	dag.AcyclicGraph

	// Path is the path in the module tree that this Graph represents.
	// The root is represented by a single element list containing
	// RootModuleName.
	Path []string

	// debugName is a name for reference in the debug output. This is usually
	// to indicate what the topmost builder was, and if this graph is a shadow
	// or not.
	debugName string
}
+
// DirectedGraph returns the embedded acyclic graph, allowing a Graph to be
// used wherever a dag.Grapher is expected.
func (g *Graph) DirectedGraph() dag.Grapher {
	return &g.AcyclicGraph
}
+
// Walk walks the graph with the given walker for callbacks. The graph
// will be walked with full parallelism, so the walker should expect
// to be called concurrently.
func (g *Graph) Walk(walker GraphWalker) error {
	return g.walk(walker)
}
+
+func (g *Graph) walk(walker GraphWalker) error {
+ // The callbacks for enter/exiting a graph
+ ctx := walker.EnterPath(g.Path)
+ defer walker.ExitPath(g.Path)
+
+ // Get the path for logs
+ path := strings.Join(ctx.Path(), ".")
+
+ // Determine if our walker is a panic wrapper
+ panicwrap, ok := walker.(GraphWalkerPanicwrapper)
+ if !ok {
+ panicwrap = nil // just to be sure
+ }
+
+ debugName := "walk-graph.json"
+ if g.debugName != "" {
+ debugName = g.debugName + "-" + debugName
+ }
+
+ debugBuf := dbug.NewFileWriter(debugName)
+ g.SetDebugWriter(debugBuf)
+ defer debugBuf.Close()
+
+ // Walk the graph.
+ var walkFn dag.WalkFunc
+ walkFn = func(v dag.Vertex) (rerr error) {
+ log.Printf("[DEBUG] vertex '%s.%s': walking", path, dag.VertexName(v))
+ g.DebugVisitInfo(v, g.debugName)
+
+ // If we have a panic wrap GraphWalker and a panic occurs, recover
+ // and call that. We ensure the return value is an error, however,
+ // so that future nodes are not called.
+ defer func() {
+ // If no panicwrap, do nothing
+ if panicwrap == nil {
+ return
+ }
+
+ // If no panic, do nothing
+ err := recover()
+ if err == nil {
+ return
+ }
+
+ // Modify the return value to show the error
+ rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
+ dag.VertexName(v), err, debug.Stack())
+
+ // Call the panic wrapper
+ panicwrap.Panic(v, err)
+ }()
+
+ walker.EnterVertex(v)
+ defer walker.ExitVertex(v, rerr)
+
+ // vertexCtx is the context that we use when evaluating. This
+ // is normally the context of our graph but can be overridden
+ // with a GraphNodeSubPath impl.
+ vertexCtx := ctx
+ if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
+ vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path()))
+ defer walker.ExitPath(pn.Path())
+ }
+
+ // If the node is eval-able, then evaluate it.
+ if ev, ok := v.(GraphNodeEvalable); ok {
+ tree := ev.EvalTree()
+ if tree == nil {
+ panic(fmt.Sprintf(
+ "%s.%s (%T): nil eval tree", path, dag.VertexName(v), v))
+ }
+
+ // Allow the walker to change our tree if needed. Eval,
+ // then callback with the output.
+ log.Printf("[DEBUG] vertex '%s.%s': evaluating", path, dag.VertexName(v))
+
+ g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path))
+
+ tree = walker.EnterEvalTree(v, tree)
+ output, err := Eval(tree, vertexCtx)
+ if rerr = walker.ExitEvalTree(v, output, err); rerr != nil {
+ return
+ }
+ }
+
+ // If the node is dynamically expanded, then expand it
+ if ev, ok := v.(GraphNodeDynamicExpandable); ok {
+ log.Printf(
+ "[DEBUG] vertex '%s.%s': expanding/walking dynamic subgraph",
+ path,
+ dag.VertexName(v))
+
+ g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path))
+
+ g, err := ev.DynamicExpand(vertexCtx)
+ if err != nil {
+ rerr = err
+ return
+ }
+ if g != nil {
+ // Walk the subgraph
+ if rerr = g.walk(walker); rerr != nil {
+ return
+ }
+ }
+ }
+
+ // If the node has a subgraph, then walk the subgraph
+ if sn, ok := v.(GraphNodeSubgraph); ok {
+ log.Printf(
+ "[DEBUG] vertex '%s.%s': walking subgraph",
+ path,
+ dag.VertexName(v))
+
+ g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path))
+
+ if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil {
+ return
+ }
+ }
+
+ return nil
+ }
+
+ return g.AcyclicGraph.Walk(walkFn)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
new file mode 100644
index 00000000..6374bb90
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
@@ -0,0 +1,77 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// GraphBuilder is an interface that can be implemented and used with
+// Terraform to build the graph that Terraform walks.
+type GraphBuilder interface {
+	// Build builds the graph for the given module path. It is up to
+	// the interface implementation whether this build should expand
+	// the graph or not. The returned graph is what Graph.Walk operates on.
+	Build(path []string) (*Graph, error)
+}
+
+// BasicGraphBuilder is a GraphBuilder that builds a graph out of a
+// series of transforms and (optionally) validates the graph is a valid
+// structure.
+type BasicGraphBuilder struct {
+	// Steps are applied in order; each transformer mutates the graph
+	// produced by the previous ones, so ordering is significant.
+	Steps    []GraphTransformer
+	Validate bool
+	// Optional name to add to the graph debug log
+	Name string
+}
+
+// Build applies every transform step in order to a fresh graph rooted at
+// the given path, recording each step to the debug writer. Note the
+// asymmetric error returns: a transform error returns the partially-built
+// graph alongside the error, while a validation failure returns a nil graph.
+// See GraphBuilder.
+func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
+	g := &Graph{Path: path}
+
+	// Route graph debug output to a per-builder file through the
+	// package-level dbug handler.
+	debugName := "graph.json"
+	if b.Name != "" {
+		debugName = b.Name + "-" + debugName
+	}
+	debugBuf := dbug.NewFileWriter(debugName)
+	g.SetDebugWriter(debugBuf)
+	defer debugBuf.Close()
+
+	for _, step := range b.Steps {
+		// Nil steps are permitted so callers can build step lists
+		// conditionally; just skip them.
+		if step == nil {
+			continue
+		}
+
+		// Trim the %T output ("*terraform.FooTransformer") down to the
+		// bare type name for the debug log.
+		stepName := fmt.Sprintf("%T", step)
+		dot := strings.LastIndex(stepName, ".")
+		if dot >= 0 {
+			stepName = stepName[dot+1:]
+		}
+
+		// Bracket the transform in a debug operation so the debug log
+		// records the step and its error (empty string on success).
+		debugOp := g.DebugOperation(stepName, "")
+		err := step.Transform(g)
+
+		errMsg := ""
+		if err != nil {
+			errMsg = err.Error()
+		}
+		debugOp.End(errMsg)
+
+		log.Printf(
+			"[TRACE] Graph after step %T:\n\n%s",
+			step, g.StringWithNodeTypes())
+
+		if err != nil {
+			return g, err
+		}
+	}
+
+	// Validate the graph structure
+	if b.Validate {
+		if err := g.Validate(); err != nil {
+			log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String())
+			return nil, err
+		}
+	}
+
+	return g, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
new file mode 100644
index 00000000..38a90f27
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
@@ -0,0 +1,141 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ApplyGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for applying a Terraform diff.
+//
+// Because the graph is built from the diff (vs. the config or state),
+// this helps ensure that the apply-time graph doesn't modify any resources
+// that aren't explicitly in the diff. There are other scenarios where the
+// diff can be deviated, so this is just one layer of protection.
+type ApplyGraphBuilder struct {
+	// Module is the root module for the graph to build.
+	Module *module.Tree
+
+	// Diff is the diff to apply.
+	Diff *Diff
+
+	// State is the current state
+	State *State
+
+	// Providers is the list of providers supported.
+	Providers []string
+
+	// Provisioners is the list of provisioners supported.
+	Provisioners []string
+
+	// Targets are resources to target. This is only required to make sure
+	// unnecessary outputs aren't included in the apply graph. The plan
+	// builder successfully handles targeting resources. In the future,
+	// outputs should go into the diff so that this is unnecessary.
+	Targets []string
+
+	// DisableReduce, if true, will not reduce the graph. Great for testing.
+	DisableReduce bool
+
+	// Destroy, if true, represents a pure destroy operation
+	Destroy bool
+
+	// Validate will do structural validation of the graph.
+	Validate bool
+}
+
+// Build delegates to a BasicGraphBuilder configured with this builder's
+// transform steps. See GraphBuilder.
+func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) {
+	return (&BasicGraphBuilder{
+		Steps:    b.Steps(),
+		Validate: b.Validate,
+		Name:     "ApplyGraphBuilder",
+	}).Build(path)
+}
+
+// Steps returns the ordered list of GraphTransformers used to build the
+// apply graph. Ordering is significant: later transformers operate on the
+// nodes and edges created by earlier ones. See GraphBuilder.
+func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
+	// Custom factory for creating providers.
+	concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+		return &NodeApplyableProvider{
+			NodeAbstractProvider: a,
+		}
+	}
+
+	// Resources from the diff become applyable resource nodes.
+	concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+		return &NodeApplyableResource{
+			NodeAbstractResource: a,
+		}
+	}
+
+	steps := []GraphTransformer{
+		// Creates all the nodes represented in the diff.
+		&DiffTransformer{
+			Concrete: concreteResource,
+
+			Diff:   b.Diff,
+			Module: b.Module,
+			State:  b.State,
+		},
+
+		// Create orphan output nodes
+		&OrphanOutputTransformer{Module: b.Module, State: b.State},
+
+		// Attach the configuration to any resources
+		&AttachResourceConfigTransformer{Module: b.Module},
+
+		// Attach the state
+		&AttachStateTransformer{State: b.State},
+
+		// Create all the providers
+		&MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
+		&ProviderTransformer{},
+		&DisableProviderTransformer{},
+		&ParentProviderTransformer{},
+		&AttachProviderConfigTransformer{Module: b.Module},
+
+		// Destruction ordering. Create-before-destroy edges are only
+		// added when this is not a pure destroy operation.
+		&DestroyEdgeTransformer{Module: b.Module, State: b.State},
+		GraphTransformIf(
+			func() bool { return !b.Destroy },
+			&CBDEdgeTransformer{Module: b.Module, State: b.State},
+		),
+
+		// Provisioner-related transformations
+		&MissingProvisionerTransformer{Provisioners: b.Provisioners},
+		&ProvisionerTransformer{},
+
+		// Add root variables
+		&RootVariableTransformer{Module: b.Module},
+
+		// Add the outputs
+		&OutputTransformer{Module: b.Module},
+
+		// Add module variables
+		&ModuleVariableTransformer{Module: b.Module},
+
+		// Connect references so ordering is correct
+		&ReferenceTransformer{},
+
+		// Add the node to fix the state count boundaries
+		&CountBoundaryTransformer{},
+
+		// Target
+		&TargetsTransformer{Targets: b.Targets},
+
+		// Close opened plugin connections
+		&CloseProviderTransformer{},
+		&CloseProvisionerTransformer{},
+
+		// Single root
+		&RootTransformer{},
+	}
+
+	if !b.DisableReduce {
+		// Perform the transitive reduction to make our graph a bit
+		// more sane if possible (it usually is possible).
+		steps = append(steps, &TransitiveReductionTransformer{})
+	}
+
+	return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
new file mode 100644
index 00000000..014b348e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
@@ -0,0 +1,67 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for
+// planning a pure-destroy.
+//
+// Planning a pure destroy operation is simple because we can ignore most
+// ordering configuration and simply reverse the state.
+type DestroyPlanGraphBuilder struct {
+	// Module is the root module for the graph to build.
+	Module *module.Tree
+
+	// State is the current state
+	State *State
+
+	// Targets are resources to target
+	Targets []string
+
+	// Validate will do structural validation of the graph.
+	Validate bool
+}
+
+// Build delegates to a BasicGraphBuilder configured with this builder's
+// transform steps. See GraphBuilder.
+func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) {
+	return (&BasicGraphBuilder{
+		Steps:    b.Steps(),
+		Validate: b.Validate,
+		Name:     "DestroyPlanGraphBuilder",
+	}).Build(path)
+}
+
+// Steps returns the ordered list of GraphTransformers for a destroy plan.
+// Ordering is significant: each transformer operates on the graph produced
+// by the previous ones. See GraphBuilder.
+func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
+	// Every resource found in the state becomes a plan-destroyable node.
+	concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+		return &NodePlanDestroyableResource{
+			NodeAbstractResource: a,
+		}
+	}
+
+	steps := []GraphTransformer{
+		// Creates all the nodes represented in the state.
+		&StateTransformer{
+			Concrete: concreteResource,
+			State:    b.State,
+		},
+
+		// Attach the configuration to any resources
+		&AttachResourceConfigTransformer{Module: b.Module},
+
+		// Destruction ordering. We require this only so that
+		// targeting below will prune the correct things.
+		&DestroyEdgeTransformer{Module: b.Module, State: b.State},
+
+		// Target. Note we don't set "Destroy: true" here since we already
+		// created proper destroy ordering.
+		&TargetsTransformer{Targets: b.Targets},
+
+		// Single root
+		&RootTransformer{},
+	}
+
+	return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
new file mode 100644
index 00000000..7070c59e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
@@ -0,0 +1,76 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ImportGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for importing resources into Terraform. This is a much, much
+// simpler graph than a normal configuration graph.
+type ImportGraphBuilder struct {
+	// ImportTargets are the list of resources to import.
+	ImportTargets []*ImportTarget
+
+	// Module is the module to add to the graph. See ImportOpts.Module.
+	Module *module.Tree
+
+	// Providers is the list of providers supported.
+	Providers []string
+}
+
+// Build builds the graph according to the steps returned by Steps.
+// Validation is always enabled for import graphs.
+func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) {
+	return (&BasicGraphBuilder{
+		Steps:    b.Steps(),
+		Validate: true,
+		Name:     "ImportGraphBuilder",
+	}).Build(path)
+}
+
+// Steps returns the ordered list of GraphTransformers that must be executed
+// to build a complete graph. Ordering is significant.
+func (b *ImportGraphBuilder) Steps() []GraphTransformer {
+	// Get the module. If we don't have one, we just use an empty tree
+	// so that the transform still works but does nothing.
+	mod := b.Module
+	if mod == nil {
+		mod = module.NewEmptyTree()
+	}
+
+	// Custom factory for creating providers.
+	concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+		return &NodeApplyableProvider{
+			NodeAbstractProvider: a,
+		}
+	}
+
+	steps := []GraphTransformer{
+		// Create all our resources from the configuration and state
+		&ConfigTransformer{Module: mod},
+
+		// Add the import steps
+		&ImportStateTransformer{Targets: b.ImportTargets},
+
+		// Provider-related transformations
+		&MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
+		&ProviderTransformer{},
+		&DisableProviderTransformer{},
+		&ParentProviderTransformer{},
+		&AttachProviderConfigTransformer{Module: mod},
+
+		// This validates that the providers only depend on variables
+		&ImportProviderValidateTransformer{},
+
+		// Close opened plugin connections
+		&CloseProviderTransformer{},
+
+		// Single root
+		&RootTransformer{},
+
+		// Optimize
+		&TransitiveReductionTransformer{},
+	}
+
+	return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
new file mode 100644
index 00000000..0df48cdb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
@@ -0,0 +1,27 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// InputGraphBuilder creates the graph for the input operation.
+//
+// Unlike other graph builders, this is a function since it currently modifies
+// and is based on the PlanGraphBuilder. The PlanGraphBuilder passed in will be
+// modified and should not be used for any other operations.
+func InputGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
+	// Take over the concrete node factories so the plan builder's
+	// defaults are not installed by its init.
+	p.CustomConcrete = true
+
+	// Providers use the normal applyable node. This will ask for input.
+	p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
+		node := &NodeApplyableProvider{
+			NodeAbstractProvider: a,
+		}
+		return node
+	}
+
+	// Every other concrete field is deliberately left unset since the
+	// remainder should be no-ops for the input walk.
+	return p
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
new file mode 100644
index 00000000..02d86970
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
@@ -0,0 +1,161 @@
+package terraform
+
+import (
+ "sync"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// PlanGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for planning (creating a Terraform Diff).
+//
+// The primary difference between this graph and others:
+//
+//   * Based on the config since it represents the target state
+//
+//   * Ignores lifecycle options since no lifecycle events occur here. This
+//     simplifies the graph significantly since complex transforms such as
+//     create-before-destroy can be completely ignored.
+//
+type PlanGraphBuilder struct {
+	// Module is the root module for the graph to build.
+	Module *module.Tree
+
+	// State is the current state
+	State *State
+
+	// Providers is the list of providers supported.
+	Providers []string
+
+	// Provisioners is the list of provisioners supported.
+	Provisioners []string
+
+	// Targets are resources to target
+	Targets []string
+
+	// DisableReduce, if true, will not reduce the graph. Great for testing.
+	DisableReduce bool
+
+	// Validate will do structural validation of the graph.
+	Validate bool
+
+	// CustomConcrete can be set to customize the node types created
+	// for various parts of the plan. This is useful in order to customize
+	// the plan behavior. When true, init() leaves the Concrete* fields
+	// untouched (see InputGraphBuilder and ValidateGraphBuilder).
+	CustomConcrete         bool
+	ConcreteProvider       ConcreteProviderNodeFunc
+	ConcreteResource       ConcreteResourceNodeFunc
+	ConcreteResourceOrphan ConcreteResourceNodeFunc
+
+	// once guards init so defaults are installed at most one time.
+	once sync.Once
+}
+
+// Build delegates to a BasicGraphBuilder configured with this builder's
+// transform steps. See GraphBuilder.
+func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) {
+	return (&BasicGraphBuilder{
+		Steps:    b.Steps(),
+		Validate: b.Validate,
+		Name:     "PlanGraphBuilder",
+	}).Build(path)
+}
+
+// Steps returns the ordered list of GraphTransformers for planning.
+// Default concrete node factories are lazily installed on first call.
+// Ordering is significant. See GraphBuilder.
+func (b *PlanGraphBuilder) Steps() []GraphTransformer {
+	b.once.Do(b.init)
+
+	steps := []GraphTransformer{
+		// Creates all the resources represented in the config
+		&ConfigTransformer{
+			Concrete: b.ConcreteResource,
+			Module:   b.Module,
+		},
+
+		// Add the outputs
+		&OutputTransformer{Module: b.Module},
+
+		// Add orphan resources
+		&OrphanResourceTransformer{
+			Concrete: b.ConcreteResourceOrphan,
+			State:    b.State,
+			Module:   b.Module,
+		},
+
+		// Attach the configuration to any resources
+		&AttachResourceConfigTransformer{Module: b.Module},
+
+		// Attach the state
+		&AttachStateTransformer{State: b.State},
+
+		// Add root variables
+		&RootVariableTransformer{Module: b.Module},
+
+		// Create all the providers
+		&MissingProviderTransformer{Providers: b.Providers, Concrete: b.ConcreteProvider},
+		&ProviderTransformer{},
+		&DisableProviderTransformer{},
+		&ParentProviderTransformer{},
+		&AttachProviderConfigTransformer{Module: b.Module},
+
+		// Provisioner-related transformations. Only add these if requested.
+		// A nil Provisioners slice skips both transformers entirely.
+		GraphTransformIf(
+			func() bool { return b.Provisioners != nil },
+			GraphTransformMulti(
+				&MissingProvisionerTransformer{Provisioners: b.Provisioners},
+				&ProvisionerTransformer{},
+			),
+		),
+
+		// Add module variables
+		&ModuleVariableTransformer{Module: b.Module},
+
+		// Connect so that the references are ready for targeting. We'll
+		// have to connect again later for providers and so on.
+		&ReferenceTransformer{},
+
+		// Target
+		&TargetsTransformer{Targets: b.Targets},
+
+		// Close opened plugin connections
+		&CloseProviderTransformer{},
+		&CloseProvisionerTransformer{},
+
+		// Single root
+		&RootTransformer{},
+	}
+
+	if !b.DisableReduce {
+		// Perform the transitive reduction to make our graph a bit
+		// more sane if possible (it usually is possible).
+		steps = append(steps, &TransitiveReductionTransformer{})
+	}
+
+	return steps
+}
+
+// init installs the default concrete node factories. It runs at most once
+// (via b.once) and is a no-op when the caller opted into CustomConcrete.
+func (b *PlanGraphBuilder) init() {
+	// Do nothing if the user requests customizing the fields
+	if b.CustomConcrete {
+		return
+	}
+
+	b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
+		return &NodeApplyableProvider{
+			NodeAbstractProvider: a,
+		}
+	}
+
+	b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
+		return &NodePlannableResource{
+			NodeAbstractCountResource: &NodeAbstractCountResource{
+				NodeAbstractResource: a,
+			},
+		}
+	}
+
+	b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex {
+		return &NodePlannableResourceOrphan{
+			NodeAbstractResource: a,
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
new file mode 100644
index 00000000..88ae3380
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
@@ -0,0 +1,132 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// RefreshGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for refreshing (updating the Terraform state).
+//
+// The primary difference between this graph and others:
+//
+//   * Based on the state since it represents the only resources that
+//     need to be refreshed.
+//
+//   * Ignores lifecycle options since no lifecycle events occur here. This
+//     simplifies the graph significantly since complex transforms such as
+//     create-before-destroy can be completely ignored.
+//
+type RefreshGraphBuilder struct {
+	// Module is the root module for the graph to build.
+	Module *module.Tree
+
+	// State is the current state
+	State *State
+
+	// Providers is the list of providers supported.
+	Providers []string
+
+	// Targets are resources to target
+	Targets []string
+
+	// DisableReduce, if true, will not reduce the graph. Great for testing.
+	DisableReduce bool
+
+	// Validate will do structural validation of the graph.
+	Validate bool
+}
+
+// Build delegates to a BasicGraphBuilder configured with this builder's
+// transform steps. See GraphBuilder.
+func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) {
+	return (&BasicGraphBuilder{
+		Steps:    b.Steps(),
+		Validate: b.Validate,
+		Name:     "RefreshGraphBuilder",
+	}).Build(path)
+}
+
+// Steps returns the ordered list of GraphTransformers for refresh.
+// Ordering is significant. See GraphBuilder.
+func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
+	// Custom factory for creating providers.
+	concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+		return &NodeApplyableProvider{
+			NodeAbstractProvider: a,
+		}
+	}
+
+	// Managed resources already in the state are refreshed directly.
+	concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+		return &NodeRefreshableResource{
+			NodeAbstractResource: a,
+		}
+	}
+
+	// Data resources come from the config and are count-expandable.
+	concreteDataResource := func(a *NodeAbstractResource) dag.Vertex {
+		return &NodeRefreshableDataResource{
+			NodeAbstractCountResource: &NodeAbstractCountResource{
+				NodeAbstractResource: a,
+			},
+		}
+	}
+
+	steps := []GraphTransformer{
+		// Creates all the resources represented in the state
+		&StateTransformer{
+			Concrete: concreteResource,
+			State:    b.State,
+		},
+
+		// Creates all the data resources that aren't in the state. The
+		// mode filter restricts this pass to data resources only.
+		&ConfigTransformer{
+			Concrete:   concreteDataResource,
+			Module:     b.Module,
+			Unique:     true,
+			ModeFilter: true,
+			Mode:       config.DataResourceMode,
+		},
+
+		// Attach the state
+		&AttachStateTransformer{State: b.State},
+
+		// Attach the configuration to any resources
+		&AttachResourceConfigTransformer{Module: b.Module},
+
+		// Add root variables
+		&RootVariableTransformer{Module: b.Module},
+
+		// Create all the providers
+		&MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
+		&ProviderTransformer{},
+		&DisableProviderTransformer{},
+		&ParentProviderTransformer{},
+		&AttachProviderConfigTransformer{Module: b.Module},
+
+		// Add the outputs
+		&OutputTransformer{Module: b.Module},
+
+		// Add module variables
+		&ModuleVariableTransformer{Module: b.Module},
+
+		// Connect so that the references are ready for targeting. We'll
+		// have to connect again later for providers and so on.
+		&ReferenceTransformer{},
+
+		// Target
+		&TargetsTransformer{Targets: b.Targets},
+
+		// Close opened plugin connections
+		&CloseProviderTransformer{},
+
+		// Single root
+		&RootTransformer{},
+	}
+
+	if !b.DisableReduce {
+		// Perform the transitive reduction to make our graph a bit
+		// more sane if possible (it usually is possible).
+		steps = append(steps, &TransitiveReductionTransformer{})
+	}
+
+	return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
new file mode 100644
index 00000000..645ec7be
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
@@ -0,0 +1,36 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ValidateGraphBuilder creates the graph for the validate operation.
+//
+// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that
+// we only have to validate what we'd normally plan anyways. The
+// PlanGraphBuilder given will be modified so it shouldn't be used for anything
+// else after calling this function.
+func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
+	// We're going to customize the concrete functions
+	p.CustomConcrete = true
+
+	// Providers use the normal applyable node, mirroring InputGraphBuilder.
+	// NOTE(review): the original comment here said "This will ask for
+	// input", which looks copied from InputGraphBuilder — confirm whether
+	// input is actually requested during a validate walk.
+	p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
+		return &NodeApplyableProvider{
+			NodeAbstractProvider: a,
+		}
+	}
+
+	// Resources are wrapped in validatable, count-expandable nodes.
+	p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
+		return &NodeValidatableResource{
+			NodeAbstractCountResource: &NodeAbstractCountResource{
+				NodeAbstractResource: a,
+			},
+		}
+	}
+
+	// We purposely don't set any other concrete types since they don't
+	// require validation.
+
+	return p
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
new file mode 100644
index 00000000..73e3821f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
@@ -0,0 +1,9 @@
+package terraform
+
+import "github.com/hashicorp/terraform/dag"
+
+// GraphDot returns the dot formatting of a visual representation of
+// the given Terraform graph.
+func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) {
+	dot := g.Dot(opts)
+	return string(dot), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
new file mode 100644
index 00000000..2897eb54
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
@@ -0,0 +1,7 @@
+package terraform
+
+// GraphNodeSubPath says that a node is part of a graph with a
+// different path, and the context should be adjusted accordingly.
+// Graph.walk normalizes the returned path (via normalizeModulePath)
+// before entering it on the walker.
+type GraphNodeSubPath interface {
+	Path() []string
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
new file mode 100644
index 00000000..34ce6f64
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
@@ -0,0 +1,60 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphWalker is an interface that can be implemented that when used
+// with Graph.Walk will invoke the given callbacks under certain events.
+type GraphWalker interface {
+	// EnterPath is called when the walk enters a module path and returns
+	// the EvalContext used beneath it; ExitPath is called on the way out.
+	EnterPath([]string) EvalContext
+	ExitPath([]string)
+
+	// EnterVertex and ExitVertex bracket the processing of one vertex.
+	EnterVertex(dag.Vertex)
+	ExitVertex(dag.Vertex, error)
+
+	// EnterEvalTree may replace a vertex's eval tree before evaluation.
+	// ExitEvalTree receives the evaluation output and error, and may
+	// transform the error it returns to the walk.
+	EnterEvalTree(dag.Vertex, EvalNode) EvalNode
+	ExitEvalTree(dag.Vertex, interface{}, error) error
+}
+
+// GraphWalkerPanicwrapper can be optionally implemented to catch panics
+// that occur while walking the graph. This is not generally recommended
+// since panics should crash Terraform and result in a bug report. However,
+// this is particularly useful for situations like the shadow graph where
+// you don't ever want to cause a panic.
+type GraphWalkerPanicwrapper interface {
+	GraphWalker
+
+	// Panic is called when a panic occurs. This will halt the panic from
+	// propagating so if the walker wants it to crash still it should panic
+	// again. This is called from within a defer so runtime/debug.Stack can
+	// be used to get the stack trace of the panic.
+	Panic(dag.Vertex, interface{})
+}
+
+// GraphWalkerPanicwrap wraps an existing GraphWalker to wrap and swallow
+// the panics. This doesn't lose the panics since the panics are still
+// returned as errors as part of a graph walk.
+func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper {
+	return &graphWalkerPanicwrapper{
+		GraphWalker: w,
+	}
+}
+
+// graphWalkerPanicwrapper embeds the wrapped GraphWalker and adds a no-op
+// Panic method, satisfying GraphWalkerPanicwrapper.
+type graphWalkerPanicwrapper struct {
+	GraphWalker
+}
+
+// Panic deliberately does nothing: swallowing the panic here lets the
+// graph walk surface it as an error rather than crashing.
+func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
+
+// NullGraphWalker is a GraphWalker implementation that does nothing.
+// This can be embedded within other GraphWalker implementations for easily
+// implementing all the required functions.
+type NullGraphWalker struct{}
+
+// EnterPath returns a fresh MockEvalContext on every call.
+func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) }
+func (NullGraphWalker) ExitPath([]string)              {}
+func (NullGraphWalker) EnterVertex(dag.Vertex)         {}
+func (NullGraphWalker) ExitVertex(dag.Vertex, error)   {}
+
+// EnterEvalTree returns the tree unchanged.
+func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n }
+func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error {
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
new file mode 100644
index 00000000..e63b4603
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
@@ -0,0 +1,157 @@
+package terraform
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "sync"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ContextGraphWalker is the GraphWalker implementation used with the
+// Context struct to walk and evaluate the graph.
+type ContextGraphWalker struct {
+	NullGraphWalker
+
+	// Configurable values
+	Context     *Context
+	Operation   walkOperation
+	StopContext context.Context
+
+	// Outputs, do not set these. Do not read these while the graph
+	// is being walked. They are appended under errorLock in ExitEvalTree.
+	ValidationWarnings []string
+	ValidationErrors   []error
+
+	// errorLock guards the two validation output slices above.
+	errorLock sync.Mutex
+	// once guards init, which allocates all of the maps below.
+	once        sync.Once
+	contexts    map[string]*BuiltinEvalContext
+	contextLock sync.Mutex
+	// interpolaterVars holds per-module-path variable maps, keyed by
+	// PathCacheKey, shared with each BuiltinEvalContext.
+	interpolaterVars    map[string]map[string]interface{}
+	interpolaterVarLock sync.Mutex
+	providerCache       map[string]ResourceProvider
+	providerConfigCache map[string]*ResourceConfig
+	providerLock        sync.Mutex
+	provisionerCache    map[string]ResourceProvisioner
+	provisionerLock     sync.Mutex
+}
+
+// EnterPath returns the EvalContext for the given module path, creating
+// and caching a BuiltinEvalContext on first entry. The returned context
+// shares the walker's provider/provisioner caches and locks.
+func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
+	// Lazily allocate the walker's maps exactly once.
+	w.once.Do(w.init)
+
+	w.contextLock.Lock()
+	defer w.contextLock.Unlock()
+
+	// If we already have a context for this path cached, use that
+	key := PathCacheKey(path)
+	if ctx, ok := w.contexts[key]; ok {
+		return ctx
+	}
+
+	// Setup the variables for this interpolater. Only the root module
+	// (path length <= 1) is seeded from the Context's variables.
+	variables := make(map[string]interface{})
+	if len(path) <= 1 {
+		for k, v := range w.Context.variables {
+			variables[k] = v
+		}
+	}
+	// Merge in any variables previously recorded for this path, then
+	// publish the merged map as this path's variable map.
+	w.interpolaterVarLock.Lock()
+	if m, ok := w.interpolaterVars[key]; ok {
+		for k, v := range m {
+			variables[k] = v
+		}
+	}
+	w.interpolaterVars[key] = variables
+	w.interpolaterVarLock.Unlock()
+
+	// The eval context shares the walker's caches and locks so that all
+	// module paths see the same providers, provisioners, diff, and state.
+	ctx := &BuiltinEvalContext{
+		StopContext:         w.StopContext,
+		PathValue:           path,
+		Hooks:               w.Context.hooks,
+		InputValue:          w.Context.uiInput,
+		Components:          w.Context.components,
+		ProviderCache:       w.providerCache,
+		ProviderConfigCache: w.providerConfigCache,
+		ProviderInputConfig: w.Context.providerInputConfig,
+		ProviderLock:        &w.providerLock,
+		ProvisionerCache:    w.provisionerCache,
+		ProvisionerLock:     &w.provisionerLock,
+		DiffValue:           w.Context.diff,
+		DiffLock:            &w.Context.diffLock,
+		StateValue:          w.Context.state,
+		StateLock:           &w.Context.stateLock,
+		Interpolater: &Interpolater{
+			Operation:          w.Operation,
+			Meta:               w.Context.meta,
+			Module:             w.Context.module,
+			State:              w.Context.state,
+			StateLock:          &w.Context.stateLock,
+			VariableValues:     variables,
+			VariableValuesLock: &w.interpolaterVarLock,
+		},
+		InterpolaterVars:    w.interpolaterVars,
+		InterpolaterVarLock: &w.interpolaterVarLock,
+	}
+
+	w.contexts[key] = ctx
+	return ctx
+}
+
+// EnterEvalTree filters the eval tree down to the nodes belonging to the
+// current operation. It also acquires a slot in the Context's parallelism
+// semaphore; the matching Release happens in ExitEvalTree, so the two
+// calls must stay paired.
+func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
+	log.Printf("[TRACE] [%s] Entering eval tree: %s",
+		w.Operation, dag.VertexName(v))
+
+	// Acquire a lock on the semaphore
+	w.Context.parallelSem.Acquire()
+
+	// We want to filter the evaluation tree to only include operations
+	// that belong in this operation.
+	return EvalFilter(n, EvalNodeFilterOp(w.Operation))
+}
+
+// ExitEvalTree releases the parallelism slot acquired in EnterEvalTree and
+// classifies the evaluation error: validation errors are accumulated on the
+// walker (and nil is returned so the walk continues); any other error is
+// returned to halt this vertex.
+func (w *ContextGraphWalker) ExitEvalTree(
+	v dag.Vertex, output interface{}, err error) error {
+	log.Printf("[TRACE] [%s] Exiting eval tree: %s",
+		w.Operation, dag.VertexName(v))
+
+	// Release the semaphore unconditionally, before any error handling.
+	w.Context.parallelSem.Release()
+
+	if err == nil {
+		return nil
+	}
+
+	// Acquire the lock because anything is going to require a lock.
+	w.errorLock.Lock()
+	defer w.errorLock.Unlock()
+
+	// Try to get a validation error out of it. If its not a validation
+	// error, then just record the normal error.
+	verr, ok := err.(*EvalValidateError)
+	if !ok {
+		return err
+	}
+
+	// Prefix each warning/error with the vertex name so the output is
+	// attributable to a specific graph node.
+	for _, msg := range verr.Warnings {
+		w.ValidationWarnings = append(
+			w.ValidationWarnings,
+			fmt.Sprintf("%s: %s", dag.VertexName(v), msg))
+	}
+	for _, e := range verr.Errors {
+		w.ValidationErrors = append(
+			w.ValidationErrors,
+			errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e))
+	}
+
+	return nil
+}
+
+// init allocates the walker's shared caches. It is invoked exactly once,
+// via w.once, on the first EnterPath call.
+func (w *ContextGraphWalker) init() {
+	w.contexts = make(map[string]*BuiltinEvalContext, 5)
+	w.providerCache = make(map[string]ResourceProvider, 5)
+	w.providerConfigCache = make(map[string]*ResourceConfig, 5)
+	w.provisionerCache = make(map[string]ResourceProvisioner, 5)
+	w.interpolaterVars = make(map[string]map[string]interface{}, 5)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
new file mode 100644
index 00000000..3fb37481
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
@@ -0,0 +1,18 @@
+package terraform
+
+//go:generate stringer -type=walkOperation graph_walk_operation.go
+
+// walkOperation is an enum which tells the walkContext what to do.
+type walkOperation byte
+
+const (
+ walkInvalid walkOperation = iota
+ walkInput
+ walkApply
+ walkPlan
+ walkPlanDestroy
+ walkRefresh
+ walkValidate
+ walkDestroy
+ walkImport
+)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
new file mode 100644
index 00000000..e97b4855
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT.
+
+package terraform
+
+import "fmt"
+
+const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate"
+
+var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125}
+
+func (i GraphType) String() string {
+ if i >= GraphType(len(_GraphType_index)-1) {
+ return fmt.Sprintf("GraphType(%d)", i)
+ }
+ return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go
new file mode 100644
index 00000000..ab11e8ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook.go
@@ -0,0 +1,137 @@
+package terraform
+
+// HookAction is an enum of actions that can be taken as a result of a hook
+// callback. This allows you to modify the behavior of Terraform at runtime.
+type HookAction byte
+
+const (
+ // HookActionContinue continues with processing as usual.
+ HookActionContinue HookAction = iota
+
+ // HookActionHalt halts immediately: no more hooks are processed
+ // and the action that Terraform was about to take is cancelled.
+ HookActionHalt
+)
+
+// Hook is the interface that must be implemented to hook into various
+// parts of Terraform, allowing you to inspect or change behavior at runtime.
+//
+// There are MANY hook points into Terraform. If you only want to implement
+// some hook points, but not all (which is the likely case), then embed the
+// NilHook into your struct, which implements all of the interface but does
+// nothing. Then, override only the functions you want to implement.
+type Hook interface {
+ // PreApply and PostApply are called before and after a single
+ // resource is applied. The error argument in PostApply is the
+ // error, if any, that was returned from the provider Apply call itself.
+ PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error)
+ PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error)
+
+ // PreDiff and PostDiff are called before and after a single resource
+ // resource is diffed.
+ PreDiff(*InstanceInfo, *InstanceState) (HookAction, error)
+ PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error)
+
+ // Provisioning hooks
+ //
+ // All should be self-explanatory. ProvisionOutput is called with
+ // output sent back by the provisioners. This will be called multiple
+ // times as output comes in, but each call should represent a line of
+ // output. The ProvisionOutput method cannot control whether the
+ // hook continues running.
+ PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
+ PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
+ PreProvision(*InstanceInfo, string) (HookAction, error)
+ PostProvision(*InstanceInfo, string, error) (HookAction, error)
+ ProvisionOutput(*InstanceInfo, string, string)
+
+ // PreRefresh and PostRefresh are called before and after a single
+ // resource state is refreshed, respectively.
+ PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
+ PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
+
+ // PostStateUpdate is called after the state is updated.
+ PostStateUpdate(*State) (HookAction, error)
+
+ // PreImportState and PostImportState are called before and after
+ // a single resource's state is being imported.
+ PreImportState(*InstanceInfo, string) (HookAction, error)
+ PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error)
+}
+
+// NilHook is a Hook implementation that does nothing. It exists only to
+// simplify implementing hooks. You can embed this into your Hook implementation
+// and only implement the functions you are interested in.
+type NilHook struct{}
+
+func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) ProvisionOutput(
+ *InstanceInfo, string, string) {
+}
+
+func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostStateUpdate(*State) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+// handleHook turns hook actions into panics. This lets you use the
+// panic/recover mechanism in Go as a flow control mechanism for hook
+// actions.
+func handleHook(a HookAction, err error) {
+ if err != nil {
+ // TODO: handle errors
+ }
+
+ switch a {
+ case HookActionContinue:
+ return
+ case HookActionHalt:
+ panic(HookActionHalt)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
new file mode 100644
index 00000000..0e464006
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
@@ -0,0 +1,245 @@
+package terraform
+
+import "sync"
+
+// MockHook is an implementation of Hook that can be used for tests.
+// It records all of its function calls.
+type MockHook struct {
+ sync.Mutex
+
+ PreApplyCalled bool
+ PreApplyInfo *InstanceInfo
+ PreApplyDiff *InstanceDiff
+ PreApplyState *InstanceState
+ PreApplyReturn HookAction
+ PreApplyError error
+
+ PostApplyCalled bool
+ PostApplyInfo *InstanceInfo
+ PostApplyState *InstanceState
+ PostApplyError error
+ PostApplyReturn HookAction
+ PostApplyReturnError error
+ PostApplyFn func(*InstanceInfo, *InstanceState, error) (HookAction, error)
+
+ PreDiffCalled bool
+ PreDiffInfo *InstanceInfo
+ PreDiffState *InstanceState
+ PreDiffReturn HookAction
+ PreDiffError error
+
+ PostDiffCalled bool
+ PostDiffInfo *InstanceInfo
+ PostDiffDiff *InstanceDiff
+ PostDiffReturn HookAction
+ PostDiffError error
+
+ PreProvisionResourceCalled bool
+ PreProvisionResourceInfo *InstanceInfo
+ PreProvisionInstanceState *InstanceState
+ PreProvisionResourceReturn HookAction
+ PreProvisionResourceError error
+
+ PostProvisionResourceCalled bool
+ PostProvisionResourceInfo *InstanceInfo
+ PostProvisionInstanceState *InstanceState
+ PostProvisionResourceReturn HookAction
+ PostProvisionResourceError error
+
+ PreProvisionCalled bool
+ PreProvisionInfo *InstanceInfo
+ PreProvisionProvisionerId string
+ PreProvisionReturn HookAction
+ PreProvisionError error
+
+ PostProvisionCalled bool
+ PostProvisionInfo *InstanceInfo
+ PostProvisionProvisionerId string
+ PostProvisionErrorArg error
+ PostProvisionReturn HookAction
+ PostProvisionError error
+
+ ProvisionOutputCalled bool
+ ProvisionOutputInfo *InstanceInfo
+ ProvisionOutputProvisionerId string
+ ProvisionOutputMessage string
+
+ PostRefreshCalled bool
+ PostRefreshInfo *InstanceInfo
+ PostRefreshState *InstanceState
+ PostRefreshReturn HookAction
+ PostRefreshError error
+
+ PreRefreshCalled bool
+ PreRefreshInfo *InstanceInfo
+ PreRefreshState *InstanceState
+ PreRefreshReturn HookAction
+ PreRefreshError error
+
+ PreImportStateCalled bool
+ PreImportStateInfo *InstanceInfo
+ PreImportStateId string
+ PreImportStateReturn HookAction
+ PreImportStateError error
+
+ PostImportStateCalled bool
+ PostImportStateInfo *InstanceInfo
+ PostImportStateState []*InstanceState
+ PostImportStateReturn HookAction
+ PostImportStateError error
+
+ PostStateUpdateCalled bool
+ PostStateUpdateState *State
+ PostStateUpdateReturn HookAction
+ PostStateUpdateError error
+}
+
+func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreApplyCalled = true
+ h.PreApplyInfo = n
+ h.PreApplyDiff = d
+ h.PreApplyState = s
+ return h.PreApplyReturn, h.PreApplyError
+}
+
+func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostApplyCalled = true
+ h.PostApplyInfo = n
+ h.PostApplyState = s
+ h.PostApplyError = e
+
+ if h.PostApplyFn != nil {
+ return h.PostApplyFn(n, s, e)
+ }
+
+ return h.PostApplyReturn, h.PostApplyReturnError
+}
+
+func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreDiffCalled = true
+ h.PreDiffInfo = n
+ h.PreDiffState = s
+ return h.PreDiffReturn, h.PreDiffError
+}
+
+func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostDiffCalled = true
+ h.PostDiffInfo = n
+ h.PostDiffDiff = d
+ return h.PostDiffReturn, h.PostDiffError
+}
+
+func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreProvisionResourceCalled = true
+ h.PreProvisionResourceInfo = n
+ h.PreProvisionInstanceState = s
+ return h.PreProvisionResourceReturn, h.PreProvisionResourceError
+}
+
+func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostProvisionResourceCalled = true
+ h.PostProvisionResourceInfo = n
+ h.PostProvisionInstanceState = s
+ return h.PostProvisionResourceReturn, h.PostProvisionResourceError
+}
+
+func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreProvisionCalled = true
+ h.PreProvisionInfo = n
+ h.PreProvisionProvisionerId = provId
+ return h.PreProvisionReturn, h.PreProvisionError
+}
+
+func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostProvisionCalled = true
+ h.PostProvisionInfo = n
+ h.PostProvisionProvisionerId = provId
+ h.PostProvisionErrorArg = err
+ return h.PostProvisionReturn, h.PostProvisionError
+}
+
+func (h *MockHook) ProvisionOutput(
+ n *InstanceInfo,
+ provId string,
+ msg string) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.ProvisionOutputCalled = true
+ h.ProvisionOutputInfo = n
+ h.ProvisionOutputProvisionerId = provId
+ h.ProvisionOutputMessage = msg
+}
+
+func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreRefreshCalled = true
+ h.PreRefreshInfo = n
+ h.PreRefreshState = s
+ return h.PreRefreshReturn, h.PreRefreshError
+}
+
+func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostRefreshCalled = true
+ h.PostRefreshInfo = n
+ h.PostRefreshState = s
+ return h.PostRefreshReturn, h.PostRefreshError
+}
+
+func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreImportStateCalled = true
+ h.PreImportStateInfo = info
+ h.PreImportStateId = id
+ return h.PreImportStateReturn, h.PreImportStateError
+}
+
+func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostImportStateCalled = true
+ h.PostImportStateInfo = info
+ h.PostImportStateState = s
+ return h.PostImportStateReturn, h.PostImportStateError
+}
+
+func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostStateUpdateCalled = true
+ h.PostStateUpdateState = s
+ return h.PostStateUpdateReturn, h.PostStateUpdateError
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
new file mode 100644
index 00000000..104d0098
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
@@ -0,0 +1,87 @@
+package terraform
+
+import (
+ "sync/atomic"
+)
+
+// stopHook is a private Hook implementation that Terraform uses to
+// signal when to stop or cancel actions.
+type stopHook struct {
+ stop uint32
+}
+
+func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) {
+}
+
+func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostStateUpdate(*State) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) hook() (HookAction, error) {
+ if h.Stopped() {
+ return HookActionHalt, nil
+ }
+
+ return HookActionContinue, nil
+}
+
+// reset should be called within the lock context
+func (h *stopHook) Reset() {
+ atomic.StoreUint32(&h.stop, 0)
+}
+
+func (h *stopHook) Stop() {
+ atomic.StoreUint32(&h.stop, 1)
+}
+
+func (h *stopHook) Stopped() bool {
+ return atomic.LoadUint32(&h.stop) == 1
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
new file mode 100644
index 00000000..08959717
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
@@ -0,0 +1,13 @@
+package terraform
+
+//go:generate stringer -type=InstanceType instancetype.go
+
+// InstanceType is an enum of the various types of instances store in the State
+type InstanceType int
+
+const (
+ TypeInvalid InstanceType = iota
+ TypePrimary
+ TypeTainted
+ TypeDeposed
+)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
new file mode 100644
index 00000000..f69267cd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT.
+
+package terraform
+
+import "fmt"
+
+const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed"
+
+var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44}
+
+func (i InstanceType) String() string {
+ if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) {
+ return fmt.Sprintf("InstanceType(%d)", i)
+ }
+ return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
new file mode 100644
index 00000000..855548c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
@@ -0,0 +1,790 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/hil"
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/flatmap"
+)
+
+const (
+ // VarEnvPrefix is the prefix of variables that are read from
+ // the environment to set variables here.
+ VarEnvPrefix = "TF_VAR_"
+)
+
+// Interpolater is the structure responsible for determining the values
+// for interpolations such as `aws_instance.foo.bar`.
+type Interpolater struct {
+ Operation walkOperation
+ Meta *ContextMeta
+ Module *module.Tree
+ State *State
+ StateLock *sync.RWMutex
+ VariableValues map[string]interface{}
+ VariableValuesLock *sync.Mutex
+}
+
+// InterpolationScope is the current scope of execution. This is required
+// since some variables which are interpolated are dependent on what we're
+// operating on and where we are.
+type InterpolationScope struct {
+ Path []string
+ Resource *Resource
+}
+
+// Values returns the values for all the variables in the given map.
+func (i *Interpolater) Values(
+ scope *InterpolationScope,
+ vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) {
+ if scope == nil {
+ scope = &InterpolationScope{}
+ }
+
+ result := make(map[string]ast.Variable, len(vars))
+
+ // Copy the default variables
+ if i.Module != nil && scope != nil {
+ mod := i.Module
+ if len(scope.Path) > 1 {
+ mod = i.Module.Child(scope.Path[1:])
+ }
+ for _, v := range mod.Config().Variables {
+ // Set default variables
+ if v.Default == nil {
+ continue
+ }
+
+ n := fmt.Sprintf("var.%s", v.Name)
+ variable, err := hil.InterfaceToVariable(v.Default)
+ if err != nil {
+ return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default)
+ }
+
+ result[n] = variable
+ }
+ }
+
+ for n, rawV := range vars {
+ var err error
+ switch v := rawV.(type) {
+ case *config.CountVariable:
+ err = i.valueCountVar(scope, n, v, result)
+ case *config.ModuleVariable:
+ err = i.valueModuleVar(scope, n, v, result)
+ case *config.PathVariable:
+ err = i.valuePathVar(scope, n, v, result)
+ case *config.ResourceVariable:
+ err = i.valueResourceVar(scope, n, v, result)
+ case *config.SelfVariable:
+ err = i.valueSelfVar(scope, n, v, result)
+ case *config.SimpleVariable:
+ err = i.valueSimpleVar(scope, n, v, result)
+ case *config.TerraformVariable:
+ err = i.valueTerraformVar(scope, n, v, result)
+ case *config.UserVariable:
+ err = i.valueUserVar(scope, n, v, result)
+ default:
+ err = fmt.Errorf("%s: unknown variable type: %T", n, rawV)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return result, nil
+}
+
+func (i *Interpolater) valueCountVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.CountVariable,
+ result map[string]ast.Variable) error {
+ switch v.Type {
+ case config.CountValueIndex:
+ if scope.Resource == nil {
+ return fmt.Errorf("%s: count.index is only valid within resources", n)
+ }
+ result[n] = ast.Variable{
+ Value: scope.Resource.CountIndex,
+ Type: ast.TypeInt,
+ }
+ return nil
+ default:
+ return fmt.Errorf("%s: unknown count type: %#v", n, v.Type)
+ }
+}
+
+func unknownVariable() ast.Variable {
+ return ast.Variable{
+ Type: ast.TypeUnknown,
+ Value: config.UnknownVariableValue,
+ }
+}
+
+func unknownValue() string {
+ return hil.UnknownValue
+}
+
+func (i *Interpolater) valueModuleVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.ModuleVariable,
+ result map[string]ast.Variable) error {
+
+ // Build the path to the child module we want
+ path := make([]string, len(scope.Path), len(scope.Path)+1)
+ copy(path, scope.Path)
+ path = append(path, v.Name)
+
+ // Grab the lock so that if other interpolations are running or
+ // state is being modified, we'll be safe.
+ i.StateLock.RLock()
+ defer i.StateLock.RUnlock()
+
+ // Get the module where we're looking for the value
+ mod := i.State.ModuleByPath(path)
+ if mod == nil {
+ // If the module doesn't exist, then we can return an empty string.
+ // This happens usually only in Refresh() when we haven't populated
+ // a state. During validation, we semantically verify that all
+ // modules reference other modules, and graph ordering should
+ // ensure that the module is in the state, so if we reach this
+ // point otherwise it really is a panic.
+ result[n] = unknownVariable()
+
+ // During apply this is always an error
+ if i.Operation == walkApply {
+ return fmt.Errorf(
+ "Couldn't find module %q for var: %s",
+ v.Name, v.FullKey())
+ }
+ } else {
+ // Get the value from the outputs
+ if outputState, ok := mod.Outputs[v.Field]; ok {
+ output, err := hil.InterfaceToVariable(outputState.Value)
+ if err != nil {
+ return err
+ }
+ result[n] = output
+ } else {
+ // Same reasons as the comment above.
+ result[n] = unknownVariable()
+
+ // During apply this is always an error
+ if i.Operation == walkApply {
+ return fmt.Errorf(
+ "Couldn't find output %q for module var: %s",
+ v.Field, v.FullKey())
+ }
+ }
+ }
+
+ return nil
+}
+
+func (i *Interpolater) valuePathVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.PathVariable,
+ result map[string]ast.Variable) error {
+ switch v.Type {
+ case config.PathValueCwd:
+ wd, err := os.Getwd()
+ if err != nil {
+ return fmt.Errorf(
+ "Couldn't get cwd for var %s: %s",
+ v.FullKey(), err)
+ }
+
+ result[n] = ast.Variable{
+ Value: wd,
+ Type: ast.TypeString,
+ }
+ case config.PathValueModule:
+ if t := i.Module.Child(scope.Path[1:]); t != nil {
+ result[n] = ast.Variable{
+ Value: t.Config().Dir,
+ Type: ast.TypeString,
+ }
+ }
+ case config.PathValueRoot:
+ result[n] = ast.Variable{
+ Value: i.Module.Config().Dir,
+ Type: ast.TypeString,
+ }
+ default:
+ return fmt.Errorf("%s: unknown path type: %#v", n, v.Type)
+ }
+
+ return nil
+
+}
+
+func (i *Interpolater) valueResourceVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.ResourceVariable,
+ result map[string]ast.Variable) error {
+ // If we're computing all dynamic fields, then module vars count
+ // and we mark it as computed.
+ if i.Operation == walkValidate {
+ result[n] = unknownVariable()
+ return nil
+ }
+
+ var variable *ast.Variable
+ var err error
+
+ if v.Multi && v.Index == -1 {
+ variable, err = i.computeResourceMultiVariable(scope, v)
+ } else {
+ variable, err = i.computeResourceVariable(scope, v)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if variable == nil {
+ // During the input walk we tolerate missing variables because
+ // we haven't yet had a chance to refresh state, so dynamic data may
+ // not yet be complete.
+ // If it truly is missing, we'll catch it on a later walk.
+ // This applies only to graph nodes that interpolate during the
+ // config walk, e.g. providers.
+ if i.Operation == walkInput || i.Operation == walkRefresh {
+ result[n] = unknownVariable()
+ return nil
+ }
+
+ return fmt.Errorf("variable %q is nil, but no error was reported", v.Name)
+ }
+
+ result[n] = *variable
+ return nil
+}
+
+func (i *Interpolater) valueSelfVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.SelfVariable,
+ result map[string]ast.Variable) error {
+ if scope == nil || scope.Resource == nil {
+ return fmt.Errorf(
+ "%s: invalid scope, self variables are only valid on resources", n)
+ }
+
+ rv, err := config.NewResourceVariable(fmt.Sprintf(
+ "%s.%s.%d.%s",
+ scope.Resource.Type,
+ scope.Resource.Name,
+ scope.Resource.CountIndex,
+ v.Field))
+ if err != nil {
+ return err
+ }
+
+ return i.valueResourceVar(scope, n, rv, result)
+}
+
+func (i *Interpolater) valueSimpleVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.SimpleVariable,
+ result map[string]ast.Variable) error {
+ // This error message includes some information for people who
+ // relied on this for their template_file data sources. We should
+ // remove this at some point but there isn't any rush.
+ return fmt.Errorf(
+ "invalid variable syntax: %q. Did you mean 'var.%s'? If this is part of inline `template` parameter\n"+
+ "then you must escape the interpolation with two dollar signs. For\n"+
+ "example: ${a} becomes $${a}.",
+ n, n)
+}
+
+func (i *Interpolater) valueTerraformVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.TerraformVariable,
+ result map[string]ast.Variable) error {
+ if v.Field != "env" {
+ return fmt.Errorf(
+ "%s: only supported key for 'terraform.X' interpolations is 'env'", n)
+ }
+
+ if i.Meta == nil {
+ return fmt.Errorf(
+ "%s: internal error: nil Meta. Please report a bug.", n)
+ }
+
+ result[n] = ast.Variable{Type: ast.TypeString, Value: i.Meta.Env}
+ return nil
+}
+
+func (i *Interpolater) valueUserVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.UserVariable,
+ result map[string]ast.Variable) error {
+ i.VariableValuesLock.Lock()
+ defer i.VariableValuesLock.Unlock()
+ val, ok := i.VariableValues[v.Name]
+ if ok {
+ varValue, err := hil.InterfaceToVariable(val)
+ if err != nil {
+ return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
+ v.Name, val, err)
+ }
+ result[n] = varValue
+ return nil
+ }
+
+ if _, ok := result[n]; !ok && i.Operation == walkValidate {
+ result[n] = unknownVariable()
+ return nil
+ }
+
+ // Look up if we have any variables with this prefix because
+ // those are map overrides. Include those.
+ for k, val := range i.VariableValues {
+ if strings.HasPrefix(k, v.Name+".") {
+ keyComponents := strings.Split(k, ".")
+ overrideKey := keyComponents[len(keyComponents)-1]
+
+ mapInterface, ok := result["var."+v.Name]
+ if !ok {
+ return fmt.Errorf("override for non-existent variable: %s", v.Name)
+ }
+
+ mapVariable := mapInterface.Value.(map[string]ast.Variable)
+
+ varValue, err := hil.InterfaceToVariable(val)
+ if err != nil {
+ return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
+ v.Name, val, err)
+ }
+ mapVariable[overrideKey] = varValue
+ }
+ }
+
+ return nil
+}
+
+func (i *Interpolater) computeResourceVariable(
+ scope *InterpolationScope,
+ v *config.ResourceVariable) (*ast.Variable, error) {
+ id := v.ResourceId()
+ if v.Multi {
+ id = fmt.Sprintf("%s.%d", id, v.Index)
+ }
+
+ i.StateLock.RLock()
+ defer i.StateLock.RUnlock()
+
+ unknownVariable := unknownVariable()
+
+ // These variables must be declared early because of the use of GOTO
+ var isList bool
+ var isMap bool
+
+ // Get the information about this resource variable, and verify
+ // that it exists and such.
+ module, cr, err := i.resourceVariableInfo(scope, v)
+ if err != nil {
+ return nil, err
+ }
+
+ // If we're requesting "count" it's a special variable that we grab
+ // directly from the config itself.
+ if v.Field == "count" {
+ var count int
+ if cr != nil {
+ count, err = cr.Count()
+ } else {
+ count, err = i.resourceCountMax(module, cr, v)
+ }
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading %s count: %s",
+ v.ResourceId(),
+ err)
+ }
+
+ return &ast.Variable{Type: ast.TypeInt, Value: count}, nil
+ }
+
+ // Get the resource out from the state. We know the state exists
+ // at this point and if there is a state, we expect there to be a
+ // resource with the given name.
+ var r *ResourceState
+ if module != nil && len(module.Resources) > 0 {
+ var ok bool
+ r, ok = module.Resources[id]
+ if !ok && v.Multi && v.Index == 0 {
+ r, ok = module.Resources[v.ResourceId()]
+ }
+ if !ok {
+ r = nil
+ }
+ }
+ if r == nil || r.Primary == nil {
+ if i.Operation == walkApply || i.Operation == walkPlan {
+ return nil, fmt.Errorf(
+ "Resource '%s' not found for variable '%s'",
+ v.ResourceId(),
+ v.FullKey())
+ }
+
+ // If we have no module in the state yet or count, return empty.
+ // NOTE(@mitchellh): I actually don't know why this is here. During
+ // a refactor I kept this here to maintain the same behavior, but
+ // I'm not sure why it's here.
+ if module == nil || len(module.Resources) == 0 {
+ return nil, nil
+ }
+
+ goto MISSING
+ }
+
+ if attr, ok := r.Primary.Attributes[v.Field]; ok {
+ v, err := hil.InterfaceToVariable(attr)
+ return &v, err
+ }
+
+ // computed list or map attribute
+ _, isList = r.Primary.Attributes[v.Field+".#"]
+ _, isMap = r.Primary.Attributes[v.Field+".%"]
+ if isList || isMap {
+ variable, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
+ return &variable, err
+ }
+
+ // At apply time, we can't do the "maybe has it" check below
+ // that we need for plans since parent elements might be computed.
+ // Therefore, it is an error and we're missing the key.
+ //
+ // TODO: test by creating a state and configuration that is referencing
+ // a non-existent variable "foo.bar" where the state only has "foo"
+ // and verify plan works, but apply doesn't.
+ if i.Operation == walkApply || i.Operation == walkDestroy {
+ goto MISSING
+ }
+
+ // We didn't find the exact field, so lets separate the dots
+ // and see if anything along the way is a computed set. i.e. if
+ // we have "foo.0.bar" as the field, check to see if "foo" is
+ // a computed list. If so, then the whole thing is computed.
+ if parts := strings.Split(v.Field, "."); len(parts) > 1 {
+ for i := 1; i < len(parts); i++ {
+ // Lists and sets make this
+ key := fmt.Sprintf("%s.#", strings.Join(parts[:i], "."))
+ if attr, ok := r.Primary.Attributes[key]; ok {
+ v, err := hil.InterfaceToVariable(attr)
+ return &v, err
+ }
+
+ // Maps make this
+ key = fmt.Sprintf("%s", strings.Join(parts[:i], "."))
+ if attr, ok := r.Primary.Attributes[key]; ok {
+ v, err := hil.InterfaceToVariable(attr)
+ return &v, err
+ }
+ }
+ }
+
+MISSING:
+ // Validation for missing interpolations should happen at a higher
+ // semantic level. If we reached this point and don't have variables,
+ // just return the computed value.
+ if scope == nil && scope.Resource == nil {
+ return &unknownVariable, nil
+ }
+
+ // If the operation is refresh, it isn't an error for a value to
+ // be unknown. Instead, we return that the value is computed so
+ // that the graph can continue to refresh other nodes. It doesn't
+ // matter because the config isn't interpolated anyways.
+ //
+ // For a Destroy, we're also fine with computed values, since our goal is
+ // only to get destroy nodes for existing resources.
+ //
+ // For an input walk, computed values are okay to return because we're only
+ // looking for missing variables to prompt the user for.
+ if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput {
+ return &unknownVariable, nil
+ }
+
+ return nil, fmt.Errorf(
+ "Resource '%s' does not have attribute '%s' "+
+ "for variable '%s'",
+ id,
+ v.Field,
+ v.FullKey())
+}
+
+func (i *Interpolater) computeResourceMultiVariable(
+ scope *InterpolationScope,
+ v *config.ResourceVariable) (*ast.Variable, error) {
+ i.StateLock.RLock()
+ defer i.StateLock.RUnlock()
+
+ unknownVariable := unknownVariable()
+
+ // If we're only looking for input, we don't need to expand a
+ // multi-variable. This prevents us from encountering things that should be
+ // known but aren't because the state has yet to be refreshed.
+ if i.Operation == walkInput {
+ return &unknownVariable, nil
+ }
+
+ // Get the information about this resource variable, and verify
+ // that it exists and such.
+ module, cr, err := i.resourceVariableInfo(scope, v)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the keys for all the resources that are created for this resource
+ countMax, err := i.resourceCountMax(module, cr, v)
+ if err != nil {
+ return nil, err
+ }
+
+ // If count is zero, we return an empty list
+ if countMax == 0 {
+ return &ast.Variable{Type: ast.TypeList, Value: []ast.Variable{}}, nil
+ }
+
+ // If we have no module in the state yet or count, return unknown
+ if module == nil || len(module.Resources) == 0 {
+ return &unknownVariable, nil
+ }
+
+ var values []interface{}
+ for idx := 0; idx < countMax; idx++ {
+ id := fmt.Sprintf("%s.%d", v.ResourceId(), idx)
+
+ // ID doesn't have a trailing index. We try both here, but if a value
+ // without a trailing index is found we prefer that. This choice
+ // is for legacy reasons: older versions of TF preferred it.
+ if id == v.ResourceId()+".0" {
+ potential := v.ResourceId()
+ if _, ok := module.Resources[potential]; ok {
+ id = potential
+ }
+ }
+
+ r, ok := module.Resources[id]
+ if !ok {
+ continue
+ }
+
+ if r.Primary == nil {
+ continue
+ }
+
+ if singleAttr, ok := r.Primary.Attributes[v.Field]; ok {
+ if singleAttr == config.UnknownVariableValue {
+ return &unknownVariable, nil
+ }
+
+ values = append(values, singleAttr)
+ continue
+ }
+
+ // computed list or map attribute
+ _, isList := r.Primary.Attributes[v.Field+".#"]
+ _, isMap := r.Primary.Attributes[v.Field+".%"]
+ if !(isList || isMap) {
+ continue
+ }
+ multiAttr, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
+ if err != nil {
+ return nil, err
+ }
+
+ if multiAttr == unknownVariable {
+ return &unknownVariable, nil
+ }
+
+ values = append(values, multiAttr)
+ }
+
+ if len(values) == 0 {
+ // If the operation is refresh, it isn't an error for a value to
+ // be unknown. Instead, we return that the value is computed so
+ // that the graph can continue to refresh other nodes. It doesn't
+ // matter because the config isn't interpolated anyways.
+ //
+ // For a Destroy, we're also fine with computed values, since our goal is
+ // only to get destroy nodes for existing resources.
+ //
+ // For an input walk, computed values are okay to return because we're only
+ // looking for missing variables to prompt the user for.
+ if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput {
+ return &unknownVariable, nil
+ }
+
+ return nil, fmt.Errorf(
+ "Resource '%s' does not have attribute '%s' "+
+ "for variable '%s'",
+ v.ResourceId(),
+ v.Field,
+ v.FullKey())
+ }
+
+ variable, err := hil.InterfaceToVariable(values)
+ return &variable, err
+}
+
+func (i *Interpolater) interpolateComplexTypeAttribute(
+ resourceID string,
+ attributes map[string]string) (ast.Variable, error) {
+
+ // We can now distinguish between lists and maps in state by the count field:
+ // - lists (and by extension, sets) use the traditional .# notation
+ // - maps use the newer .% notation
+ // Consequently here we can decide how to deal with the keys appropriately
+ // based on whether the type is a map or list.
+ if lengthAttr, isList := attributes[resourceID+".#"]; isList {
+ log.Printf("[DEBUG] Interpolating computed list element attribute %s (%s)",
+ resourceID, lengthAttr)
+
+ // In Terraform's internal dotted representation of list-like attributes, the
+ // ".#" count field is marked as unknown to indicate "this whole list is
+ // unknown". We must honor that meaning here so computed references can be
+ // treated properly during the plan phase.
+ if lengthAttr == config.UnknownVariableValue {
+ return unknownVariable(), nil
+ }
+
+ expanded := flatmap.Expand(attributes, resourceID)
+ return hil.InterfaceToVariable(expanded)
+ }
+
+ if lengthAttr, isMap := attributes[resourceID+".%"]; isMap {
+ log.Printf("[DEBUG] Interpolating computed map element attribute %s (%s)",
+ resourceID, lengthAttr)
+
+ // In Terraform's internal dotted representation of map attributes, the
+ // ".%" count field is marked as unknown to indicate "this whole map is
+ // unknown". We must honor that meaning here so computed references can be
+ // treated properly during the plan phase.
+ if lengthAttr == config.UnknownVariableValue {
+ return unknownVariable(), nil
+ }
+
+ expanded := flatmap.Expand(attributes, resourceID)
+ return hil.InterfaceToVariable(expanded)
+ }
+
+ return ast.Variable{}, fmt.Errorf("No complex type %s found", resourceID)
+}
+
+func (i *Interpolater) resourceVariableInfo(
+ scope *InterpolationScope,
+ v *config.ResourceVariable) (*ModuleState, *config.Resource, error) {
+ // Get the module tree that contains our current path. This is
+ // either the current module (path is empty) or a child.
+ modTree := i.Module
+ if len(scope.Path) > 1 {
+ modTree = i.Module.Child(scope.Path[1:])
+ }
+
+ // Get the resource from the configuration so we can verify
+ // that the resource is in the configuration and so we can access
+ // the configuration if we need to.
+ var cr *config.Resource
+ for _, r := range modTree.Config().Resources {
+ if r.Id() == v.ResourceId() {
+ cr = r
+ break
+ }
+ }
+
+ // Get the relevant module
+ module := i.State.ModuleByPath(scope.Path)
+ return module, cr, nil
+}
+
+func (i *Interpolater) resourceCountMax(
+ ms *ModuleState,
+ cr *config.Resource,
+ v *config.ResourceVariable) (int, error) {
+ id := v.ResourceId()
+
+ // If we're NOT applying, then we assume we can read the count
+ // from the state. Plan and so on may not have any state yet so
+ // we do a full interpolation.
+ if i.Operation != walkApply {
+ if cr == nil {
+ return 0, nil
+ }
+
+ count, err := cr.Count()
+ if err != nil {
+ return 0, err
+ }
+
+ return count, nil
+ }
+
+ // We need to determine the list of resource keys to get values from.
+ // This needs to be sorted so the order is deterministic. We used to
+ // use "cr.Count()" but that doesn't work if the count is interpolated
+ // and we can't guarantee that so we instead depend on the state.
+ max := -1
+ for k, _ := range ms.Resources {
+ // Get the index number for this resource
+ index := ""
+ if k == id {
+ // If the key is the id, then it's just 0 (no explicit index)
+ index = "0"
+ } else if strings.HasPrefix(k, id+".") {
+ // Grab the index number out of the state
+ index = k[len(id+"."):]
+ if idx := strings.IndexRune(index, '.'); idx >= 0 {
+ index = index[:idx]
+ }
+ }
+
+ // If there was no index then this resource didn't match
+ // the one we're looking for, exit.
+ if index == "" {
+ continue
+ }
+
+ // Turn the index into an int
+ raw, err := strconv.ParseInt(index, 0, 0)
+ if err != nil {
+ return 0, fmt.Errorf(
+ "%s: error parsing index %q as int: %s",
+ id, index, err)
+ }
+
+ // Keep track of this index if its the max
+ if new := int(raw); new > max {
+ max = new
+ }
+ }
+
+ // If we never found any matching resources in the state, we
+ // have zero.
+ if max == -1 {
+ return 0, nil
+ }
+
+ // The result value is "max+1" because we're returning the
+ // max COUNT, not the max INDEX, and we zero-index.
+ return max + 1, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
new file mode 100644
index 00000000..bd32c79f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
@@ -0,0 +1,14 @@
+package terraform
+
+// NodeCountBoundary fixes any "count boundaries" in the state: resources
+// that are named "foo.0" when they should be named "foo"
+type NodeCountBoundary struct{}
+
+func (n *NodeCountBoundary) Name() string {
+ return "meta.count-boundary (count boundary fixup)"
+}
+
+// GraphNodeEvalable
+func (n *NodeCountBoundary) EvalTree() EvalNode {
+ return &EvalCountFixZeroOneBoundaryGlobal{}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
new file mode 100644
index 00000000..e32cea88
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
@@ -0,0 +1,22 @@
+package terraform
+
+// NodeDestroyableDataResource represents a data resource that is
+// "destroyable": it is destroyed simply by writing a nil state entry.
+type NodeDestroyableDataResource struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyableDataResource) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Just destroy it.
+ var state *InstanceState
+ return &EvalWriteState{
+ Name: stateId,
+ State: &state, // state is nil here
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
new file mode 100644
index 00000000..d504c892
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
@@ -0,0 +1,198 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// NodeRefreshableDataResource represents a data resource that is
+// "refreshable": it expands by count into instances to be refreshed.
+type NodeRefreshableDataResource struct {
+ *NodeAbstractCountResource
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ // Grab the state which we read
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Expand the resource count which must be available by now from EvalTree
+ count, err := n.Config.Count()
+ if err != nil {
+ return nil, err
+ }
+
+ // The concrete resource factory we'll use
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ // Add the config and state since we don't do that via transforms
+ a.Config = n.Config
+
+ return &NodeRefreshableDataResourceInstance{
+ NodeAbstractResource: a,
+ }
+ }
+
+ // Start creating the steps
+ steps := []GraphTransformer{
+ // Expand the count.
+ &ResourceCountTransformer{
+ Concrete: concreteResource,
+ Count: count,
+ Addr: n.ResourceAddr(),
+ },
+
+ // Attach the state
+ &AttachStateTransformer{State: state},
+
+ // Targeting
+ &TargetsTransformer{ParsedTargets: n.Targets},
+
+ // Connect references so ordering is correct
+ &ReferenceTransformer{},
+
+ // Make sure there is a single root
+ &RootTransformer{},
+ }
+
+ // Build the graph
+ b := &BasicGraphBuilder{
+ Steps: steps,
+ Validate: true,
+ Name: "NodeRefreshableDataResource",
+ }
+
+ return b.Build(ctx.Path())
+}
+
+// NodeRefreshableDataResourceInstance represents a _single_ resource instance
+// that is refreshable.
+type NodeRefreshableDataResourceInstance struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ }
+
+ // Get the state if we have it, if not we build it
+ rs := n.ResourceState
+ if rs == nil {
+ rs = &ResourceState{}
+ }
+
+ // If the config isn't empty we update the state
+ if n.Config != nil {
+ rs = &ResourceState{
+ Type: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: n.StateReferences(),
+ }
+ }
+
+ // Build the resource for eval
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var config *ResourceConfig
+ var diff *InstanceDiff
+ var provider ResourceProvider
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ // Always destroy the existing state first, since we must
+ // make sure that values from a previous read will not
+ // get interpolated if we end up needing to defer our
+ // loading until apply time.
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: rs.Type,
+ Provider: rs.Provider,
+ Dependencies: rs.Dependencies,
+ State: &state, // state is nil here
+ },
+
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+
+ // The rest of this pass can proceed only if there are no
+ // computed values in our config.
+ // (If there are, we'll deal with this during the plan and
+ // apply phases.)
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if config.ComputedKeys != nil && len(config.ComputedKeys) > 0 {
+ return true, EvalEarlyExitError{}
+ }
+
+ // If the config explicitly has a depends_on for this
+ // data source, assume the intention is to prevent
+ // refreshing ahead of that dependency.
+ if len(n.Config.DependsOn) > 0 {
+ return true, EvalEarlyExitError{}
+ }
+
+ return true, nil
+ },
+
+ Then: EvalNoop{},
+ },
+
+ // The remainder of this pass is the same as running
+ // a "plan" pass immediately followed by an "apply" pass,
+ // populating the state early so it'll be available to
+ // provider configurations that need this data during
+ // refresh/plan.
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+
+ &EvalReadDataDiff{
+ Info: info,
+ Config: &config,
+ Provider: &provider,
+ Output: &diff,
+ OutputState: &state,
+ },
+
+ &EvalReadDataApply{
+ Info: info,
+ Diff: &diff,
+ Provider: &provider,
+ Output: &state,
+ },
+
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: rs.Type,
+ Provider: rs.Provider,
+ Dependencies: rs.Dependencies,
+ State: &state,
+ },
+
+ &EvalUpdateStateHook{},
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
new file mode 100644
index 00000000..319df1e3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
@@ -0,0 +1,29 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// NodeDestroyableModuleVariable represents a module variable involved in a module destruction.
+type NodeDestroyableModuleVariable struct {
+ PathValue []string
+}
+
+func (n *NodeDestroyableModuleVariable) Name() string {
+ result := "plan-destroy"
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeDestroyableModuleVariable) Path() []string {
+ return n.PathValue
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyableModuleVariable) EvalTree() EvalNode {
+ return &EvalDiffDestroyModule{Path: n.PathValue}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
new file mode 100644
index 00000000..13fe8fc3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
@@ -0,0 +1,125 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// NodeApplyableModuleVariable represents a module variable input during
+// the apply step.
+type NodeApplyableModuleVariable struct {
+ PathValue []string
+ Config *config.Variable // Config is the var in the config
+ Value *config.RawConfig // Value is the value that is set
+
+ Module *module.Tree // Antiquated, want to remove
+}
+
+func (n *NodeApplyableModuleVariable) Name() string {
+ result := fmt.Sprintf("var.%s", n.Config.Name)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeApplyableModuleVariable) Path() []string {
+ // We execute in the parent scope (above our own module) so that
+ // we can access the proper interpolations.
+ if len(n.PathValue) > 2 {
+ return n.PathValue[:len(n.PathValue)-1]
+ }
+
+ return rootModulePath
+}
+
+// RemovableIfNotTargeted
+func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool {
+ // We need to add this so that this node will be removed if
+ // it isn't targeted or a dependency of a target.
+ return true
+}
+
+// GraphNodeReferenceGlobal
+func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool {
+ // We have to create fully qualified references because we cross
+ // boundaries here: our ReferenceableName is in one path and our
+ // References are from another path.
+ return true
+}
+
+// GraphNodeReferenceable
+func (n *NodeApplyableModuleVariable) ReferenceableName() []string {
+ return []string{n.Name()}
+}
+
+// GraphNodeReferencer
+func (n *NodeApplyableModuleVariable) References() []string {
+ // If we have no value set, we depend on nothing
+ if n.Value == nil {
+ return nil
+ }
+
+ // Can't depend on anything if we're in the root
+ if len(n.PathValue) < 2 {
+ return nil
+ }
+
+ // Otherwise, we depend on anything that is in our value, but
+ // specifically in the namespace of the parent path.
+ // Create the prefix based on the path
+ var prefix string
+ if p := n.Path(); len(p) > 0 {
+ prefix = modulePrefixStr(p)
+ }
+
+ result := ReferencesFromConfig(n.Value)
+ return modulePrefixList(result, prefix)
+}
+
+// GraphNodeEvalable
+func (n *NodeApplyableModuleVariable) EvalTree() EvalNode {
+ // If we have no value, do nothing
+ if n.Value == nil {
+ return &EvalNoop{}
+ }
+
+ // Otherwise, interpolate the value of this variable and set it
+ // within the variables mapping.
+ var config *ResourceConfig
+ variables := make(map[string]interface{})
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalInterpolate{
+ Config: n.Value,
+ Output: &config,
+ },
+
+ &EvalVariableBlock{
+ Config: &config,
+ VariableValues: variables,
+ },
+
+ &EvalCoerceMapVariable{
+ Variables: variables,
+ ModulePath: n.PathValue,
+ ModuleTree: n.Module,
+ },
+
+ &EvalTypeCheckVariable{
+ Variables: variables,
+ ModulePath: n.PathValue,
+ ModuleTree: n.Module,
+ },
+
+ &EvalSetVariables{
+ Module: &n.PathValue[len(n.PathValue)-1],
+ Variables: variables,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
new file mode 100644
index 00000000..e28e6f02
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
@@ -0,0 +1,76 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeApplyableOutput represents an output that is "applyable":
+// it is ready to be applied.
+type NodeApplyableOutput struct {
+ PathValue []string
+ Config *config.Output // Config is the output in the config
+}
+
+func (n *NodeApplyableOutput) Name() string {
+ result := fmt.Sprintf("output.%s", n.Config.Name)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeApplyableOutput) Path() []string {
+ return n.PathValue
+}
+
+// RemovableIfNotTargeted
+func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool {
+ // We need to add this so that this node will be removed if
+ // it isn't targeted or a dependency of a target.
+ return true
+}
+
+// GraphNodeReferenceable
+func (n *NodeApplyableOutput) ReferenceableName() []string {
+ name := fmt.Sprintf("output.%s", n.Config.Name)
+ return []string{name}
+}
+
+// GraphNodeReferencer
+func (n *NodeApplyableOutput) References() []string {
+ var result []string
+ result = append(result, n.Config.DependsOn...)
+ result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
+ for _, v := range result {
+ split := strings.Split(v, "/")
+ for i, s := range split {
+ split[i] = s + ".destroy"
+ }
+
+ result = append(result, strings.Join(split, "/"))
+ }
+
+ return result
+}
+
+// GraphNodeEvalable
+func (n *NodeApplyableOutput) EvalTree() EvalNode {
+ return &EvalOpFilter{
+ Ops: []walkOperation{walkRefresh, walkPlan, walkApply,
+ walkDestroy, walkInput, walkValidate},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalWriteOutput{
+ Name: n.Config.Name,
+ Sensitive: n.Config.Sensitive,
+ Value: n.Config.RawConfig,
+ },
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
new file mode 100644
index 00000000..636a15df
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
@@ -0,0 +1,35 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// NodeOutputOrphan represents an output that is an orphan.
+type NodeOutputOrphan struct {
+ OutputName string
+ PathValue []string
+}
+
+func (n *NodeOutputOrphan) Name() string {
+ result := fmt.Sprintf("output.%s (orphan)", n.OutputName)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeOutputOrphan) Path() []string {
+ return n.PathValue
+}
+
+// GraphNodeEvalable
+func (n *NodeOutputOrphan) EvalTree() EvalNode {
+ return &EvalOpFilter{
+ Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
+ Node: &EvalDeleteOutput{
+ Name: n.OutputName,
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
new file mode 100644
index 00000000..8e2c176f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
@@ -0,0 +1,11 @@
+package terraform
+
+// NodeApplyableProvider represents a provider during an apply.
+type NodeApplyableProvider struct {
+ *NodeAbstractProvider
+}
+
+// GraphNodeEvalable
+func (n *NodeApplyableProvider) EvalTree() EvalNode {
+ return ProviderEvalTree(n.NameValue, n.ProviderConfig())
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
new file mode 100644
index 00000000..6cc83656
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
@@ -0,0 +1,85 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ConcreteProviderNodeFunc is a callback type used to convert an
+// abstract provider to a concrete one of some type.
+type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex
+
+// NodeAbstractProvider represents a provider that has no associated operations.
+// It registers all the common interfaces across operations for providers.
+type NodeAbstractProvider struct {
+ NameValue string
+ PathValue []string
+
+ // The fields below will be automatically set using the Attach
+ // interfaces if you're running those transforms, but also be explicitly
+ // set if you already have that information.
+
+ Config *config.ProviderConfig
+}
+
+func (n *NodeAbstractProvider) Name() string {
+ result := fmt.Sprintf("provider.%s", n.NameValue)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeAbstractProvider) Path() []string {
+ return n.PathValue
+}
+
+// RemovableIfNotTargeted
+func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool {
+ // We need to add this so that this node will be removed if
+ // it isn't targeted or a dependency of a target.
+ return true
+}
+
+// GraphNodeReferencer
+func (n *NodeAbstractProvider) References() []string {
+ if n.Config == nil {
+ return nil
+ }
+
+ return ReferencesFromConfig(n.Config.RawConfig)
+}
+
+// GraphNodeProvider
+func (n *NodeAbstractProvider) ProviderName() string {
+ return n.NameValue
+}
+
+// GraphNodeProvider
+func (n *NodeAbstractProvider) ProviderConfig() *config.RawConfig {
+ if n.Config == nil {
+ return nil
+ }
+
+ return n.Config.RawConfig
+}
+
+// GraphNodeAttachProvider
+func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) {
+ n.Config = c
+}
+
+// GraphNodeDotter impl.
+func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+ return &dag.DotNode{
+ Name: name,
+ Attrs: map[string]string{
+ "label": n.Name(),
+ "shape": "diamond",
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
new file mode 100644
index 00000000..25e7e620
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
@@ -0,0 +1,38 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// NodeDisabledProvider represents a provider that is disabled. A disabled
+// provider does nothing. It exists to properly set inheritance information
+// for child providers.
+type NodeDisabledProvider struct {
+ *NodeAbstractProvider
+}
+
+func (n *NodeDisabledProvider) Name() string {
+ return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name())
+}
+
+// GraphNodeEvalable
+func (n *NodeDisabledProvider) EvalTree() EvalNode {
+ var resourceConfig *ResourceConfig
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalInterpolate{
+ Config: n.ProviderConfig(),
+ Output: &resourceConfig,
+ },
+ &EvalBuildProviderConfig{
+ Provider: n.ProviderName(),
+ Config: &resourceConfig,
+ Output: &resourceConfig,
+ },
+ &EvalSetProviderConfig{
+ Provider: n.ProviderName(),
+ Config: &resourceConfig,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
new file mode 100644
index 00000000..bb117c1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
@@ -0,0 +1,44 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeProvisioner represents a provisioner that has no associated operations.
+// It registers all the common interfaces across operations for provisioners.
+type NodeProvisioner struct {
+ NameValue string
+ PathValue []string
+
+ // The fields below will be automatically set using the Attach
+ // interfaces if you're running those transforms, but also be explicitly
+ // set if you already have that information.
+
+ Config *config.ProviderConfig
+}
+
+func (n *NodeProvisioner) Name() string {
+ result := fmt.Sprintf("provisioner.%s", n.NameValue)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeProvisioner) Path() []string {
+ return n.PathValue
+}
+
+// GraphNodeProvisioner
+func (n *NodeProvisioner) ProvisionerName() string {
+ return n.NameValue
+}
+
+// GraphNodeEvalable impl.
+func (n *NodeProvisioner) EvalTree() EvalNode {
+ return &EvalInitProvisioner{Name: n.NameValue}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
new file mode 100644
index 00000000..50bb7079
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
@@ -0,0 +1,240 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ConcreteResourceNodeFunc is a callback type used to convert an
+// abstract resource to a concrete one of some type.
+type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex
+
+// GraphNodeResource is implemented by any nodes that represent a resource.
+// The type of operation cannot be assumed, only that this node represents
+// the given resource.
+type GraphNodeResource interface {
+ ResourceAddr() *ResourceAddress
+}
+
+// NodeAbstractResource represents a resource that has no associated
+// operations. It registers all the interfaces for a resource that are common
+// across multiple operation types.
+type NodeAbstractResource struct {
+ Addr *ResourceAddress // Addr is the address for this resource
+
+ // The fields below will be automatically set using the Attach
+ // interfaces if you're running those transforms, but also be explicitly
+ // set if you already have that information.
+
+ Config *config.Resource // Config is the resource in the config
+ ResourceState *ResourceState // ResourceState is the ResourceState for this
+
+ Targets []ResourceAddress // Set from GraphNodeTargetable
+}
+
+func (n *NodeAbstractResource) Name() string {
+ return n.Addr.String()
+}
+
+// GraphNodeSubPath
+func (n *NodeAbstractResource) Path() []string {
+ return n.Addr.Path
+}
+
+// GraphNodeReferenceable
+func (n *NodeAbstractResource) ReferenceableName() []string {
+ // We always are referenceable as "type.name" as long as
+ // we have a config or address. Determine what that value is.
+ var id string
+ if n.Config != nil {
+ id = n.Config.Id()
+ } else if n.Addr != nil {
+ addrCopy := n.Addr.Copy()
+ addrCopy.Path = nil // ReferenceTransformer handles paths
+ addrCopy.Index = -1 // We handle indexes below
+ id = addrCopy.String()
+ } else {
+ // No way to determine our type.name, just return
+ return nil
+ }
+
+ var result []string
+
+ // Always include our own ID. This is primarily for backwards
+ // compatibility with states that didn't yet support the more
+ // specific dep string.
+ result = append(result, id)
+
+ // We represent all multi-access
+ result = append(result, fmt.Sprintf("%s.*", id))
+
+ // We represent either a specific number, or all numbers
+ suffix := "N"
+ if n.Addr != nil {
+ idx := n.Addr.Index
+ if idx == -1 {
+ idx = 0
+ }
+
+ suffix = fmt.Sprintf("%d", idx)
+ }
+ result = append(result, fmt.Sprintf("%s.%s", id, suffix))
+
+ return result
+}
+
+// GraphNodeReferencer
+func (n *NodeAbstractResource) References() []string {
+ // If we have a config, that is our source of truth
+ if c := n.Config; c != nil {
+ // Grab all the references
+ var result []string
+ result = append(result, c.DependsOn...)
+ result = append(result, ReferencesFromConfig(c.RawCount)...)
+ result = append(result, ReferencesFromConfig(c.RawConfig)...)
+ for _, p := range c.Provisioners {
+ if p.When == config.ProvisionerWhenCreate {
+ result = append(result, ReferencesFromConfig(p.ConnInfo)...)
+ result = append(result, ReferencesFromConfig(p.RawConfig)...)
+ }
+ }
+
+ return uniqueStrings(result)
+ }
+
+ // If we have state, that is our next source
+ if s := n.ResourceState; s != nil {
+ return s.Dependencies
+ }
+
+ return nil
+}
+
+// StateReferences returns the dependencies to put into the state for
+// this resource.
+func (n *NodeAbstractResource) StateReferences() []string {
+ self := n.ReferenceableName()
+
+ // Determine what our "prefix" is for checking for references to
+ // ourself.
+ addrCopy := n.Addr.Copy()
+ addrCopy.Index = -1
+ selfPrefix := addrCopy.String() + "."
+
+ depsRaw := n.References()
+ deps := make([]string, 0, len(depsRaw))
+ for _, d := range depsRaw {
+ // Ignore any variable dependencies
+ if strings.HasPrefix(d, "var.") {
+ continue
+ }
+
+ // If this has a backup ref, ignore those for now. The old state
+ // file never contained those and I'd rather store the rich types we
+ // add in the future.
+ if idx := strings.IndexRune(d, '/'); idx != -1 {
+ d = d[:idx]
+ }
+
+ // If we're referencing ourself, then ignore it
+ found := false
+ for _, s := range self {
+ if d == s {
+ found = true
+ }
+ }
+ if found {
+ continue
+ }
+
+ // If this is a reference to ourself and a specific index, we keep
+ // it. For example, if this resource is "foo.bar" and the reference
+ // is "foo.bar.0" then we keep it exact. Otherwise, we strip it.
+ if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) {
+ d = d[:len(d)-2]
+ }
+
+ // This is sad. The dependencies are currently in the format of
+ // "module.foo.bar" (the full field). This strips the field off.
+ if strings.HasPrefix(d, "module.") {
+ parts := strings.SplitN(d, ".", 3)
+ d = strings.Join(parts[0:2], ".")
+ }
+
+ deps = append(deps, d)
+ }
+
+ return deps
+}
+
+// GraphNodeProviderConsumer
+func (n *NodeAbstractResource) ProvidedBy() []string {
+ // If we have a config we prefer that above all else
+ if n.Config != nil {
+ return []string{resourceProvider(n.Config.Type, n.Config.Provider)}
+ }
+
+ // If we have state, then we will use the provider from there
+ if n.ResourceState != nil && n.ResourceState.Provider != "" {
+ return []string{n.ResourceState.Provider}
+ }
+
+ // Use our type
+ return []string{resourceProvider(n.Addr.Type, "")}
+}
+
+// GraphNodeProvisionerConsumer
+func (n *NodeAbstractResource) ProvisionedBy() []string {
+ // If we have no configuration, then we have no provisioners
+ if n.Config == nil {
+ return nil
+ }
+
+ // Build the list of provisioners we need based on the configuration.
+ // It is okay to have duplicates here.
+ result := make([]string, len(n.Config.Provisioners))
+ for i, p := range n.Config.Provisioners {
+ result[i] = p.Type
+ }
+
+ return result
+}
+
+// GraphNodeResource, GraphNodeAttachResourceState
+func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress {
+ return n.Addr
+}
+
+// GraphNodeAddressable, TODO: remove, used by target, should unify
+func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress {
+ return n.ResourceAddr()
+}
+
+// GraphNodeTargetable
+func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) {
+ n.Targets = targets
+}
+
+// GraphNodeAttachResourceState
+func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) {
+ n.ResourceState = s
+}
+
+// GraphNodeAttachResourceConfig
+func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) {
+ n.Config = c
+}
+
+// GraphNodeDotter impl.
+func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+ return &dag.DotNode{
+ Name: name,
+ Attrs: map[string]string{
+ "label": n.Name(),
+ "shape": "box",
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
new file mode 100644
index 00000000..573570d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
@@ -0,0 +1,50 @@
+package terraform
+
+// NodeAbstractCountResource should be embedded instead of NodeAbstractResource
+// if the resource has a `count` value that needs to be expanded.
+//
+// The embedder should implement `DynamicExpand` to process the count.
+type NodeAbstractCountResource struct {
+ *NodeAbstractResource
+
+ // Validate, if true, will perform the validation for the count.
+ // This should only be turned on for the "validate" operation.
+ Validate bool
+}
+
+// GraphNodeEvalable
+func (n *NodeAbstractCountResource) EvalTree() EvalNode {
+ // We only check if the count is computed if we're not validating.
+ // If we're validating we allow computed counts since they just turn
+ // into more computed values.
+ var evalCountCheckComputed EvalNode
+ if !n.Validate {
+ evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config}
+ }
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ // The EvalTree for a plannable resource primarily involves
+ // interpolating the count since it can contain variables
+ // we only just received access to.
+ //
+ // With the interpolated count, we can then DynamicExpand
+ // into the proper number of instances.
+ &EvalInterpolate{Config: n.Config.RawCount},
+
+ // Check if the count is computed
+ evalCountCheckComputed,
+
+ // If validation is enabled, perform the validation
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ return n.Validate, nil
+ },
+
+ Then: &EvalValidateCount{Resource: n.Config},
+ },
+
+ &EvalCountFixZeroOneBoundary{Resource: n.Config},
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
new file mode 100644
index 00000000..3599782b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
@@ -0,0 +1,357 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeApplyableResource represents a resource that is "applyable":
+// it is ready to be applied and is represented by a diff.
+type NodeApplyableResource struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeCreator
+func (n *NodeApplyableResource) CreateAddr() *ResourceAddress {
+ return n.NodeAbstractResource.Addr
+}
+
+// GraphNodeReferencer, overriding NodeAbstractResource
+func (n *NodeApplyableResource) References() []string {
+ result := n.NodeAbstractResource.References()
+
+ // The "apply" side of a resource generally also depends on the
+ // destruction of its dependencies as well. For example, if a LB
+ // references a set of VMs with ${vm.foo.*.id}, then we must wait for
+ // the destruction so we get the newly updated list of VMs.
+ //
+ // The exception here is CBD. When CBD is set, we don't do this since
+ // it would create a cycle. By not creating a cycle, we require two
+ // applies since the first apply the creation step will use the OLD
+ // values (pre-destroy) and the second step will update.
+ //
+ // This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x).
+ // We mimic that behavior here now and can improve upon it in the future.
+ //
+ // This behavior is tested in graph_build_apply_test.go to test ordering.
+ cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy
+ if !cbd {
+ // The "apply" side of a resource always depends on the destruction
+ // of all its dependencies in addition to the creation.
+ for _, v := range result {
+ result = append(result, v+".destroy")
+ }
+ }
+
+ return result
+}
+
+// GraphNodeEvalable
+func (n *NodeApplyableResource) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ }
+
+ // Build the resource for eval
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Determine the dependencies for the state.
+ stateDeps := n.StateReferences()
+
+ // Eval info is different depending on what kind of resource this is
+ switch n.Config.Mode {
+ case config.ManagedResourceMode:
+ return n.evalTreeManagedResource(
+ stateId, info, resource, stateDeps,
+ )
+ case config.DataResourceMode:
+ return n.evalTreeDataResource(
+ stateId, info, resource, stateDeps)
+ default:
+ panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
+ }
+}
+
+func (n *NodeApplyableResource) evalTreeDataResource(
+ stateId string, info *InstanceInfo,
+ resource *Resource, stateDeps []string) EvalNode {
+ var provider ResourceProvider
+ var config *ResourceConfig
+ var diff *InstanceDiff
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ // Build the instance info
+ &EvalInstanceInfo{
+ Info: info,
+ },
+
+ // Get the saved diff for apply
+ &EvalReadDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+
+ // Stop here if we don't actually have a diff
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if diff == nil {
+ return true, EvalEarlyExitError{}
+ }
+
+ if diff.GetAttributesLen() == 0 {
+ return true, EvalEarlyExitError{}
+ }
+
+ return true, nil
+ },
+ Then: EvalNoop{},
+ },
+
+ // We need to re-interpolate the config here, rather than
+ // just using the diff's values directly, because we've
+ // potentially learned more variable values during the
+ // apply pass that weren't known when the diff was produced.
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+
+ // Make a new diff with our newly-interpolated config.
+ &EvalReadDataDiff{
+ Info: info,
+ Config: &config,
+ Previous: &diff,
+ Provider: &provider,
+ Output: &diff,
+ },
+
+ &EvalReadDataApply{
+ Info: info,
+ Diff: &diff,
+ Provider: &provider,
+ Output: &state,
+ },
+
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+
+ // Clear the diff now that we've applied it, so
+ // later nodes won't see a diff that's now a no-op.
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: nil,
+ },
+
+ &EvalUpdateStateHook{},
+ },
+ }
+}
+
+func (n *NodeApplyableResource) evalTreeManagedResource(
+ stateId string, info *InstanceInfo,
+ resource *Resource, stateDeps []string) EvalNode {
+ // Declare a bunch of variables that are used for state during
+	// evaluation. Most of these are written to by-address below.
+ var provider ResourceProvider
+ var diff, diffApply *InstanceDiff
+ var state *InstanceState
+ var resourceConfig *ResourceConfig
+ var err error
+ var createNew bool
+ var createBeforeDestroyEnabled bool
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ // Build the instance info
+ &EvalInstanceInfo{
+ Info: info,
+ },
+
+ // Get the saved diff for apply
+ &EvalReadDiff{
+ Name: stateId,
+ Diff: &diffApply,
+ },
+
+ // We don't want to do any destroys
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if diffApply == nil {
+ return true, EvalEarlyExitError{}
+ }
+
+ if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 {
+ return true, EvalEarlyExitError{}
+ }
+
+ diffApply.SetDestroy(false)
+ return true, nil
+ },
+ Then: EvalNoop{},
+ },
+
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ destroy := false
+ if diffApply != nil {
+ destroy = diffApply.GetDestroy() || diffApply.RequiresNew()
+ }
+
+ createBeforeDestroyEnabled =
+ n.Config.Lifecycle.CreateBeforeDestroy &&
+ destroy
+
+ return createBeforeDestroyEnabled, nil
+ },
+ Then: &EvalDeposeState{
+ Name: stateId,
+ },
+ },
+
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &resourceConfig,
+ },
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ // Re-run validation to catch any errors we missed, e.g. type
+ // mismatches on computed values.
+ &EvalValidateResource{
+ Provider: &provider,
+ Config: &resourceConfig,
+ ResourceName: n.Config.Name,
+ ResourceType: n.Config.Type,
+ ResourceMode: n.Config.Mode,
+ IgnoreWarnings: true,
+ },
+ &EvalDiff{
+ Info: info,
+ Config: &resourceConfig,
+ Resource: n.Config,
+ Provider: &provider,
+ Diff: &diffApply,
+ State: &state,
+ OutputDiff: &diffApply,
+ },
+
+ // Get the saved diff
+ &EvalReadDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+
+ // Compare the diffs
+ &EvalCompareDiff{
+ Info: info,
+ One: &diff,
+ Two: &diffApply,
+ },
+
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ // Call pre-apply hook
+ &EvalApplyPre{
+ Info: info,
+ State: &state,
+ Diff: &diffApply,
+ },
+ &EvalApply{
+ Info: info,
+ State: &state,
+ Diff: &diffApply,
+ Provider: &provider,
+ Output: &state,
+ Error: &err,
+ CreateNew: &createNew,
+ },
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+ &EvalApplyProvisioners{
+ Info: info,
+ State: &state,
+ Resource: n.Config,
+ InterpResource: resource,
+ CreateNew: &createNew,
+ Error: &err,
+ When: config.ProvisionerWhenCreate,
+ },
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ return createBeforeDestroyEnabled && err != nil, nil
+ },
+ Then: &EvalUndeposeState{
+ Name: stateId,
+ State: &state,
+ },
+ Else: &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+ },
+
+ // We clear the diff out here so that future nodes
+ // don't see a diff that is already complete. There
+ // is no longer a diff!
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: nil,
+ },
+
+ &EvalApplyPost{
+ Info: info,
+ State: &state,
+ Error: &err,
+ },
+ &EvalUpdateStateHook{},
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
new file mode 100644
index 00000000..c2efd2c3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
@@ -0,0 +1,288 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeDestroyResource represents a resource that is to be destroyed.
+type NodeDestroyResource struct {
+ *NodeAbstractResource
+}
+
+func (n *NodeDestroyResource) Name() string {
+ return n.NodeAbstractResource.Name() + " (destroy)"
+}
+
+// GraphNodeDestroyer
+func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress {
+ return n.Addr
+}
+
+// GraphNodeDestroyerCBD
+func (n *NodeDestroyResource) CreateBeforeDestroy() bool {
+ // If we have no config, we just assume no
+ if n.Config == nil {
+ return false
+ }
+
+ return n.Config.Lifecycle.CreateBeforeDestroy
+}
+
+// GraphNodeDestroyerCBD
+func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error {
+ // If we have no config, do nothing since it won't affect the
+ // create step anyways.
+ if n.Config == nil {
+ return nil
+ }
+
+ // Set CBD to true
+ n.Config.Lifecycle.CreateBeforeDestroy = true
+
+ return nil
+}
+
+// GraphNodeReferenceable, overriding NodeAbstractResource
+func (n *NodeDestroyResource) ReferenceableName() []string {
+ // We modify our referenceable name to have the suffix of ".destroy"
+ // since depending on the creation side doesn't necessarilly mean
+ // depending on destruction.
+ suffix := ".destroy"
+
+ // If we're CBD, we also append "-cbd". This is because CBD will setup
+ // its own edges (in CBDEdgeTransformer). Depending on the "destroy"
+ // side generally doesn't mean depending on CBD as well. See GH-11349
+ if n.CreateBeforeDestroy() {
+ suffix += "-cbd"
+ }
+
+ result := n.NodeAbstractResource.ReferenceableName()
+ for i, v := range result {
+ result[i] = v + suffix
+ }
+
+ return result
+}
+
+// GraphNodeReferencer, overriding NodeAbstractResource
+func (n *NodeDestroyResource) References() []string {
+ // If we have a config, then we need to include destroy-time dependencies
+ if c := n.Config; c != nil {
+ var result []string
+ for _, p := range c.Provisioners {
+ // We include conn info and config for destroy time provisioners
+ // as dependencies that we have.
+ if p.When == config.ProvisionerWhenDestroy {
+ result = append(result, ReferencesFromConfig(p.ConnInfo)...)
+ result = append(result, ReferencesFromConfig(p.RawConfig)...)
+ }
+ }
+
+ return result
+ }
+
+ return nil
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ // If we have no config we do nothing
+ if n.Addr == nil {
+ return nil, nil
+ }
+
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Start creating the steps
+ steps := make([]GraphTransformer, 0, 5)
+
+ // We want deposed resources in the state to be destroyed
+ steps = append(steps, &DeposedTransformer{
+ State: state,
+ View: n.Addr.stateId(),
+ })
+
+ // Target
+ steps = append(steps, &TargetsTransformer{
+ ParsedTargets: n.Targets,
+ })
+
+ // Always end with the root being added
+ steps = append(steps, &RootTransformer{})
+
+ // Build the graph
+ b := &BasicGraphBuilder{
+ Steps: steps,
+ Name: "NodeResourceDestroy",
+ }
+ return b.Build(ctx.Path())
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyResource) EvalTree() EvalNode {
+ // stateId is the ID to put into the state
+ stateId := n.Addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: n.Addr.Type,
+ uniqueExtra: "destroy",
+ }
+
+ // Build the resource for eval
+ addr := n.Addr
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Get our state
+ rs := n.ResourceState
+ if rs == nil {
+ rs = &ResourceState{}
+ }
+
+ var diffApply *InstanceDiff
+ var provider ResourceProvider
+ var state *InstanceState
+ var err error
+ return &EvalOpFilter{
+ Ops: []walkOperation{walkApply, walkDestroy},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ // Get the saved diff for apply
+ &EvalReadDiff{
+ Name: stateId,
+ Diff: &diffApply,
+ },
+
+ // Filter the diff so we only get the destroy
+ &EvalFilterDiff{
+ Diff: &diffApply,
+ Output: &diffApply,
+ Destroy: true,
+ },
+
+ // If we're not destroying, then compare diffs
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if diffApply != nil && diffApply.GetDestroy() {
+ return true, nil
+ }
+
+ return true, EvalEarlyExitError{}
+ },
+ Then: EvalNoop{},
+ },
+
+ // Load the instance info so we have the module path set
+ &EvalInstanceInfo{Info: info},
+
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalRequireState{
+ State: &state,
+ },
+
+ // Call pre-apply hook
+ &EvalApplyPre{
+ Info: info,
+ State: &state,
+ Diff: &diffApply,
+ },
+
+ // Run destroy provisioners if not tainted
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if state != nil && state.Tainted {
+ return false, nil
+ }
+
+ return true, nil
+ },
+
+ Then: &EvalApplyProvisioners{
+ Info: info,
+ State: &state,
+ Resource: n.Config,
+ InterpResource: resource,
+ Error: &err,
+ When: config.ProvisionerWhenDestroy,
+ },
+ },
+
+ // If we have a provisioning error, then we just call
+ // the post-apply hook now.
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ return err != nil, nil
+ },
+
+ Then: &EvalApplyPost{
+ Info: info,
+ State: &state,
+ Error: &err,
+ },
+ },
+
+ // Make sure we handle data sources properly.
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if n.Addr == nil {
+ return false, fmt.Errorf("nil address")
+ }
+
+ if n.Addr.Mode == config.DataResourceMode {
+ return true, nil
+ }
+
+ return false, nil
+ },
+
+ Then: &EvalReadDataApply{
+ Info: info,
+ Diff: &diffApply,
+ Provider: &provider,
+ Output: &state,
+ },
+ Else: &EvalApply{
+ Info: info,
+ State: &state,
+ Diff: &diffApply,
+ Provider: &provider,
+ Output: &state,
+ Error: &err,
+ },
+ },
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Addr.Type,
+ Provider: rs.Provider,
+ Dependencies: rs.Dependencies,
+ State: &state,
+ },
+ &EvalApplyPost{
+ Info: info,
+ State: &state,
+ Error: &err,
+ },
+ &EvalUpdateStateHook{},
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
new file mode 100644
index 00000000..52bbf88a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
@@ -0,0 +1,83 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// NodePlannableResource represents a resource that is "plannable":
+// it is ready to be planned in order to create a diff.
+type NodePlannableResource struct {
+ *NodeAbstractCountResource
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ // Grab the state which we read
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Expand the resource count which must be available by now from EvalTree
+ count, err := n.Config.Count()
+ if err != nil {
+ return nil, err
+ }
+
+ // The concrete resource factory we'll use
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ // Add the config and state since we don't do that via transforms
+ a.Config = n.Config
+
+ return &NodePlannableResourceInstance{
+ NodeAbstractResource: a,
+ }
+ }
+
+	// The concrete resource factory we'll use for orphans
+ concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex {
+ // Add the config and state since we don't do that via transforms
+ a.Config = n.Config
+
+ return &NodePlannableResourceOrphan{
+ NodeAbstractResource: a,
+ }
+ }
+
+ // Start creating the steps
+ steps := []GraphTransformer{
+ // Expand the count.
+ &ResourceCountTransformer{
+ Concrete: concreteResource,
+ Count: count,
+ Addr: n.ResourceAddr(),
+ },
+
+ // Add the count orphans
+ &OrphanResourceCountTransformer{
+ Concrete: concreteResourceOrphan,
+ Count: count,
+ Addr: n.ResourceAddr(),
+ State: state,
+ },
+
+ // Attach the state
+ &AttachStateTransformer{State: state},
+
+ // Targeting
+ &TargetsTransformer{ParsedTargets: n.Targets},
+
+ // Connect references so ordering is correct
+ &ReferenceTransformer{},
+
+ // Make sure there is a single root
+ &RootTransformer{},
+ }
+
+ // Build the graph
+ b := &BasicGraphBuilder{
+ Steps: steps,
+ Validate: true,
+ Name: "NodePlannableResource",
+ }
+ return b.Build(ctx.Path())
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
new file mode 100644
index 00000000..9b02362b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
@@ -0,0 +1,53 @@
+package terraform
+
+// NodePlanDestroyableResource represents a resource that is to be planned
+// for destruction: a destroy diff is computed and written for it.
+type NodePlanDestroyableResource struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeDestroyer
+func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress {
+ return n.Addr
+}
+
+// GraphNodeEvalable
+func (n *NodePlanDestroyableResource) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ }
+
+ // Declare a bunch of variables that are used for state during
+	// evaluation. Most of these are written to by-address below.
+ var diff *InstanceDiff
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalDiffDestroy{
+ Info: info,
+ State: &state,
+ Output: &diff,
+ },
+ &EvalCheckPreventDestroy{
+ Resource: n.Config,
+ Diff: &diff,
+ },
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
new file mode 100644
index 00000000..b5295690
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
@@ -0,0 +1,190 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodePlannableResourceInstance represents a _single_ resource
+// instance that is plannable. This means this represents a single
+// count index, for example.
+type NodePlannableResourceInstance struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodePlannableResourceInstance) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ ModulePath: normalizeModulePath(addr.Path),
+ }
+
+ // Build the resource for eval
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Determine the dependencies for the state.
+ stateDeps := n.StateReferences()
+
+ // Eval info is different depending on what kind of resource this is
+ switch n.Config.Mode {
+ case config.ManagedResourceMode:
+ return n.evalTreeManagedResource(
+ stateId, info, resource, stateDeps,
+ )
+ case config.DataResourceMode:
+ return n.evalTreeDataResource(
+ stateId, info, resource, stateDeps)
+ default:
+ panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
+ }
+}
+
+func (n *NodePlannableResourceInstance) evalTreeDataResource(
+ stateId string, info *InstanceInfo,
+ resource *Resource, stateDeps []string) EvalNode {
+ var provider ResourceProvider
+ var config *ResourceConfig
+ var diff *InstanceDiff
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+
+ // We need to re-interpolate the config here because some
+ // of the attributes may have become computed during
+ // earlier planning, due to other resources having
+ // "requires new resource" diffs.
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0
+
+ // If the configuration is complete and we
+ // already have a state then we don't need to
+ // do any further work during apply, because we
+ // already populated the state during refresh.
+ if !computed && state != nil {
+ return true, EvalEarlyExitError{}
+ }
+
+ return true, nil
+ },
+ Then: EvalNoop{},
+ },
+
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+
+ &EvalReadDataDiff{
+ Info: info,
+ Config: &config,
+ Provider: &provider,
+ Output: &diff,
+ OutputState: &state,
+ },
+
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+ },
+ }
+}
+
+func (n *NodePlannableResourceInstance) evalTreeManagedResource(
+ stateId string, info *InstanceInfo,
+ resource *Resource, stateDeps []string) EvalNode {
+ // Declare a bunch of variables that are used for state during
+	// evaluation. Most of these are written to by-address below.
+ var provider ResourceProvider
+ var diff *InstanceDiff
+ var state *InstanceState
+ var resourceConfig *ResourceConfig
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &resourceConfig,
+ },
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ // Re-run validation to catch any errors we missed, e.g. type
+ // mismatches on computed values.
+ &EvalValidateResource{
+ Provider: &provider,
+ Config: &resourceConfig,
+ ResourceName: n.Config.Name,
+ ResourceType: n.Config.Type,
+ ResourceMode: n.Config.Mode,
+ IgnoreWarnings: true,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalDiff{
+ Name: stateId,
+ Info: info,
+ Config: &resourceConfig,
+ Resource: n.Config,
+ Provider: &provider,
+ State: &state,
+ OutputDiff: &diff,
+ OutputState: &state,
+ },
+ &EvalCheckPreventDestroy{
+ Resource: n.Config,
+ Diff: &diff,
+ },
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
new file mode 100644
index 00000000..73d6e41f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
@@ -0,0 +1,54 @@
+package terraform
+
+// NodePlannableResourceOrphan represents a resource that exists in state but
+// not in configuration (an orphan), so a destroy diff is planned for it.
+type NodePlannableResourceOrphan struct {
+ *NodeAbstractResource
+}
+
+func (n *NodePlannableResourceOrphan) Name() string {
+ return n.NodeAbstractResource.Name() + " (orphan)"
+}
+
+// GraphNodeEvalable
+func (n *NodePlannableResourceOrphan) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ ModulePath: normalizeModulePath(addr.Path),
+ }
+
+ // Declare a bunch of variables that are used for state during
+	// evaluation. Most of these are written to by-address below.
+ var diff *InstanceDiff
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalDiffDestroy{
+ Info: info,
+ State: &state,
+ Output: &diff,
+ },
+ &EvalCheckPreventDestroy{
+ Resource: n.Config,
+ ResourceId: stateId,
+ Diff: &diff,
+ },
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
new file mode 100644
index 00000000..3a44926c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
@@ -0,0 +1,100 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeRefreshableResource represents a resource that is refreshable:
+// its stored state can be synced against the real infrastructure.
+type NodeRefreshableResource struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeDestroyer
+func (n *NodeRefreshableResource) DestroyAddr() *ResourceAddress {
+ return n.Addr
+}
+
+// GraphNodeEvalable
+func (n *NodeRefreshableResource) EvalTree() EvalNode {
+ // Eval info is different depending on what kind of resource this is
+ switch mode := n.Addr.Mode; mode {
+ case config.ManagedResourceMode:
+ return n.evalTreeManagedResource()
+
+ case config.DataResourceMode:
+ // Get the data source node. If we don't have a configuration
+ // then it is an orphan so we destroy it (remove it from the state).
+ var dn GraphNodeEvalable
+ if n.Config != nil {
+ dn = &NodeRefreshableDataResourceInstance{
+ NodeAbstractResource: n.NodeAbstractResource,
+ }
+ } else {
+ dn = &NodeDestroyableDataResource{
+ NodeAbstractResource: n.NodeAbstractResource,
+ }
+ }
+
+ return dn.EvalTree()
+ default:
+ panic(fmt.Errorf("unsupported resource mode %s", mode))
+ }
+}
+
+func (n *NodeRefreshableResource) evalTreeManagedResource() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ }
+
+ // Declare a bunch of variables that are used for state during
+	// evaluation. Most of these are written to by-address below.
+ var provider ResourceProvider
+ var state *InstanceState
+
+ // This happened during initial development. All known cases were
+ // fixed and tested but as a sanity check let's assert here.
+ if n.ResourceState == nil {
+ err := fmt.Errorf(
+ "No resource state attached for addr: %s\n\n"+
+ "This is a bug. Please report this to Terraform with your configuration\n"+
+ "and state attached. Please be careful to scrub any sensitive information.",
+ addr)
+ return &EvalReturnError{Error: &err}
+ }
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalRefresh{
+ Info: info,
+ Provider: &provider,
+ State: &state,
+ Output: &state,
+ },
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.ResourceState.Type,
+ Provider: n.ResourceState.Provider,
+ Dependencies: n.ResourceState.Dependencies,
+ State: &state,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
new file mode 100644
index 00000000..f528f24b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
@@ -0,0 +1,158 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// NodeValidatableResource represents a resource that is used for validation
+// only.
+type NodeValidatableResource struct {
+ *NodeAbstractCountResource
+}
+
+// GraphNodeEvalable
+func (n *NodeValidatableResource) EvalTree() EvalNode {
+ // Ensure we're validating
+ c := n.NodeAbstractCountResource
+ c.Validate = true
+ return c.EvalTree()
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ // Grab the state which we read
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Expand the resource count which must be available by now from EvalTree
+ count := 1
+ if n.Config.RawCount.Value() != unknownValue() {
+ var err error
+ count, err = n.Config.Count()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // The concrete resource factory we'll use
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ // Add the config and state since we don't do that via transforms
+ a.Config = n.Config
+
+ return &NodeValidatableResourceInstance{
+ NodeAbstractResource: a,
+ }
+ }
+
+ // Start creating the steps
+ steps := []GraphTransformer{
+ // Expand the count.
+ &ResourceCountTransformer{
+ Concrete: concreteResource,
+ Count: count,
+ Addr: n.ResourceAddr(),
+ },
+
+ // Attach the state
+ &AttachStateTransformer{State: state},
+
+ // Targeting
+ &TargetsTransformer{ParsedTargets: n.Targets},
+
+ // Connect references so ordering is correct
+ &ReferenceTransformer{},
+
+ // Make sure there is a single root
+ &RootTransformer{},
+ }
+
+ // Build the graph
+ b := &BasicGraphBuilder{
+ Steps: steps,
+ Validate: true,
+ Name: "NodeValidatableResource",
+ }
+
+ return b.Build(ctx.Path())
+}
+
+// This represents a _single_ resource instance to validate.
+type NodeValidatableResourceInstance struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodeValidatableResourceInstance) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // Build the resource for eval
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Declare a bunch of variables that are used for state during
+	// evaluation. Most of these are written to by-address below.
+ var config *ResourceConfig
+ var provider ResourceProvider
+
+ seq := &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalValidateResourceSelfRef{
+ Addr: &addr,
+ Config: &n.Config.RawConfig,
+ },
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+ &EvalValidateResource{
+ Provider: &provider,
+ Config: &config,
+ ResourceName: n.Config.Name,
+ ResourceType: n.Config.Type,
+ ResourceMode: n.Config.Mode,
+ },
+ },
+ }
+
+ // Validate all the provisioners
+ for _, p := range n.Config.Provisioners {
+ var provisioner ResourceProvisioner
+ var connConfig *ResourceConfig
+ seq.Nodes = append(
+ seq.Nodes,
+ &EvalGetProvisioner{
+ Name: p.Type,
+ Output: &provisioner,
+ },
+ &EvalInterpolate{
+ Config: p.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+ &EvalInterpolate{
+ Config: p.ConnInfo.Copy(),
+ Resource: resource,
+ Output: &connConfig,
+ },
+ &EvalValidateProvisioner{
+ Provisioner: &provisioner,
+ Config: &config,
+ ConnConfig: &connConfig,
+ },
+ )
+ }
+
+ return seq
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
new file mode 100644
index 00000000..cb61a4e3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
@@ -0,0 +1,22 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
// NodeRootVariable represents a root variable input.
type NodeRootVariable struct {
	// Config is the variable's configuration block from the root module.
	Config *config.Variable
}
+
+func (n *NodeRootVariable) Name() string {
+ result := fmt.Sprintf("var.%s", n.Config.Name)
+ return result
+}
+
// ReferenceableName implements GraphNodeReferenceable, exposing this
// node to other graph nodes under its "var.<name>" address.
func (n *NodeRootVariable) ReferenceableName() []string {
	return []string{n.Name()}
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go
new file mode 100644
index 00000000..ca99685a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/path.go
@@ -0,0 +1,24 @@
+package terraform
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+)
+
// PathCacheKey returns a cache key for a module path.
//
// The key is the hex-encoded MD5 of the concatenated MD5 digests of
// each path element, which makes it unique per element ordering.
//
// TODO: test
func PathCacheKey(path []string) string {
	outer := md5.New()
	for _, elem := range path {
		inner := md5.Sum([]byte(elem))
		// hash.Hash.Write is documented to never return an error.
		outer.Write(inner[:])
	}
	return hex.EncodeToString(outer.Sum(nil))
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go
new file mode 100644
index 00000000..ea088450
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go
@@ -0,0 +1,153 @@
+package terraform
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/hashicorp/terraform/config/module"
+)
+
func init() {
	// Register the composite types that can appear inside Plan.Vars
	// (map[string]interface{} values) so gob can encode and decode
	// them through interface{}.
	gob.Register(make([]interface{}, 0))
	gob.Register(make([]map[string]interface{}, 0))
	gob.Register(make(map[string]interface{}))
	gob.Register(make(map[string]string))
}
+
// Plan represents a single Terraform execution plan, which contains
// all the information necessary to make an infrastructure change.
//
// A plan has to contain basically the entire state of the world
// necessary to make a change: the state, diff, config, backend config, etc.
// This is so that it can run alone without any other data.
type Plan struct {
	Diff    *Diff                  // the changes to apply
	Module  *module.Tree           // the configuration the plan was built from
	State   *State                 // the state the diff was computed against
	Vars    map[string]interface{} // input variable values used for the plan
	Targets []string               // resource targeting addresses, if any

	// Backend is the backend that this plan should use and store data with.
	Backend *BackendState

	// once guards the lazy initialization done by init().
	once sync.Once
}
+
// Context returns a Context with the data encapsulated in this plan.
//
// The following fields in opts are overridden by the plan: Module,
// Diff, State, Targets, and Variables. Variables is replaced wholesale
// with a copy of the plan's Vars; any caller-supplied variables in
// opts are discarded.
func (p *Plan) Context(opts *ContextOpts) (*Context, error) {
	opts.Diff = p.Diff
	opts.Module = p.Module
	opts.State = p.State
	opts.Targets = p.Targets

	// Copy the vars into a fresh map so later mutation of
	// opts.Variables cannot reach back into the plan itself.
	opts.Variables = make(map[string]interface{})
	for k, v := range p.Vars {
		opts.Variables[k] = v
	}

	return NewContext(opts)
}
+
+func (p *Plan) String() string {
+ buf := new(bytes.Buffer)
+ buf.WriteString("DIFF:\n\n")
+ buf.WriteString(p.Diff.String())
+ buf.WriteString("\n\nSTATE:\n\n")
+ buf.WriteString(p.State.String())
+ return buf.String()
+}
+
// init lazily initializes any nil fields so the rest of the package
// can assume Diff, State, and Vars are non-nil. Guarded by sync.Once,
// so repeated calls do the work at most once.
func (p *Plan) init() {
	p.once.Do(func() {
		if p.Diff == nil {
			p.Diff = new(Diff)
			p.Diff.init()
		}

		if p.State == nil {
			p.State = new(State)
			p.State.init()
		}

		if p.Vars == nil {
			p.Vars = make(map[string]interface{})
		}
	})
}
+
// The magic string and format byte are prefixed onto the plan file
// format so that we have the ability in the future to change the file
// format if we want for any reason.
const planFormatMagic = "tfplan"
const planFormatVersion byte = 1
+
+// ReadPlan reads a plan structure out of a reader in the format that
+// was written by WritePlan.
+func ReadPlan(src io.Reader) (*Plan, error) {
+ var result *Plan
+ var err error
+ n := 0
+
+ // Verify the magic bytes
+ magic := make([]byte, len(planFormatMagic))
+ for n < len(magic) {
+ n, err = src.Read(magic[n:])
+ if err != nil {
+ return nil, fmt.Errorf("error while reading magic bytes: %s", err)
+ }
+ }
+ if string(magic) != planFormatMagic {
+ return nil, fmt.Errorf("not a valid plan file")
+ }
+
+ // Verify the version is something we can read
+ var formatByte [1]byte
+ n, err = src.Read(formatByte[:])
+ if err != nil {
+ return nil, err
+ }
+ if n != len(formatByte) {
+ return nil, errors.New("failed to read plan version byte")
+ }
+
+ if formatByte[0] != planFormatVersion {
+ return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0])
+ }
+
+ dec := gob.NewDecoder(src)
+ if err := dec.Decode(&result); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// WritePlan writes a plan somewhere in a binary format.
+func WritePlan(d *Plan, dst io.Writer) error {
+ // Write the magic bytes so we can determine the file format later
+ n, err := dst.Write([]byte(planFormatMagic))
+ if err != nil {
+ return err
+ }
+ if n != len(planFormatMagic) {
+ return errors.New("failed to write plan format magic bytes")
+ }
+
+ // Write a version byte so we can iterate on version at some point
+ n, err = dst.Write([]byte{planFormatVersion})
+ if err != nil {
+ return err
+ }
+ if n != 1 {
+ return errors.New("failed to write plan version byte")
+ }
+
+ return gob.NewEncoder(dst).Encode(d)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go
new file mode 100644
index 00000000..0acf0beb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go
@@ -0,0 +1,360 @@
+package terraform
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/mitchellh/copystructure"
+ "github.com/mitchellh/reflectwalk"
+)
+
// ResourceProvisionerConfig is used to pair a provisioner
// with its provided configuration. This allows us to use singleton
// instances of each ResourceProvisioner and to keep the relevant
// configuration instead of instantiating a new Provisioner for each
// resource.
type ResourceProvisionerConfig struct {
	Type        string              // provisioner type name, e.g. "remote-exec"
	Provisioner ResourceProvisioner // the (shared) provisioner instance
	Config      *ResourceConfig     // interpolated configuration
	RawConfig   *config.RawConfig   // uninterpolated configuration
	ConnInfo    *config.RawConfig   // connection info for the provisioner
}
+
// Resource encapsulates a resource, its configuration, its provider,
// its current state, and potentially a desired diff from the state it
// wants to reach.
type Resource struct {
	// These are all used by the new EvalNode stuff.
	Name       string // resource name from the config
	Type       string // resource type, e.g. "aws_instance"
	CountIndex int    // index within a counted resource

	// These aren't really used anymore anywhere, but we keep them around
	// since we haven't done a proper cleanup yet.
	Id           string
	Info         *InstanceInfo
	Config       *ResourceConfig
	Dependencies []string
	Diff         *InstanceDiff
	Provider     ResourceProvider
	State        *InstanceState
	Provisioners []*ResourceProvisionerConfig
	Flags        ResourceFlag
}
+
// ResourceFlag specifies what kind of instance we're working with, whether
// it's a primary instance, a tainted instance, or an orphan.
// (The comment previously referred to a "ResourceKind" type that does
// not exist; this is the type it described.)
type ResourceFlag byte
+
// InstanceInfo is used to hold information about the instance and/or
// resource being modified.
type InstanceInfo struct {
	// Id is a unique name to represent this instance. This is not related
	// to InstanceState.ID in any way.
	Id string

	// ModulePath is the complete path of the module containing this
	// instance.
	ModulePath []string

	// Type is the resource type of this instance
	Type string

	// uniqueExtra is an internal field that can be populated to supply
	// extra metadata that is used to identify a unique instance in
	// the graph walk. This will be appended to HumanID when uniqueId
	// is called.
	uniqueExtra string
}
+
+// HumanId is a unique Id that is human-friendly and useful for UI elements.
+func (i *InstanceInfo) HumanId() string {
+ if i == nil {
+ return "<nil>"
+ }
+
+ if len(i.ModulePath) <= 1 {
+ return i.Id
+ }
+
+ return fmt.Sprintf(
+ "module.%s.%s",
+ strings.Join(i.ModulePath[1:], "."),
+ i.Id)
+}
+
+func (i *InstanceInfo) uniqueId() string {
+ prefix := i.HumanId()
+ if v := i.uniqueExtra; v != "" {
+ prefix += " " + v
+ }
+
+ return prefix
+}
+
// ResourceConfig holds the configuration given for a resource. This is
// done instead of a raw `map[string]interface{}` type so that rich
// methods can be added to it to make dealing with it easier.
type ResourceConfig struct {
	ComputedKeys []string               // keys whose values are not yet known
	Raw          map[string]interface{} // uninterpolated configuration
	Config       map[string]interface{} // interpolated configuration

	// raw is the source RawConfig that the exported fields are derived
	// from; see interpolateForce.
	raw *config.RawConfig
}
+
// NewResourceConfig creates a new ResourceConfig from a config.RawConfig,
// immediately deriving the exported Raw/Config/ComputedKeys fields from
// the raw config's current state.
func NewResourceConfig(c *config.RawConfig) *ResourceConfig {
	result := &ResourceConfig{raw: c}
	result.interpolateForce()
	return result
}
+
// DeepCopy performs a deep copy of the configuration. This makes it safe
// to modify any of the structures that are part of the resource config without
// affecting the original configuration.
func (c *ResourceConfig) DeepCopy() *ResourceConfig {
	// DeepCopying a nil should return a nil to avoid panics
	if c == nil {
		return nil
	}

	// Copy, this will copy all the exported attributes
	copy, err := copystructure.Config{Lock: true}.Copy(c)
	if err != nil {
		// copystructure only fails on values it cannot walk; treat
		// that as a programmer error.
		panic(err)
	}

	// Force the type
	result := copy.(*ResourceConfig)

	// For the raw configuration, we can just use its own copy method.
	// NOTE(review): assumes RawConfig.Copy tolerates a nil receiver
	// when c.raw is nil — confirm against the config package.
	result.raw = c.raw.Copy()

	return result
}
+
// Equal checks the equality of two resource configs.
//
// Note: as a side effect, this sorts both receivers' ComputedKeys
// slices in place so that the comparison is order-independent.
func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool {
	// If either are nil, then they're only equal if they're both nil
	if c == nil || c2 == nil {
		return c == c2
	}

	// Sort the computed keys so they're deterministic
	sort.Strings(c.ComputedKeys)
	sort.Strings(c2.ComputedKeys)

	// Two resource configs if their exported properties are equal.
	// We don't compare "raw" because it is never used again after
	// initialization and for all intents and purposes they are equal
	// if the exported properties are equal.
	check := [][2]interface{}{
		{c.ComputedKeys, c2.ComputedKeys},
		{c.Raw, c2.Raw},
		{c.Config, c2.Config},
	}
	for _, pair := range check {
		if !reflect.DeepEqual(pair[0], pair[1]) {
			return false
		}
	}

	return true
}
+
+// CheckSet checks that the given list of configuration keys is
+// properly set. If not, errors are returned for each unset key.
+//
+// This is useful to be called in the Validate method of a ResourceProvider.
+func (c *ResourceConfig) CheckSet(keys []string) []error {
+ var errs []error
+
+ for _, k := range keys {
+ if !c.IsSet(k) {
+ errs = append(errs, fmt.Errorf("%s must be set", k))
+ }
+ }
+
+ return errs
+}
+
+// Get looks up a configuration value by key and returns the value.
+//
+// The second return value is true if the get was successful. Get will
+// return the raw value if the key is computed, so you should pair this
+// with IsComputed.
+func (c *ResourceConfig) Get(k string) (interface{}, bool) {
+ // We aim to get a value from the configuration. If it is computed,
+ // then we return the pure raw value.
+ source := c.Config
+ if c.IsComputed(k) {
+ source = c.Raw
+ }
+
+ return c.get(k, source)
+}
+
// GetRaw looks up a configuration value by key and returns the value,
// from the raw, uninterpolated config.
//
// The second return value is true if the get was successful. Get will
// not succeed if the value is being computed.
func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) {
	return c.get(k, c.Raw)
}
+
// IsComputed returns whether the given key is computed or not, i.e.
// whether its (possibly nested) value contains the unknown-value
// sentinel.
func (c *ResourceConfig) IsComputed(k string) bool {
	// The next thing we do is check the config if we get a computed
	// value out of it.
	v, ok := c.get(k, c.Config)
	if !ok {
		return false
	}

	// If value is nil, then it isn't computed
	if v == nil {
		return false
	}

	// Test if the value contains an unknown value anywhere within it;
	// the walker visits every nested primitive. Walk only fails on
	// values it cannot traverse, which is a programmer error here.
	var w unknownCheckWalker
	if err := reflectwalk.Walk(v, &w); err != nil {
		panic(err)
	}

	return w.Unknown
}
+
+// IsSet checks if the key in the configuration is set. A key is set if
+// it has a value or the value is being computed (is unknown currently).
+//
+// This function should be used rather than checking the keys of the
+// raw configuration itself, since a key may be omitted from the raw
+// configuration if it is being computed.
+func (c *ResourceConfig) IsSet(k string) bool {
+ if c == nil {
+ return false
+ }
+
+ if c.IsComputed(k) {
+ return true
+ }
+
+ if _, ok := c.Get(k); ok {
+ return true
+ }
+
+ return false
+}
+
// get traverses the dotted key k (e.g. "foo.0.bar") through the given
// map, returning the value found and whether the lookup succeeded.
// Slice counts are addressed with "#", and map keys that themselves
// contain dots are handled by re-joining the remaining components.
func (c *ResourceConfig) get(
	k string, raw map[string]interface{}) (interface{}, bool) {
	parts := strings.Split(k, ".")
	if len(parts) == 1 && parts[0] == "" {
		parts = nil
	}

	var current interface{} = raw
	var previous interface{} = nil
	for i, part := range parts {
		if current == nil {
			return nil, false
		}

		cv := reflect.ValueOf(current)
		switch cv.Kind() {
		case reflect.Map:
			previous = current
			v := cv.MapIndex(reflect.ValueOf(part))
			if !v.IsValid() {
				if i > 0 && i != (len(parts)-1) {
					// The component alone isn't a key; the map may have
					// a literal key containing dots, so retry with the
					// remaining components joined back together.
					tryKey := strings.Join(parts[i:], ".")
					v := cv.MapIndex(reflect.ValueOf(tryKey))
					if !v.IsValid() {
						return nil, false
					}

					return v.Interface(), true
				}

				return nil, false
			}

			current = v.Interface()
		case reflect.Slice:
			previous = current

			if part == "#" {
				// "#" requests the element count. If any value in a
				// list is computed, this whole thing is computed and
				// we can't read any part of it.
				for i := 0; i < cv.Len(); i++ {
					if v := cv.Index(i).Interface(); v == unknownValue() {
						return v, true
					}
				}

				current = cv.Len()
			} else {
				i, err := strconv.ParseInt(part, 0, 0)
				if err != nil {
					return nil, false
				}
				if i >= int64(cv.Len()) {
					return nil, false
				}
				current = cv.Index(int(i)).Interface()
			}
		case reflect.String:
			// This happens when map keys contain "." and have a common
			// prefix so were split as path components above.
			actualKey := strings.Join(parts[i-1:], ".")
			if prevMap, ok := previous.(map[string]interface{}); ok {
				v, ok := prevMap[actualKey]
				return v, ok
			}

			return nil, false
		default:
			panic(fmt.Sprintf("Unknown kind: %s", cv.Kind()))
		}
	}

	return current, true
}
+
// interpolateForce is a temporary thing. We want to get rid of interpolate
// above and likewise this, but it can only be done after the f-ast-graph
// refactor is complete.
//
// It (re)derives the exported ComputedKeys/Raw/Config fields from the
// private raw config, substituting an empty RawConfig when none exists.
func (c *ResourceConfig) interpolateForce() {
	if c.raw == nil {
		var err error
		c.raw, err = config.NewRawConfig(make(map[string]interface{}))
		if err != nil {
			// An empty map cannot fail to parse; treat this as a
			// programmer error.
			panic(err)
		}
	}

	c.ComputedKeys = c.raw.UnknownKeys()
	c.Raw = c.raw.RawMap()
	c.Config = c.raw.Config()
}
+
// unknownCheckWalker is a reflectwalk walker that records whether any
// primitive it visits equals the unknown-value sentinel.
type unknownCheckWalker struct {
	// Unknown is set to true once an unknown value has been seen.
	Unknown bool
}
+
// Primitive implements reflectwalk.PrimitiveWalker. It flags the walk
// as Unknown when the visited value equals the unknown sentinel and
// never returns an error.
func (w *unknownCheckWalker) Primitive(v reflect.Value) error {
	if v.Interface() == unknownValue() {
		w.Unknown = true
	}

	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
new file mode 100644
index 00000000..a8a0c955
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
@@ -0,0 +1,301 @@
+package terraform
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+)
+
// ResourceAddress is a way of identifying an individual resource (or,
// eventually, a subset of resources) within the state. It is used for Targets.
type ResourceAddress struct {
	// Addresses a resource falling somewhere in the module path
	// When specified alone, addresses all resources within a module path
	Path []string

	// Addresses a specific resource that occurs in a list; -1 means
	// "no index / all indexes" (see Equals).
	Index int

	InstanceType    InstanceType        // primary/tainted/deposed
	InstanceTypeSet bool                // whether InstanceType was given explicitly
	Name            string              // resource name
	Type            string              // resource type
	Mode            config.ResourceMode // significant only if InstanceTypeSet
}
+
+// Copy returns a copy of this ResourceAddress
+func (r *ResourceAddress) Copy() *ResourceAddress {
+ if r == nil {
+ return nil
+ }
+
+ n := &ResourceAddress{
+ Path: make([]string, 0, len(r.Path)),
+ Index: r.Index,
+ InstanceType: r.InstanceType,
+ Name: r.Name,
+ Type: r.Type,
+ Mode: r.Mode,
+ }
+ for _, p := range r.Path {
+ n.Path = append(n.Path, p)
+ }
+ return n
+}
+
// String outputs the address that parses into this address.
func (r *ResourceAddress) String() string {
	var result []string
	for _, p := range r.Path {
		result = append(result, "module", p)
	}

	// Data resources get a "data" component; any mode other than
	// managed/data is a programmer error.
	switch r.Mode {
	case config.ManagedResourceMode:
		// nothing to do
	case config.DataResourceMode:
		result = append(result, "data")
	default:
		panic(fmt.Errorf("unsupported resource mode %s", r.Mode))
	}

	if r.Type != "" {
		result = append(result, r.Type)
	}

	if r.Name != "" {
		name := r.Name
		// The instance-type suffix is only rendered when it was
		// explicitly set on the address.
		if r.InstanceTypeSet {
			switch r.InstanceType {
			case TypePrimary:
				name += ".primary"
			case TypeDeposed:
				name += ".deposed"
			case TypeTainted:
				name += ".tainted"
			}
		}

		// Index of -1 means "no index" and renders nothing.
		if r.Index >= 0 {
			name += fmt.Sprintf("[%d]", r.Index)
		}
		result = append(result, name)
	}

	return strings.Join(result, ".")
}
+
// stateId returns the ID that this resource should be entered with
// in the state. This is also used for diffs. In the future, we'd like to
// move away from this string field so I don't export this.
func (r *ResourceAddress) stateId() string {
	result := fmt.Sprintf("%s.%s", r.Type, r.Name)
	switch r.Mode {
	case config.ManagedResourceMode:
		// Done
	case config.DataResourceMode:
		result = fmt.Sprintf("data.%s", result)
	default:
		panic(fmt.Errorf("unknown resource mode: %s", r.Mode))
	}
	// Index of -1 means "no index" and is omitted from the ID.
	if r.Index >= 0 {
		result += fmt.Sprintf(".%d", r.Index)
	}

	return result
}
+
// parseResourceAddressConfig creates a resource address from a
// config.Resource. The result addresses the primary instance with no
// index (-1). The error result is always nil; it is kept for signature
// symmetry with the other address parsers.
func parseResourceAddressConfig(r *config.Resource) (*ResourceAddress, error) {
	return &ResourceAddress{
		Type:         r.Type,
		Name:         r.Name,
		Index:        -1,
		InstanceType: TypePrimary,
		Mode:         r.Mode,
	}, nil
}
+
// parseResourceAddressInternal parses the somewhat bespoke resource
// identifier used in states and diffs, such as "instance.name.0" or
// "data.source.name.0".
func parseResourceAddressInternal(s string) (*ResourceAddress, error) {
	// Split based on ".". Every resource address should have at least two
	// elements (type and name).
	parts := strings.Split(s, ".")
	if len(parts) < 2 || len(parts) > 4 {
		return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
	}

	// Data resource if we have at least 3 parts and the first one is data
	mode := config.ManagedResourceMode
	if len(parts) > 2 && parts[0] == "data" {
		mode = config.DataResourceMode
		parts = parts[1:]
	}

	// If we're not a data resource and we have more than 3, then it is an error
	if len(parts) > 3 && mode != config.DataResourceMode {
		return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
	}

	// Build the parts of the resource address that are guaranteed to exist
	addr := &ResourceAddress{
		Type:         parts[0],
		Name:         parts[1],
		Index:        -1, // overwritten below if an index component exists
		InstanceType: TypePrimary,
		Mode:         mode,
	}

	// If we have more parts, then we have an index. Parse that.
	if len(parts) > 2 {
		idx, err := strconv.ParseInt(parts[2], 0, 0)
		if err != nil {
			return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err)
		}

		addr.Index = int(idx)
	}

	return addr, nil
}
+
// ParseResourceAddress parses a user-facing resource address string
// such as "module.foo.aws_instance.web[1]" or "data.aws_ami.base"
// into a ResourceAddress.
func ParseResourceAddress(s string) (*ResourceAddress, error) {
	matches, err := tokenizeResourceAddress(s)
	if err != nil {
		return nil, err
	}
	mode := config.ManagedResourceMode
	if matches["data_prefix"] != "" {
		mode = config.DataResourceMode
	}
	resourceIndex, err := ParseResourceIndex(matches["index"])
	if err != nil {
		return nil, err
	}
	instanceType, err := ParseInstanceType(matches["instance_type"])
	if err != nil {
		return nil, err
	}
	path := ParseResourcePath(matches["path"])

	// not allowed to say "data." without a type following
	if mode == config.DataResourceMode && matches["type"] == "" {
		return nil, fmt.Errorf("must target specific data instance")
	}

	return &ResourceAddress{
		Path:            path,
		Index:           resourceIndex,
		InstanceType:    instanceType,
		InstanceTypeSet: matches["instance_type"] != "",
		Name:            matches["name"],
		Type:            matches["type"],
		Mode:            mode,
	}, nil
}
+
// Equals reports whether this address matches the other address. The
// comparison is deliberately fuzzy, for targeting: unset components
// (empty Name/Type, Index of -1, empty Path on both sides) act as
// wildcards rather than requiring strict equality.
func (addr *ResourceAddress) Equals(raw interface{}) bool {
	other, ok := raw.(*ResourceAddress)
	if !ok {
		return false
	}

	pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 ||
		reflect.DeepEqual(addr.Path, other.Path)

	indexMatch := addr.Index == -1 ||
		other.Index == -1 ||
		addr.Index == other.Index

	nameMatch := addr.Name == "" ||
		other.Name == "" ||
		addr.Name == other.Name

	typeMatch := addr.Type == "" ||
		other.Type == "" ||
		addr.Type == other.Type

	// mode is significant only when type is set
	modeMatch := addr.Type == "" ||
		other.Type == "" ||
		addr.Mode == other.Mode

	return pathMatch &&
		indexMatch &&
		addr.InstanceType == other.InstanceType &&
		nameMatch &&
		typeMatch &&
		modeMatch
}
+
// ParseResourceIndex parses the bracketed index component of a
// resource address. An empty string means "no index" and yields -1.
func ParseResourceIndex(s string) (int, error) {
	if len(s) == 0 {
		return -1, nil
	}
	return strconv.Atoi(s)
}
+
// ParseResourcePath splits a "module.foo.module.bar" address prefix
// into the list of module names it addresses, e.g. ["foo", "bar"].
// An empty string yields nil.
func ParseResourcePath(s string) []string {
	if s == "" {
		return nil
	}

	parts := strings.Split(s, ".")
	path := make([]string, 0, len(parts))
	for _, part := range parts {
		// The tokenizer regexp leaves "module" markers and empty
		// fragments in the matched text; drop that noise here.
		if part == "" || part == "module" {
			continue
		}
		path = append(path, part)
	}
	return path
}
+
// ParseInstanceType maps the textual instance-type component of an
// address to an InstanceType. An empty string defaults to primary;
// anything other than primary/deposed/tainted is an error.
func ParseInstanceType(s string) (InstanceType, error) {
	switch s {
	case "", "primary":
		return TypePrimary, nil
	case "deposed":
		return TypeDeposed, nil
	case "tainted":
		return TypeTainted, nil
	default:
		return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s)
	}
}
+
// resourceAddressRe matches a resource address such as
// "module.foo.aws_instance.web.tainted[1]". It is compiled once at
// package initialization rather than on every call (the previous
// implementation recompiled it inside the function).
//
// Example of portions of the regexp below using the
// string "aws_instance.web.tainted[1]"
var resourceAddressRe = regexp.MustCompile(`\A` +
	// "module.foo.module.bar" (optional)
	`(?P<path>(?:module\.[^.]+\.?)*)` +
	// possibly "data.", if targeting is a data resource
	`(?P<data_prefix>(?:data\.)?)` +
	// "aws_instance.web" (optional when module path specified)
	`(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
	// "tainted" (optional, omission implies: "primary")
	`(?:\.(?P<instance_type>\w+))?` +
	// "1" (optional, omission implies: "0")
	`(?:\[(?P<index>\d+)\])?` +
	`\z`)

// tokenizeResourceAddress splits a resource address string into its
// named components: path, data_prefix, type, name, instance_type, and
// index. Unmatched optional groups come back as empty strings. An
// error is returned when s is not a valid address.
func tokenizeResourceAddress(s string) (map[string]string, error) {
	groupNames := resourceAddressRe.SubexpNames()
	rawMatches := resourceAddressRe.FindAllStringSubmatch(s, -1)
	if len(rawMatches) != 1 {
		return nil, fmt.Errorf("Problem parsing address: %q", s)
	}

	matches := make(map[string]string, len(groupNames))
	for i, m := range rawMatches[0] {
		matches[groupNames[i]] = m
	}

	return matches, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
new file mode 100644
index 00000000..1a68c869
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
@@ -0,0 +1,204 @@
+package terraform
+
// ResourceProvider is an interface that must be implemented by any
// resource provider: the thing that creates and manages the resources in
// a Terraform configuration.
//
// Important implementation note: All returned pointers, such as
// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to
// shared data. Terraform is highly parallel and assumes that this data is safe
// to read/write in parallel so it must be unique references. Note that it is
// safe to return arguments as results, however.
type ResourceProvider interface {
	/*********************************************************************
	* Functions related to the provider
	*********************************************************************/

	// Input is called to ask the provider to ask the user for input
	// for completing the configuration if necessary.
	//
	// This may or may not be called, so resource provider writers shouldn't
	// rely on this being available to set some default values for validate
	// later. Example of a situation where this wouldn't be called is if
	// the user is not using a TTY.
	Input(UIInput, *ResourceConfig) (*ResourceConfig, error)

	// Validate is called once at the beginning with the raw configuration
	// (no interpolation done) and can return a list of warnings and/or
	// errors.
	//
	// This is called once with the provider configuration only. It may not
	// be called at all if no provider configuration is given.
	//
	// This should not assume that any values of the configurations are valid.
	// The primary use case of this call is to check that required keys are
	// set.
	Validate(*ResourceConfig) ([]string, []error)

	// Configure configures the provider itself with the configuration
	// given. This is useful for setting things like access keys.
	//
	// This won't be called at all if no provider configuration is given.
	//
	// Configure returns an error if it occurred.
	Configure(*ResourceConfig) error

	// Resources returns all the available resource types that this provider
	// knows how to manage.
	Resources() []ResourceType

	// Stop is called when the provider should halt any in-flight actions.
	//
	// This can be used to make a nicer Ctrl-C experience for Terraform.
	// Even if this isn't implemented to do anything (just returns nil),
	// Terraform will still cleanly stop after the currently executing
	// graph node is complete. However, this API can be used to make more
	// efficient halts.
	//
	// Stop doesn't have to and shouldn't block waiting for in-flight actions
	// to complete. It should take any action it wants and return immediately
	// acknowledging it has received the stop request. Terraform core will
	// automatically not make any further API calls to the provider soon
	// after Stop is called (technically exactly once the currently executing
	// graph nodes are complete).
	//
	// The error returned, if non-nil, is assumed to mean that signaling the
	// stop somehow failed and that the user should expect potentially waiting
	// a longer period of time.
	Stop() error

	/*********************************************************************
	* Functions related to individual resources
	*********************************************************************/

	// ValidateResource is called once at the beginning with the raw
	// configuration (no interpolation done) and can return a list of warnings
	// and/or errors.
	//
	// This is called once per resource.
	//
	// This should not assume any of the values in the resource configuration
	// are valid since it is possible they have to be interpolated still.
	// The primary use case of this call is to check that the required keys
	// are set and that the general structure is correct.
	ValidateResource(string, *ResourceConfig) ([]string, []error)

	// Apply applies a diff to a specific resource and returns the new
	// resource state along with an error.
	//
	// If the resource state given has an empty ID, then a new resource
	// is expected to be created.
	Apply(
		*InstanceInfo,
		*InstanceState,
		*InstanceDiff) (*InstanceState, error)

	// Diff diffs a resource versus a desired state and returns
	// a diff.
	Diff(
		*InstanceInfo,
		*InstanceState,
		*ResourceConfig) (*InstanceDiff, error)

	// Refresh refreshes a resource and updates all of its attributes
	// with the latest information.
	Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error)

	/*********************************************************************
	* Functions related to importing
	*********************************************************************/

	// ImportState requests that the given resource be imported.
	//
	// The returned InstanceState only requires ID be set. Importing
	// will always call Refresh after the state to complete it.
	//
	// IMPORTANT: InstanceState doesn't have the resource type attached
	// to it. A type must be specified on the state via the Ephemeral
	// field on the state.
	//
	// This function can return multiple states. Normally, an import
	// will map 1:1 to a physical resource. However, some resources map
	// to multiple. For example, an AWS security group may contain many rules.
	// Each rule is represented by a separate resource in Terraform,
	// therefore multiple states are returned.
	ImportState(*InstanceInfo, string) ([]*InstanceState, error)

	/*********************************************************************
	* Functions related to data resources
	*********************************************************************/

	// ValidateDataSource is called once at the beginning with the raw
	// configuration (no interpolation done) and can return a list of warnings
	// and/or errors.
	//
	// This is called once per data source instance.
	//
	// This should not assume any of the values in the resource configuration
	// are valid since it is possible they have to be interpolated still.
	// The primary use case of this call is to check that the required keys
	// are set and that the general structure is correct.
	ValidateDataSource(string, *ResourceConfig) ([]string, []error)

	// DataSources returns all of the available data sources that this
	// provider implements.
	DataSources() []DataSource

	// ReadDataDiff produces a diff that represents the state that will
	// be produced when the given data source is read using a later call
	// to ReadDataApply.
	ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)

	// ReadDataApply initializes a data instance using the configuration
	// in a diff produced by ReadDataDiff.
	ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
}
+
// ResourceProviderCloser is an interface that providers that can close
// connections that aren't needed anymore must implement.
type ResourceProviderCloser interface {
	Close() error
}
+
// ResourceType is a type of resource that a resource provider can manage.
type ResourceType struct {
	Name       string // Name of the resource, example "instance" (no provider prefix)
	Importable bool   // Whether this resource supports importing
}
+
// DataSource is a data source that a resource provider implements.
type DataSource struct {
	// Name of the data source, without the provider prefix.
	Name string
}
+
// ResourceProviderFactory is a function type that creates a new instance
// of a resource provider, or returns an error if it cannot.
type ResourceProviderFactory func() (ResourceProvider, error)
+
// ResourceProviderFactoryFixed is a helper that creates a
// ResourceProviderFactory that just returns some fixed provider.
// Every invocation of the returned factory yields the same instance.
func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory {
	return func() (ResourceProvider, error) {
		return p, nil
	}
}
+
+func ProviderHasResource(p ResourceProvider, n string) bool {
+ for _, rt := range p.Resources() {
+ if rt.Name == n {
+ return true
+ }
+ }
+
+ return false
+}
+
+func ProviderHasDataSource(p ResourceProvider, n string) bool {
+ for _, rt := range p.DataSources() {
+ if rt.Name == n {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
new file mode 100644
index 00000000..f5315339
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
@@ -0,0 +1,297 @@
+package terraform
+
+import "sync"
+
+// MockResourceProvider implements ResourceProvider but mocks out all the
+// calls for testing purposes.
+type MockResourceProvider struct {
+ sync.Mutex
+
+ // Anything you want, in case you need to store extra data with the mock.
+ Meta interface{}
+
+ CloseCalled bool
+ CloseError error
+ InputCalled bool
+ InputInput UIInput
+ InputConfig *ResourceConfig
+ InputReturnConfig *ResourceConfig
+ InputReturnError error
+ InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error)
+ ApplyCalled bool
+ ApplyInfo *InstanceInfo
+ ApplyState *InstanceState
+ ApplyDiff *InstanceDiff
+ ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error)
+ ApplyReturn *InstanceState
+ ApplyReturnError error
+ ConfigureCalled bool
+ ConfigureConfig *ResourceConfig
+ ConfigureFn func(*ResourceConfig) error
+ ConfigureReturnError error
+ DiffCalled bool
+ DiffInfo *InstanceInfo
+ DiffState *InstanceState
+ DiffDesired *ResourceConfig
+ DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error)
+ DiffReturn *InstanceDiff
+ DiffReturnError error
+ RefreshCalled bool
+ RefreshInfo *InstanceInfo
+ RefreshState *InstanceState
+ RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error)
+ RefreshReturn *InstanceState
+ RefreshReturnError error
+ ResourcesCalled bool
+ ResourcesReturn []ResourceType
+ ReadDataApplyCalled bool
+ ReadDataApplyInfo *InstanceInfo
+ ReadDataApplyDiff *InstanceDiff
+ ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
+ ReadDataApplyReturn *InstanceState
+ ReadDataApplyReturnError error
+ ReadDataDiffCalled bool
+ ReadDataDiffInfo *InstanceInfo
+ ReadDataDiffDesired *ResourceConfig
+ ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)
+ ReadDataDiffReturn *InstanceDiff
+ ReadDataDiffReturnError error
+ StopCalled bool
+ StopFn func() error
+ StopReturnError error
+ DataSourcesCalled bool
+ DataSourcesReturn []DataSource
+ ValidateCalled bool
+ ValidateConfig *ResourceConfig
+ ValidateFn func(*ResourceConfig) ([]string, []error)
+ ValidateReturnWarns []string
+ ValidateReturnErrors []error
+ ValidateResourceFn func(string, *ResourceConfig) ([]string, []error)
+ ValidateResourceCalled bool
+ ValidateResourceType string
+ ValidateResourceConfig *ResourceConfig
+ ValidateResourceReturnWarns []string
+ ValidateResourceReturnErrors []error
+ ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error)
+ ValidateDataSourceCalled bool
+ ValidateDataSourceType string
+ ValidateDataSourceConfig *ResourceConfig
+ ValidateDataSourceReturnWarns []string
+ ValidateDataSourceReturnErrors []error
+
+ ImportStateCalled bool
+ ImportStateInfo *InstanceInfo
+ ImportStateID string
+ ImportStateReturn []*InstanceState
+ ImportStateReturnError error
+ ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error)
+}
+
+func (p *MockResourceProvider) Close() error {
+ p.CloseCalled = true
+ return p.CloseError
+}
+
+func (p *MockResourceProvider) Input(
+ input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+ p.InputCalled = true
+ p.InputInput = input
+ p.InputConfig = c
+ if p.InputFn != nil {
+ return p.InputFn(input, c)
+ }
+ return p.InputReturnConfig, p.InputReturnError
+}
+
+func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ValidateCalled = true
+ p.ValidateConfig = c
+ if p.ValidateFn != nil {
+ return p.ValidateFn(c)
+ }
+ return p.ValidateReturnWarns, p.ValidateReturnErrors
+}
+
+func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ValidateResourceCalled = true
+ p.ValidateResourceType = t
+ p.ValidateResourceConfig = c
+
+ if p.ValidateResourceFn != nil {
+ return p.ValidateResourceFn(t, c)
+ }
+
+ return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors
+}
+
+func (p *MockResourceProvider) Configure(c *ResourceConfig) error {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ConfigureCalled = true
+ p.ConfigureConfig = c
+
+ if p.ConfigureFn != nil {
+ return p.ConfigureFn(c)
+ }
+
+ return p.ConfigureReturnError
+}
+
+func (p *MockResourceProvider) Stop() error {
+ p.Lock()
+ defer p.Unlock()
+
+ p.StopCalled = true
+ if p.StopFn != nil {
+ return p.StopFn()
+ }
+
+ return p.StopReturnError
+}
+
+func (p *MockResourceProvider) Apply(
+ info *InstanceInfo,
+ state *InstanceState,
+ diff *InstanceDiff) (*InstanceState, error) {
+ // We only lock while writing data. Reading is fine
+ p.Lock()
+ p.ApplyCalled = true
+ p.ApplyInfo = info
+ p.ApplyState = state
+ p.ApplyDiff = diff
+ p.Unlock()
+
+ if p.ApplyFn != nil {
+ return p.ApplyFn(info, state, diff)
+ }
+
+ return p.ApplyReturn.DeepCopy(), p.ApplyReturnError
+}
+
+func (p *MockResourceProvider) Diff(
+ info *InstanceInfo,
+ state *InstanceState,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.DiffCalled = true
+ p.DiffInfo = info
+ p.DiffState = state
+ p.DiffDesired = desired
+ if p.DiffFn != nil {
+ return p.DiffFn(info, state, desired)
+ }
+
+ return p.DiffReturn.DeepCopy(), p.DiffReturnError
+}
+
+func (p *MockResourceProvider) Refresh(
+ info *InstanceInfo,
+ s *InstanceState) (*InstanceState, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.RefreshCalled = true
+ p.RefreshInfo = info
+ p.RefreshState = s
+
+ if p.RefreshFn != nil {
+ return p.RefreshFn(info, s)
+ }
+
+ return p.RefreshReturn.DeepCopy(), p.RefreshReturnError
+}
+
+func (p *MockResourceProvider) Resources() []ResourceType {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ResourcesCalled = true
+ return p.ResourcesReturn
+}
+
+func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ImportStateCalled = true
+ p.ImportStateInfo = info
+ p.ImportStateID = id
+ if p.ImportStateFn != nil {
+ return p.ImportStateFn(info, id)
+ }
+
+ var result []*InstanceState
+ if p.ImportStateReturn != nil {
+ result = make([]*InstanceState, len(p.ImportStateReturn))
+ for i, v := range p.ImportStateReturn {
+ result[i] = v.DeepCopy()
+ }
+ }
+
+ return result, p.ImportStateReturnError
+}
+
+func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ValidateDataSourceCalled = true
+ p.ValidateDataSourceType = t
+ p.ValidateDataSourceConfig = c
+
+ if p.ValidateDataSourceFn != nil {
+ return p.ValidateDataSourceFn(t, c)
+ }
+
+ return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors
+}
+
+func (p *MockResourceProvider) ReadDataDiff(
+ info *InstanceInfo,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ReadDataDiffCalled = true
+ p.ReadDataDiffInfo = info
+ p.ReadDataDiffDesired = desired
+ if p.ReadDataDiffFn != nil {
+ return p.ReadDataDiffFn(info, desired)
+ }
+
+ return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError
+}
+
+func (p *MockResourceProvider) ReadDataApply(
+ info *InstanceInfo,
+ d *InstanceDiff) (*InstanceState, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ReadDataApplyCalled = true
+ p.ReadDataApplyInfo = info
+ p.ReadDataApplyDiff = d
+
+ if p.ReadDataApplyFn != nil {
+ return p.ReadDataApplyFn(info, d)
+ }
+
+ return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError
+}
+
+func (p *MockResourceProvider) DataSources() []DataSource {
+ p.Lock()
+ defer p.Unlock()
+
+ p.DataSourcesCalled = true
+ return p.DataSourcesReturn
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
new file mode 100644
index 00000000..361ec1ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
@@ -0,0 +1,54 @@
+package terraform
+
+// ResourceProvisioner is an interface that must be implemented by any
+// resource provisioner: the thing that initializes resources in
+// a Terraform configuration.
+type ResourceProvisioner interface {
+ // Validate is called once at the beginning with the raw
+ // configuration (no interpolation done) and can return a list of warnings
+ // and/or errors.
+ //
+ // This is called once per resource.
+ //
+ // This should not assume any of the values in the resource configuration
+ // are valid since it is possible they have to be interpolated still.
+ // The primary use case of this call is to check that the required keys
+ // are set and that the general structure is correct.
+ Validate(*ResourceConfig) ([]string, []error)
+
+ // Apply runs the provisioner on a specific resource and returns the new
+ // resource state along with an error. Instead of a diff, the ResourceConfig
+ // is provided since provisioners only run after a resource has been
+ // newly created.
+ Apply(UIOutput, *InstanceState, *ResourceConfig) error
+
+ // Stop is called when the provisioner should halt any in-flight actions.
+ //
+ // This can be used to make a nicer Ctrl-C experience for Terraform.
+ // Even if this isn't implemented to do anything (just returns nil),
+ // Terraform will still cleanly stop after the currently executing
+ // graph node is complete. However, this API can be used to make more
+ // efficient halts.
+ //
+ // Stop doesn't have to and shouldn't block waiting for in-flight actions
+ // to complete. It should take any action it wants and return immediately
+ // acknowledging it has received the stop request. Terraform core will
+ // automatically not make any further API calls to the provider soon
+ // after Stop is called (technically exactly once the currently executing
+ // graph nodes are complete).
+ //
+ // The error returned, if non-nil, is assumed to mean that signaling the
+ // stop somehow failed and that the user should expect potentially waiting
+ // a longer period of time.
+ Stop() error
+}
+
+// ResourceProvisionerCloser is an interface that provisioners that can close
+// connections that aren't needed anymore must implement.
+type ResourceProvisionerCloser interface {
+ Close() error
+}
+
+// ResourceProvisionerFactory is a function type that creates a new instance
+// of a resource provisioner.
+type ResourceProvisionerFactory func() (ResourceProvisioner, error)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
new file mode 100644
index 00000000..f471a518
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
@@ -0,0 +1,72 @@
+package terraform
+
+import "sync"
+
+// MockResourceProvisioner implements ResourceProvisioner but mocks out all the
+// calls for testing purposes.
+type MockResourceProvisioner struct {
+ sync.Mutex
+ // Anything you want, in case you need to store extra data with the mock.
+ Meta interface{}
+
+ ApplyCalled bool
+ ApplyOutput UIOutput
+ ApplyState *InstanceState
+ ApplyConfig *ResourceConfig
+ ApplyFn func(*InstanceState, *ResourceConfig) error
+ ApplyReturnError error
+
+ ValidateCalled bool
+ ValidateConfig *ResourceConfig
+ ValidateFn func(c *ResourceConfig) ([]string, []error)
+ ValidateReturnWarns []string
+ ValidateReturnErrors []error
+
+ StopCalled bool
+ StopFn func() error
+ StopReturnError error
+}
+
+func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ValidateCalled = true
+ p.ValidateConfig = c
+ if p.ValidateFn != nil {
+ return p.ValidateFn(c)
+ }
+ return p.ValidateReturnWarns, p.ValidateReturnErrors
+}
+
+func (p *MockResourceProvisioner) Apply(
+ output UIOutput,
+ state *InstanceState,
+ c *ResourceConfig) error {
+ p.Lock()
+
+ p.ApplyCalled = true
+ p.ApplyOutput = output
+ p.ApplyState = state
+ p.ApplyConfig = c
+ if p.ApplyFn != nil {
+ fn := p.ApplyFn
+ p.Unlock()
+ return fn(state, c)
+ }
+
+ defer p.Unlock()
+ return p.ApplyReturnError
+}
+
+func (p *MockResourceProvisioner) Stop() error {
+ p.Lock()
+ defer p.Unlock()
+
+ p.StopCalled = true
+ if p.StopFn != nil {
+ return p.StopFn()
+ }
+
+ return p.StopReturnError
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
new file mode 100644
index 00000000..20f1d8a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
@@ -0,0 +1,132 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphSemanticChecker is the interface that semantic checks across
+// the entire Terraform graph implement.
+//
+// The graph should NOT be modified by the semantic checker.
+type GraphSemanticChecker interface {
+ Check(*dag.Graph) error
+}
+
+// UnorderedSemanticCheckRunner is an implementation of GraphSemanticChecker
+// that runs a list of SemanticCheckers against the vertices of the graph
+// in no specified order.
+type UnorderedSemanticCheckRunner struct {
+ Checks []SemanticChecker
+}
+
+func (sc *UnorderedSemanticCheckRunner) Check(g *dag.Graph) error {
+ var err error
+ for _, v := range g.Vertices() {
+ for _, check := range sc.Checks {
+ if e := check.Check(g, v); e != nil {
+ err = multierror.Append(err, e)
+ }
+ }
+ }
+
+ return err
+}
+
+// SemanticChecker is the interface that semantic checks across the
+// Terraform graph implement. Errors are accumulated. Even after an error
+// is returned, child vertices in the graph will still be visited.
+//
+// The graph should NOT be modified by the semantic checker.
+//
+// The order in which vertices are visited is left unspecified, so the
+// semantic checks should not rely on that.
+type SemanticChecker interface {
+ Check(*dag.Graph, dag.Vertex) error
+}
+
+// smcUserVariables does all the semantic checks to verify that the
+// variables given satisfy the configuration itself.
+func smcUserVariables(c *config.Config, vs map[string]interface{}) []error {
+ var errs []error
+
+ cvs := make(map[string]*config.Variable)
+ for _, v := range c.Variables {
+ cvs[v.Name] = v
+ }
+
+ // Check that all required variables are present
+ required := make(map[string]struct{})
+ for _, v := range c.Variables {
+ if v.Required() {
+ required[v.Name] = struct{}{}
+ }
+ }
+ for k, _ := range vs {
+ delete(required, k)
+ }
+ if len(required) > 0 {
+ for k, _ := range required {
+ errs = append(errs, fmt.Errorf(
+ "Required variable not set: %s", k))
+ }
+ }
+
+ // Check that types match up
+ for name, proposedValue := range vs {
+ // Check for "map.key" fields. These stopped working with Terraform
+ // 0.7 but we do this to surface a better error message informing
+ // the user what happened.
+ if idx := strings.Index(name, "."); idx > 0 {
+ key := name[:idx]
+ if _, ok := cvs[key]; ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: Overriding map keys with the format `name.key` is no "+
+ "longer allowed. You may still override keys by setting "+
+ "`name = { key = value }`. The maps will be merged. This "+
+ "behavior appeared in 0.7.0.", name))
+ continue
+ }
+ }
+
+ schema, ok := cvs[name]
+ if !ok {
+ continue
+ }
+
+ declaredType := schema.Type()
+
+ switch declaredType {
+ case config.VariableTypeString:
+ switch proposedValue.(type) {
+ case string:
+ continue
+ }
+ case config.VariableTypeMap:
+ switch v := proposedValue.(type) {
+ case map[string]interface{}:
+ continue
+ case []map[string]interface{}:
+ // if we have a list of 1 map, it will get coerced later as needed
+ if len(v) == 1 {
+ continue
+ }
+ }
+ case config.VariableTypeList:
+ switch proposedValue.(type) {
+ case []interface{}:
+ continue
+ }
+ }
+ errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s",
+ name, declaredType.Printable(), hclTypeName(proposedValue)))
+ }
+
+ // TODO(mitchellh): variables that are unknown
+
+ return errs
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow.go b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
new file mode 100644
index 00000000..46325595
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
@@ -0,0 +1,28 @@
+package terraform
+
+// Shadow is the interface that any "shadow" structures must implement.
+//
+// A shadow structure is an interface implementation (typically) that
+// shadows a real implementation and verifies that the same behavior occurs
+// on both. The semantics of this behavior are up to the interface itself.
+//
+// A shadow NEVER modifies real values or state. It must always be safe to use.
+//
+// For example, a ResourceProvider shadow ensures that the same operations
+// are done on the same resources with the same configurations.
+//
+// The typical usage of a shadow following this interface is to complete
+// the real operations, then call CloseShadow which tells the shadow that
+// the real side is done. Then, once the shadow is also complete, call
+// ShadowError to find any errors that may have been caught.
+type Shadow interface {
+ // CloseShadow tells the shadow that the REAL implementation is
+ // complete. Therefore, any calls that would block should now return
+ // immediately since no more changes will happen to the real side.
+ CloseShadow() error
+
+ // ShadowError returns the errors that the shadow has found.
+ // This should be called AFTER CloseShadow and AFTER the shadow is
+ // known to be complete (no more calls to it).
+ ShadowError() error
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
new file mode 100644
index 00000000..116cf84f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
@@ -0,0 +1,273 @@
+package terraform
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/helper/shadow"
+)
+
+// newShadowComponentFactory creates a shadowed contextComponentFactory
+// so that requests to create new components result in both a real and
+// shadow side.
+func newShadowComponentFactory(
+ f contextComponentFactory) (contextComponentFactory, *shadowComponentFactory) {
+ // Create the shared data
+ shared := &shadowComponentFactoryShared{contextComponentFactory: f}
+
+ // Create the real side
+ real := &shadowComponentFactory{
+ shadowComponentFactoryShared: shared,
+ }
+
+ // Create the shadow
+ shadow := &shadowComponentFactory{
+ shadowComponentFactoryShared: shared,
+ Shadow: true,
+ }
+
+ return real, shadow
+}
+
+// shadowComponentFactory is the shadow side. Any components created
+// with this factory are fake and will not cause real work to happen.
+//
+// Unlike other shadowers, the shadow component factory will allow the
+// shadow to create _any_ component even if it is never requested on the
+// real side. This is because errors will happen later downstream as function
+// calls are made to the shadows that are never matched on the real side.
+type shadowComponentFactory struct {
+ *shadowComponentFactoryShared
+
+ Shadow bool // True if this should return the shadow
+ lock sync.Mutex
+}
+
+func (f *shadowComponentFactory) ResourceProvider(
+ n, uid string) (ResourceProvider, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ real, shadow, err := f.shadowComponentFactoryShared.ResourceProvider(n, uid)
+ var result ResourceProvider = real
+ if f.Shadow {
+ result = shadow
+ }
+
+ return result, err
+}
+
+func (f *shadowComponentFactory) ResourceProvisioner(
+ n, uid string) (ResourceProvisioner, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ real, shadow, err := f.shadowComponentFactoryShared.ResourceProvisioner(n, uid)
+ var result ResourceProvisioner = real
+ if f.Shadow {
+ result = shadow
+ }
+
+ return result, err
+}
+
+// CloseShadow is called when the _real_ side is complete. This will cause
+// all future blocking operations to return immediately on the shadow to
+// ensure the shadow also completes.
+func (f *shadowComponentFactory) CloseShadow() error {
+ // If we aren't the shadow, just return
+ if !f.Shadow {
+ return nil
+ }
+
+ // Lock ourselves so we don't modify state
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ // Grab our shared state
+ shared := f.shadowComponentFactoryShared
+
+ // If we're already closed, its an error
+ if shared.closed {
+ return fmt.Errorf("component factory shadow already closed")
+ }
+
+ // Close all the providers and provisioners and return the error
+ var result error
+ for _, n := range shared.providerKeys {
+ _, shadow, err := shared.ResourceProvider(n, n)
+ if err == nil && shadow != nil {
+ if err := shadow.CloseShadow(); err != nil {
+ result = multierror.Append(result, err)
+ }
+ }
+ }
+
+ for _, n := range shared.provisionerKeys {
+ _, shadow, err := shared.ResourceProvisioner(n, n)
+ if err == nil && shadow != nil {
+ if err := shadow.CloseShadow(); err != nil {
+ result = multierror.Append(result, err)
+ }
+ }
+ }
+
+ // Mark ourselves as closed
+ shared.closed = true
+
+ return result
+}
+
+func (f *shadowComponentFactory) ShadowError() error {
+ // If we aren't the shadow, just return
+ if !f.Shadow {
+ return nil
+ }
+
+ // Lock ourselves so we don't modify state
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ // Grab our shared state
+ shared := f.shadowComponentFactoryShared
+
+ // If we're not closed, its an error
+ if !shared.closed {
+ return fmt.Errorf("component factory must be closed to retrieve errors")
+ }
+
+ // Close all the providers and provisioners and return the error
+ var result error
+ for _, n := range shared.providerKeys {
+ _, shadow, err := shared.ResourceProvider(n, n)
+ if err == nil && shadow != nil {
+ if err := shadow.ShadowError(); err != nil {
+ result = multierror.Append(result, err)
+ }
+ }
+ }
+
+ for _, n := range shared.provisionerKeys {
+ _, shadow, err := shared.ResourceProvisioner(n, n)
+ if err == nil && shadow != nil {
+ if err := shadow.ShadowError(); err != nil {
+ result = multierror.Append(result, err)
+ }
+ }
+ }
+
+ return result
+}
+
+// shadowComponentFactoryShared is shared data between the two factories.
+//
+// It is NOT SAFE to run any function on this struct in parallel. Lock
+// access to this struct.
+type shadowComponentFactoryShared struct {
+ contextComponentFactory
+
+ closed bool
+ providers shadow.KeyedValue
+ providerKeys []string
+ provisioners shadow.KeyedValue
+ provisionerKeys []string
+}
+
+// shadowResourceProviderFactoryEntry is the entry that is stored in
+// the Shadows key/value for a provider.
+type shadowComponentFactoryProviderEntry struct {
+ Real ResourceProvider
+ Shadow shadowResourceProvider
+ Err error
+}
+
+type shadowComponentFactoryProvisionerEntry struct {
+ Real ResourceProvisioner
+ Shadow shadowResourceProvisioner
+ Err error
+}
+
+func (f *shadowComponentFactoryShared) ResourceProvider(
+ n, uid string) (ResourceProvider, shadowResourceProvider, error) {
+ // Determine if we already have a value
+ raw, ok := f.providers.ValueOk(uid)
+ if !ok {
+ // Build the entry
+ var entry shadowComponentFactoryProviderEntry
+
+ // No value, initialize. Create the original
+ p, err := f.contextComponentFactory.ResourceProvider(n, uid)
+ if err != nil {
+ entry.Err = err
+ p = nil // Just to be sure
+ }
+
+ if p != nil {
+ // Create the shadow
+ real, shadow := newShadowResourceProvider(p)
+ entry.Real = real
+ entry.Shadow = shadow
+
+ if f.closed {
+ shadow.CloseShadow()
+ }
+ }
+
+ // Store the value
+ f.providers.SetValue(uid, &entry)
+ f.providerKeys = append(f.providerKeys, uid)
+ raw = &entry
+ }
+
+ // Read the entry
+ entry, ok := raw.(*shadowComponentFactoryProviderEntry)
+ if !ok {
+ return nil, nil, fmt.Errorf("Unknown value for shadow provider: %#v", raw)
+ }
+
+ // Return
+ return entry.Real, entry.Shadow, entry.Err
+}
+
+func (f *shadowComponentFactoryShared) ResourceProvisioner(
+ n, uid string) (ResourceProvisioner, shadowResourceProvisioner, error) {
+ // Determine if we already have a value
+ raw, ok := f.provisioners.ValueOk(uid)
+ if !ok {
+ // Build the entry
+ var entry shadowComponentFactoryProvisionerEntry
+
+ // No value, initialize. Create the original
+ p, err := f.contextComponentFactory.ResourceProvisioner(n, uid)
+ if err != nil {
+ entry.Err = err
+ p = nil // Just to be sure
+ }
+
+ if p != nil {
+ // For now, just create a mock since we don't support provisioners yet
+ real, shadow := newShadowResourceProvisioner(p)
+ entry.Real = real
+ entry.Shadow = shadow
+
+ if f.closed {
+ shadow.CloseShadow()
+ }
+ }
+
+ // Store the value
+ f.provisioners.SetValue(uid, &entry)
+ f.provisionerKeys = append(f.provisionerKeys, uid)
+ raw = &entry
+ }
+
+ // Read the entry
+ entry, ok := raw.(*shadowComponentFactoryProvisionerEntry)
+ if !ok {
+ return nil, nil, fmt.Errorf("Unknown value for shadow provisioner: %#v", raw)
+ }
+
+ // Return
+ return entry.Real, entry.Shadow, entry.Err
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
new file mode 100644
index 00000000..5588af25
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
@@ -0,0 +1,158 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/mitchellh/copystructure"
+)
+
+// newShadowContext creates a new context that will shadow the given context
+// when walking the graph. The resulting context should be used _only once_
+// for a graph walk.
+//
+// The returned Shadow should be closed after the graph walk with the
+// real context is complete. Errors from the shadow can be retrieved there.
+//
+// Most importantly, any operations done on the shadow context (the returned
+// context) will NEVER affect the real context. All structures are deep
+// copied, no real providers or resources are used, etc.
+func newShadowContext(c *Context) (*Context, *Context, Shadow) {
+ // Copy the targets
+ targetRaw, err := copystructure.Copy(c.targets)
+ if err != nil {
+ panic(err)
+ }
+
+ // Copy the variables
+ varRaw, err := copystructure.Copy(c.variables)
+ if err != nil {
+ panic(err)
+ }
+
+ // Copy the provider inputs
+ providerInputRaw, err := copystructure.Copy(c.providerInputConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ // The factories
+ componentsReal, componentsShadow := newShadowComponentFactory(c.components)
+
+ // Create the shadow
+ shadow := &Context{
+ components: componentsShadow,
+ destroy: c.destroy,
+ diff: c.diff.DeepCopy(),
+ hooks: nil,
+ meta: c.meta,
+ module: c.module,
+ state: c.state.DeepCopy(),
+ targets: targetRaw.([]string),
+ variables: varRaw.(map[string]interface{}),
+
+ // NOTE(mitchellh): This is not going to work for shadows that are
+ // testing that input results in the proper end state. At the time
+ // of writing, input is not used in any state-changing graph
+ // walks anyways, so this checks nothing. We set it to this to avoid
+ // any panics but even a "nil" value worked here.
+ uiInput: new(MockUIInput),
+
+ // Hardcoded to 4 since parallelism in the shadow doesn't matter
+ // a ton since we're doing far less compared to the real side
+ // and our operations are MUCH faster.
+ parallelSem: NewSemaphore(4),
+ providerInputConfig: providerInputRaw.(map[string]map[string]interface{}),
+ }
+
+ // Create the real context. This is effectively just a copy of
+ // the context given except we need to modify some of the values
+ // to point to the real side of a shadow so the shadow can compare values.
+ real := &Context{
+ // The fields below are changed.
+ components: componentsReal,
+
+ // The fields below are direct copies
+ destroy: c.destroy,
+ diff: c.diff,
+ // diffLock - no copy
+ hooks: c.hooks,
+ meta: c.meta,
+ module: c.module,
+ sh: c.sh,
+ state: c.state,
+ // stateLock - no copy
+ targets: c.targets,
+ uiInput: c.uiInput,
+ variables: c.variables,
+
+ // l - no copy
+ parallelSem: c.parallelSem,
+ providerInputConfig: c.providerInputConfig,
+ runContext: c.runContext,
+ runContextCancel: c.runContextCancel,
+ shadowErr: c.shadowErr,
+ }
+
+ return real, shadow, &shadowContextCloser{
+ Components: componentsShadow,
+ }
+}
+
+// shadowContextVerify takes the real and shadow context and verifies they
+// have equal diffs and states.
+func shadowContextVerify(real, shadow *Context) error {
+ var result error
+
+ // The states compared must be pruned so they're minimal/clean
+ real.state.prune()
+ shadow.state.prune()
+
+ // Compare the states
+ if !real.state.Equal(shadow.state) {
+ result = multierror.Append(result, fmt.Errorf(
+ "Real and shadow states do not match! "+
+ "Real state:\n\n%s\n\n"+
+ "Shadow state:\n\n%s\n\n",
+ real.state, shadow.state))
+ }
+
+ // Compare the diffs
+ if !real.diff.Equal(shadow.diff) {
+ result = multierror.Append(result, fmt.Errorf(
+ "Real and shadow diffs do not match! "+
+ "Real diff:\n\n%s\n\n"+
+ "Shadow diff:\n\n%s\n\n",
+ real.diff, shadow.diff))
+ }
+
+ return result
+}
+
+// shadowContextCloser is the io.Closer returned by newShadowContext that
+// closes all the shadows and returns the results.
+type shadowContextCloser struct {
+ Components *shadowComponentFactory
+}
+
+// Close closes the shadow context.
+func (c *shadowContextCloser) CloseShadow() error {
+ return c.Components.CloseShadow()
+}
+
+func (c *shadowContextCloser) ShadowError() error {
+ err := c.Components.ShadowError()
+ if err == nil {
+ return nil
+ }
+
+ // This is a sad edge case: if the configuration contains uuid() at
+ // any point, we cannot reason about the shadow execution. Tested
+ // with Context2Plan_shadowUuid.
+ if strings.Contains(err.Error(), "uuid()") {
+ err = nil
+ }
+
+ return err
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
new file mode 100644
index 00000000..9741d7e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
@@ -0,0 +1,815 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/helper/shadow"
+)
+
+// shadowResourceProvider implements ResourceProvider for the shadow
+// eval context defined in eval_context_shadow.go.
+//
+// This is used to verify behavior with a real provider. This shouldn't
+// be used directly.
+type shadowResourceProvider interface {
+ ResourceProvider
+ Shadow
+}
+
+// newShadowResourceProvider creates a new shadowed ResourceProvider.
+//
+// This will assume a well behaved real ResourceProvider. For example,
+// it assumes that the `Resources` call underneath doesn't change values
+// since once it is called on the real provider, it will be cached and
+// returned in the shadow since number of calls to that shouldn't affect
+// actual behavior.
+//
+// However, with calls like Apply, call order is taken into account,
+// parameters are checked for equality, etc.
+func newShadowResourceProvider(p ResourceProvider) (ResourceProvider, shadowResourceProvider) {
+ // Create the shared data
+ shared := shadowResourceProviderShared{}
+
+ // Create the real provider that does actual work
+ real := &shadowResourceProviderReal{
+ ResourceProvider: p,
+ Shared: &shared,
+ }
+
+ // Create the shadow that watches the real value
+ shadow := &shadowResourceProviderShadow{
+ Shared: &shared,
+
+ resources: p.Resources(),
+ dataSources: p.DataSources(),
+ }
+
+ return real, shadow
+}
+
+// shadowResourceProviderReal is the real resource provider. Function calls
+// to this will perform real work. This records the parameters and return
+// values and call order for the shadow to reproduce.
+type shadowResourceProviderReal struct {
+ ResourceProvider
+
+ Shared *shadowResourceProviderShared
+}
+
+func (p *shadowResourceProviderReal) Close() error {
+ var result error
+ if c, ok := p.ResourceProvider.(ResourceProviderCloser); ok {
+ result = c.Close()
+ }
+
+ p.Shared.CloseErr.SetValue(result)
+ return result
+}
+
+func (p *shadowResourceProviderReal) Input(
+ input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+ cCopy := c.DeepCopy()
+
+ result, err := p.ResourceProvider.Input(input, c)
+ p.Shared.Input.SetValue(&shadowResourceProviderInput{
+ Config: cCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) Validate(c *ResourceConfig) ([]string, []error) {
+ warns, errs := p.ResourceProvider.Validate(c)
+ p.Shared.Validate.SetValue(&shadowResourceProviderValidate{
+ Config: c.DeepCopy(),
+ ResultWarn: warns,
+ ResultErr: errs,
+ })
+
+ return warns, errs
+}
+
+func (p *shadowResourceProviderReal) Configure(c *ResourceConfig) error {
+ cCopy := c.DeepCopy()
+
+ err := p.ResourceProvider.Configure(c)
+ p.Shared.Configure.SetValue(&shadowResourceProviderConfigure{
+ Config: cCopy,
+ Result: err,
+ })
+
+ return err
+}
+
+func (p *shadowResourceProviderReal) Stop() error {
+ return p.ResourceProvider.Stop()
+}
+
+func (p *shadowResourceProviderReal) ValidateResource(
+ t string, c *ResourceConfig) ([]string, []error) {
+ key := t
+ configCopy := c.DeepCopy()
+
+ // Real operation
+ warns, errs := p.ResourceProvider.ValidateResource(t, c)
+
+ // Initialize to ensure we always have a wrapper with a lock
+ p.Shared.ValidateResource.Init(
+ key, &shadowResourceProviderValidateResourceWrapper{})
+
+ // Get the result
+ raw := p.Shared.ValidateResource.Value(key)
+ wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
+ if !ok {
+ // If this fails then we just continue with our day... the shadow
+		// will fail too but there isn't much we can do.
+ log.Printf(
+ "[ERROR] unknown value in ValidateResource shadow value: %#v", raw)
+ return warns, errs
+ }
+
+ // Lock the wrapper for writing and record our call
+ wrapper.Lock()
+ defer wrapper.Unlock()
+
+ wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateResource{
+ Config: configCopy,
+ Warns: warns,
+ Errors: errs,
+ })
+
+ // With it locked, call SetValue again so that it triggers WaitForChange
+ p.Shared.ValidateResource.SetValue(key, wrapper)
+
+ // Return the result
+ return warns, errs
+}
+
+func (p *shadowResourceProviderReal) Apply(
+ info *InstanceInfo,
+ state *InstanceState,
+ diff *InstanceDiff) (*InstanceState, error) {
+	// These have to be copied before the call since call can modify
+ stateCopy := state.DeepCopy()
+ diffCopy := diff.DeepCopy()
+
+ result, err := p.ResourceProvider.Apply(info, state, diff)
+ p.Shared.Apply.SetValue(info.uniqueId(), &shadowResourceProviderApply{
+ State: stateCopy,
+ Diff: diffCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) Diff(
+ info *InstanceInfo,
+ state *InstanceState,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+	// These have to be copied before the call since call can modify
+ stateCopy := state.DeepCopy()
+ desiredCopy := desired.DeepCopy()
+
+ result, err := p.ResourceProvider.Diff(info, state, desired)
+ p.Shared.Diff.SetValue(info.uniqueId(), &shadowResourceProviderDiff{
+ State: stateCopy,
+ Desired: desiredCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) Refresh(
+ info *InstanceInfo,
+ state *InstanceState) (*InstanceState, error) {
+	// These have to be copied before the call since call can modify
+ stateCopy := state.DeepCopy()
+
+ result, err := p.ResourceProvider.Refresh(info, state)
+ p.Shared.Refresh.SetValue(info.uniqueId(), &shadowResourceProviderRefresh{
+ State: stateCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) ValidateDataSource(
+ t string, c *ResourceConfig) ([]string, []error) {
+ key := t
+ configCopy := c.DeepCopy()
+
+ // Real operation
+ warns, errs := p.ResourceProvider.ValidateDataSource(t, c)
+
+ // Initialize
+ p.Shared.ValidateDataSource.Init(
+ key, &shadowResourceProviderValidateDataSourceWrapper{})
+
+ // Get the result
+ raw := p.Shared.ValidateDataSource.Value(key)
+ wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
+ if !ok {
+ // If this fails then we just continue with our day... the shadow
+		// will fail too but there isn't much we can do.
+ log.Printf(
+ "[ERROR] unknown value in ValidateDataSource shadow value: %#v", raw)
+ return warns, errs
+ }
+
+ // Lock the wrapper for writing and record our call
+ wrapper.Lock()
+ defer wrapper.Unlock()
+
+ wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateDataSource{
+ Config: configCopy,
+ Warns: warns,
+ Errors: errs,
+ })
+
+ // Set it
+ p.Shared.ValidateDataSource.SetValue(key, wrapper)
+
+ // Return the result
+ return warns, errs
+}
+
+func (p *shadowResourceProviderReal) ReadDataDiff(
+ info *InstanceInfo,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ // These have to be copied before the call since call can modify
+ desiredCopy := desired.DeepCopy()
+
+ result, err := p.ResourceProvider.ReadDataDiff(info, desired)
+ p.Shared.ReadDataDiff.SetValue(info.uniqueId(), &shadowResourceProviderReadDataDiff{
+ Desired: desiredCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) ReadDataApply(
+ info *InstanceInfo,
+ diff *InstanceDiff) (*InstanceState, error) {
+	// These have to be copied before the call since call can modify
+ diffCopy := diff.DeepCopy()
+
+ result, err := p.ResourceProvider.ReadDataApply(info, diff)
+ p.Shared.ReadDataApply.SetValue(info.uniqueId(), &shadowResourceProviderReadDataApply{
+ Diff: diffCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+// shadowResourceProviderShadow is the shadow resource provider. Function
+// calls never affect real resources. This is paired with the "real" side
+// which must be called properly to enable recording.
+type shadowResourceProviderShadow struct {
+ Shared *shadowResourceProviderShared
+
+ // Cached values that are expected to not change
+ resources []ResourceType
+ dataSources []DataSource
+
+ Error error // Error is the list of errors from the shadow
+ ErrorLock sync.Mutex
+}
+
+type shadowResourceProviderShared struct {
+ // NOTE: Anytime a value is added here, be sure to add it to
+ // the Close() method so that it is closed.
+
+ CloseErr shadow.Value
+ Input shadow.Value
+ Validate shadow.Value
+ Configure shadow.Value
+ ValidateResource shadow.KeyedValue
+ Apply shadow.KeyedValue
+ Diff shadow.KeyedValue
+ Refresh shadow.KeyedValue
+ ValidateDataSource shadow.KeyedValue
+ ReadDataDiff shadow.KeyedValue
+ ReadDataApply shadow.KeyedValue
+}
+
+func (p *shadowResourceProviderShared) Close() error {
+ return shadow.Close(p)
+}
+
+func (p *shadowResourceProviderShadow) CloseShadow() error {
+ err := p.Shared.Close()
+ if err != nil {
+ err = fmt.Errorf("close error: %s", err)
+ }
+
+ return err
+}
+
+func (p *shadowResourceProviderShadow) ShadowError() error {
+ return p.Error
+}
+
+func (p *shadowResourceProviderShadow) Resources() []ResourceType {
+ return p.resources
+}
+
+func (p *shadowResourceProviderShadow) DataSources() []DataSource {
+ return p.dataSources
+}
+
+func (p *shadowResourceProviderShadow) Close() error {
+ v := p.Shared.CloseErr.Value()
+ if v == nil {
+ return nil
+ }
+
+ return v.(error)
+}
+
+func (p *shadowResourceProviderShadow) Input(
+ input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+ // Get the result of the input call
+ raw := p.Shared.Input.Value()
+ if raw == nil {
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderInput)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'input' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !c.Equal(result.Config) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Input had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
+ result.Config, c))
+ p.ErrorLock.Unlock()
+ }
+
+ // Return the results
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Validate(c *ResourceConfig) ([]string, []error) {
+ // Get the result of the validate call
+ raw := p.Shared.Validate.Value()
+ if raw == nil {
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderValidate)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'validate' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !c.Equal(result.Config) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Validate had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
+ result.Config, c))
+ p.ErrorLock.Unlock()
+ }
+
+ // Return the results
+ return result.ResultWarn, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Configure(c *ResourceConfig) error {
+ // Get the result of the call
+ raw := p.Shared.Configure.Value()
+ if raw == nil {
+ return nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderConfigure)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'configure' shadow value: %#v", raw))
+ return nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !c.Equal(result.Config) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Configure had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
+ result.Config, c))
+ p.ErrorLock.Unlock()
+ }
+
+ // Return the results
+ return result.Result
+}
+
+// Stop returns immediately.
+func (p *shadowResourceProviderShadow) Stop() error {
+ return nil
+}
+
+func (p *shadowResourceProviderShadow) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
+ // Unique key
+ key := t
+
+ // Get the initial value
+ raw := p.Shared.ValidateResource.Value(key)
+
+ // Find a validation with our configuration
+ var result *shadowResourceProviderValidateResource
+ for {
+ // Get the value
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ValidateResource' call for %q:\n\n%#v",
+ key, c))
+ return nil, nil
+ }
+
+ wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ValidateResource' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Look for the matching call with our configuration
+ wrapper.RLock()
+ for _, call := range wrapper.Calls {
+ if call.Config.Equal(c) {
+ result = call
+ break
+ }
+ }
+ wrapper.RUnlock()
+
+ // If we found a result, exit
+ if result != nil {
+ break
+ }
+
+ // Wait for a change so we can get the wrapper again
+ raw = p.Shared.ValidateResource.WaitForChange(key)
+ }
+
+ return result.Warns, result.Errors
+}
+
+func (p *shadowResourceProviderShadow) Apply(
+ info *InstanceInfo,
+ state *InstanceState,
+ diff *InstanceDiff) (*InstanceState, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.Apply.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'apply' call for %q:\n\n%#v\n\n%#v",
+ key, state, diff))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderApply)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'apply' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !state.Equal(result.State) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Apply %q: state had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.State, state))
+ p.ErrorLock.Unlock()
+ }
+
+ if !diff.Equal(result.Diff) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Apply %q: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.Diff, diff))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Diff(
+ info *InstanceInfo,
+ state *InstanceState,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.Diff.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'diff' call for %q:\n\n%#v\n\n%#v",
+ key, state, desired))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderDiff)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'diff' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !state.Equal(result.State) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.State, state))
+ p.ErrorLock.Unlock()
+ }
+ if !desired.Equal(result.Desired) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.Desired, desired))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Refresh(
+ info *InstanceInfo,
+ state *InstanceState) (*InstanceState, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.Refresh.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'refresh' call for %q:\n\n%#v",
+ key, state))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderRefresh)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'refresh' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !state.Equal(result.State) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Refresh %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.State, state))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) ValidateDataSource(
+ t string, c *ResourceConfig) ([]string, []error) {
+ // Unique key
+ key := t
+
+ // Get the initial value
+ raw := p.Shared.ValidateDataSource.Value(key)
+
+ // Find a validation with our configuration
+ var result *shadowResourceProviderValidateDataSource
+ for {
+ // Get the value
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ValidateDataSource' call for %q:\n\n%#v",
+ key, c))
+ return nil, nil
+ }
+
+ wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ValidateDataSource' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // Look for the matching call with our configuration
+ wrapper.RLock()
+ for _, call := range wrapper.Calls {
+ if call.Config.Equal(c) {
+ result = call
+ break
+ }
+ }
+ wrapper.RUnlock()
+
+ // If we found a result, exit
+ if result != nil {
+ break
+ }
+
+ // Wait for a change so we can get the wrapper again
+ raw = p.Shared.ValidateDataSource.WaitForChange(key)
+ }
+
+ return result.Warns, result.Errors
+}
+
+func (p *shadowResourceProviderShadow) ReadDataDiff(
+ info *InstanceInfo,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.ReadDataDiff.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ReadDataDiff' call for %q:\n\n%#v",
+ key, desired))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderReadDataDiff)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ReadDataDiff' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !desired.Equal(result.Desired) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "ReadDataDiff %q had unequal configs (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.Desired, desired))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) ReadDataApply(
+ info *InstanceInfo,
+ d *InstanceDiff) (*InstanceState, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.ReadDataApply.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ReadDataApply' call for %q:\n\n%#v",
+ key, d))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderReadDataApply)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ReadDataApply' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !d.Equal(result.Diff) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "ReadDataApply: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
+ result.Diff, d))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
+ panic("import not supported by shadow graph")
+}
+
+// The structs for the various function calls are put below. These structs
+// are used to carry call information across the real/shadow boundaries.
+
+type shadowResourceProviderInput struct {
+ Config *ResourceConfig
+ Result *ResourceConfig
+ ResultErr error
+}
+
+type shadowResourceProviderValidate struct {
+ Config *ResourceConfig
+ ResultWarn []string
+ ResultErr []error
+}
+
+type shadowResourceProviderConfigure struct {
+ Config *ResourceConfig
+ Result error
+}
+
+type shadowResourceProviderValidateResourceWrapper struct {
+ sync.RWMutex
+
+ Calls []*shadowResourceProviderValidateResource
+}
+
+type shadowResourceProviderValidateResource struct {
+ Config *ResourceConfig
+ Warns []string
+ Errors []error
+}
+
+type shadowResourceProviderApply struct {
+ State *InstanceState
+ Diff *InstanceDiff
+ Result *InstanceState
+ ResultErr error
+}
+
+type shadowResourceProviderDiff struct {
+ State *InstanceState
+ Desired *ResourceConfig
+ Result *InstanceDiff
+ ResultErr error
+}
+
+type shadowResourceProviderRefresh struct {
+ State *InstanceState
+ Result *InstanceState
+ ResultErr error
+}
+
+type shadowResourceProviderValidateDataSourceWrapper struct {
+ sync.RWMutex
+
+ Calls []*shadowResourceProviderValidateDataSource
+}
+
+type shadowResourceProviderValidateDataSource struct {
+ Config *ResourceConfig
+ Warns []string
+ Errors []error
+}
+
+type shadowResourceProviderReadDataDiff struct {
+ Desired *ResourceConfig
+ Result *InstanceDiff
+ ResultErr error
+}
+
+type shadowResourceProviderReadDataApply struct {
+ Diff *InstanceDiff
+ Result *InstanceState
+ ResultErr error
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
new file mode 100644
index 00000000..60a49088
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
@@ -0,0 +1,282 @@
+package terraform
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/helper/shadow"
+)
+
+// shadowResourceProvisioner implements ResourceProvisioner for the shadow
+// eval context defined in eval_context_shadow.go.
+//
+// This is used to verify behavior with a real provisioner. This shouldn't
+// be used directly.
+type shadowResourceProvisioner interface {
+ ResourceProvisioner
+ Shadow
+}
+
+// newShadowResourceProvisioner creates a new shadowed ResourceProvisioner.
+func newShadowResourceProvisioner(
+ p ResourceProvisioner) (ResourceProvisioner, shadowResourceProvisioner) {
+ // Create the shared data
+ shared := shadowResourceProvisionerShared{
+ Validate: shadow.ComparedValue{
+ Func: shadowResourceProvisionerValidateCompare,
+ },
+ }
+
+ // Create the real provisioner that does actual work
+ real := &shadowResourceProvisionerReal{
+ ResourceProvisioner: p,
+ Shared: &shared,
+ }
+
+ // Create the shadow that watches the real value
+ shadow := &shadowResourceProvisionerShadow{
+ Shared: &shared,
+ }
+
+ return real, shadow
+}
+
+// shadowResourceProvisionerReal is the real resource provisioner. Function calls
+// to this will perform real work. This records the parameters and return
+// values and call order for the shadow to reproduce.
+type shadowResourceProvisionerReal struct {
+ ResourceProvisioner
+
+ Shared *shadowResourceProvisionerShared
+}
+
+func (p *shadowResourceProvisionerReal) Close() error {
+ var result error
+ if c, ok := p.ResourceProvisioner.(ResourceProvisionerCloser); ok {
+ result = c.Close()
+ }
+
+ p.Shared.CloseErr.SetValue(result)
+ return result
+}
+
+func (p *shadowResourceProvisionerReal) Validate(c *ResourceConfig) ([]string, []error) {
+ warns, errs := p.ResourceProvisioner.Validate(c)
+ p.Shared.Validate.SetValue(&shadowResourceProvisionerValidate{
+ Config: c,
+ ResultWarn: warns,
+ ResultErr: errs,
+ })
+
+ return warns, errs
+}
+
+func (p *shadowResourceProvisionerReal) Apply(
+ output UIOutput, s *InstanceState, c *ResourceConfig) error {
+ err := p.ResourceProvisioner.Apply(output, s, c)
+
+	// Write the result, grab a lock for writing. This should never
+ // block long since the operations below don't block.
+ p.Shared.ApplyLock.Lock()
+ defer p.Shared.ApplyLock.Unlock()
+
+ key := s.ID
+ raw, ok := p.Shared.Apply.ValueOk(key)
+ if !ok {
+ // Setup a new value
+ raw = &shadow.ComparedValue{
+ Func: shadowResourceProvisionerApplyCompare,
+ }
+
+ // Set it
+ p.Shared.Apply.SetValue(key, raw)
+ }
+
+ compareVal, ok := raw.(*shadow.ComparedValue)
+ if !ok {
+ // Just log and return so that we don't cause the real side
+ // any side effects.
+ log.Printf("[ERROR] unknown value in 'apply': %#v", raw)
+ return err
+ }
+
+ // Write the resulting value
+ compareVal.SetValue(&shadowResourceProvisionerApply{
+ Config: c,
+ ResultErr: err,
+ })
+
+ return err
+}
+
+func (p *shadowResourceProvisionerReal) Stop() error {
+ return p.ResourceProvisioner.Stop()
+}
+
+// shadowResourceProvisionerShadow is the shadow resource provisioner. Function
+// calls never affect real resources. This is paired with the "real" side
+// which must be called properly to enable recording.
+type shadowResourceProvisionerShadow struct {
+ Shared *shadowResourceProvisionerShared
+
+ Error error // Error is the list of errors from the shadow
+ ErrorLock sync.Mutex
+}
+
+type shadowResourceProvisionerShared struct {
+ // NOTE: Anytime a value is added here, be sure to add it to
+ // the Close() method so that it is closed.
+
+ CloseErr shadow.Value
+ Validate shadow.ComparedValue
+ Apply shadow.KeyedValue
+ ApplyLock sync.Mutex // For writing only
+}
+
+func (p *shadowResourceProvisionerShared) Close() error {
+ closers := []io.Closer{
+ &p.CloseErr,
+ }
+
+ for _, c := range closers {
+ // This should never happen, but we don't panic because a panic
+ // could affect the real behavior of Terraform and a shadow should
+ // never be able to do that.
+ if err := c.Close(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (p *shadowResourceProvisionerShadow) CloseShadow() error {
+ err := p.Shared.Close()
+ if err != nil {
+ err = fmt.Errorf("close error: %s", err)
+ }
+
+ return err
+}
+
+func (p *shadowResourceProvisionerShadow) ShadowError() error {
+ return p.Error
+}
+
+func (p *shadowResourceProvisionerShadow) Close() error {
+ v := p.Shared.CloseErr.Value()
+ if v == nil {
+ return nil
+ }
+
+ return v.(error)
+}
+
+func (p *shadowResourceProvisionerShadow) Validate(c *ResourceConfig) ([]string, []error) {
+ // Get the result of the validate call
+ raw := p.Shared.Validate.Value(c)
+ if raw == nil {
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProvisionerValidate)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'validate' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // We don't need to compare configurations because we key on the
+ // configuration so just return right away.
+ return result.ResultWarn, result.ResultErr
+}
+
+func (p *shadowResourceProvisionerShadow) Apply(
+ output UIOutput, s *InstanceState, c *ResourceConfig) error {
+ // Get the value based on the key
+ key := s.ID
+ raw := p.Shared.Apply.Value(key)
+ if raw == nil {
+ return nil
+ }
+
+ compareVal, ok := raw.(*shadow.ComparedValue)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'apply' shadow value: %#v", raw))
+ return nil
+ }
+
+ // With the compared value, we compare against our config
+ raw = compareVal.Value(c)
+ if raw == nil {
+ return nil
+ }
+
+ result, ok := raw.(*shadowResourceProvisionerApply)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'apply' shadow value: %#v", raw))
+ return nil
+ }
+
+ return result.ResultErr
+}
+
+func (p *shadowResourceProvisionerShadow) Stop() error {
+ // For the shadow, we always just return nil since a Stop indicates
+ // that we were interrupted and shadows are disabled during interrupts
+ // anyways.
+ return nil
+}
+
+// The structs for the various function calls are put below. These structs
+// are used to carry call information across the real/shadow boundaries.
+
+type shadowResourceProvisionerValidate struct {
+ Config *ResourceConfig
+ ResultWarn []string
+ ResultErr []error
+}
+
+type shadowResourceProvisionerApply struct {
+ Config *ResourceConfig
+ ResultErr error
+}
+
+func shadowResourceProvisionerValidateCompare(k, v interface{}) bool {
+ c, ok := k.(*ResourceConfig)
+ if !ok {
+ return false
+ }
+
+ result, ok := v.(*shadowResourceProvisionerValidate)
+ if !ok {
+ return false
+ }
+
+ return c.Equal(result.Config)
+}
+
+func shadowResourceProvisionerApplyCompare(k, v interface{}) bool {
+ c, ok := k.(*ResourceConfig)
+ if !ok {
+ return false
+ }
+
+ result, ok := v.(*shadowResourceProvisionerApply)
+ if !ok {
+ return false
+ }
+
+ return c.Equal(result.Config)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go
new file mode 100644
index 00000000..074b6824
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state.go
@@ -0,0 +1,2118 @@
+package terraform
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/go-version"
+ "github.com/hashicorp/terraform/config"
+ "github.com/mitchellh/copystructure"
+ "github.com/satori/go.uuid"
+)
+
+const (
+ // StateVersion is the current version for our state file
+ StateVersion = 3
+)
+
+// rootModulePath is the path of the root module
+var rootModulePath = []string{"root"}
+
+// normalizeModulePath takes a raw module path and returns a path that
+// has the rootModulePath prepended to it. If I could go back in time I
+// would've never had a rootModulePath (empty path would be root). We can
+// still fix this but thats a big refactor that my branch doesn't make sense
+// for. Instead, this function normalizes paths.
+func normalizeModulePath(p []string) []string {
+ k := len(rootModulePath)
+
+ // If we already have a root module prefix, we're done
+ if len(p) >= len(rootModulePath) {
+ if reflect.DeepEqual(p[:k], rootModulePath) {
+ return p
+ }
+ }
+
+ // None? Prefix it
+ result := make([]string, len(rootModulePath)+len(p))
+ copy(result, rootModulePath)
+ copy(result[k:], p)
+ return result
+}
+
+// State keeps track of a snapshot state-of-the-world that Terraform
+// can use to keep track of what real world resources it is actually
+// managing.
+type State struct {
+	// Version is the state file protocol version.
+	Version int `json:"version"`
+
+	// TFVersion is the version of Terraform that wrote this state.
+	TFVersion string `json:"terraform_version,omitempty"`
+
+	// Serial is incremented on any operation that modifies
+	// the State file. It is used to detect potentially conflicting
+	// updates.
+	Serial int64 `json:"serial"`
+
+	// Lineage is set when a new, blank state is created and then
+	// never updated. This allows us to determine whether the serials
+	// of two states can be meaningfully compared.
+	// Apart from the guarantee that collisions between two lineages
+	// are very unlikely, this value is opaque and external callers
+	// should only compare lineage strings byte-for-byte for equality.
+	Lineage string `json:"lineage"`
+
+	// Remote is used to track the metadata required to
+	// pull and push state files from a remote storage endpoint.
+	Remote *RemoteState `json:"remote,omitempty"`
+
+	// Backend tracks the configuration for the backend in use with
+	// this state. This is used to track any changes in the backend
+	// configuration.
+	Backend *BackendState `json:"backend,omitempty"`
+
+	// Modules contains all the modules in a breadth-first order
+	Modules []*ModuleState `json:"modules"`
+
+	// mu guards all fields above. NOTE: a State must therefore never be
+	// copied by value (copystructure with Lock:true is used for copies).
+	mu sync.Mutex
+}
+
+// Lock and Unlock expose the internal mutex so external holders (and
+// copystructure) can serialize access to the state.
+func (s *State) Lock()   { s.mu.Lock() }
+func (s *State) Unlock() { s.mu.Unlock() }
+
+// NewState is used to initialize a blank state
+func NewState() *State {
+	s := &State{}
+	s.init()
+	return s
+}
+
+// Children returns the ModuleStates that are direct children of
+// the given path. If the path is "root", for example, then children
+// returned might be "root.child", but not "root.child.grandchild".
+func (s *State) Children(path []string) []*ModuleState {
+	s.Lock()
+	defer s.Unlock()
+	// TODO: test
+
+	return s.children(path)
+}
+
+// children is the lock-free implementation of Children; callers must
+// hold s.mu.
+func (s *State) children(path []string) []*ModuleState {
+	result := make([]*ModuleState, 0)
+	for _, m := range s.Modules {
+		if m == nil {
+			continue
+		}
+
+		// Direct child: exactly one path component deeper than path...
+		if len(m.Path) != len(path)+1 {
+			continue
+		}
+		// ...and sharing path as its prefix.
+		if !reflect.DeepEqual(path, m.Path[:len(path)]) {
+			continue
+		}
+
+		result = append(result, m)
+	}
+
+	return result
+}
+
+// AddModule adds the module with the given path to the state.
+//
+// This should be the preferred method to add module states since it
+// allows us to optimize lookups later as well as control sorting.
+func (s *State) AddModule(path []string) *ModuleState {
+	s.Lock()
+	defer s.Unlock()
+
+	return s.addModule(path)
+}
+
+// addModule is the lock-free implementation of AddModule; callers must
+// hold s.mu. It is idempotent: an existing module at path is returned
+// unchanged.
+func (s *State) addModule(path []string) *ModuleState {
+	// check if the module exists first
+	m := s.moduleByPath(path)
+	if m != nil {
+		return m
+	}
+
+	m = &ModuleState{Path: path}
+	m.init()
+	s.Modules = append(s.Modules, m)
+	s.sort()
+	return m
+}
+
+// ModuleByPath is used to lookup the module state for the given path.
+// This should be the preferred lookup mechanism as it allows for future
+// lookup optimizations. It is safe to call on a nil *State (returns nil).
+func (s *State) ModuleByPath(path []string) *ModuleState {
+	if s == nil {
+		return nil
+	}
+	s.Lock()
+	defer s.Unlock()
+
+	return s.moduleByPath(path)
+}
+
+// moduleByPath is the lock-free implementation of ModuleByPath; callers
+// must hold s.mu. Returns nil when no module matches.
+func (s *State) moduleByPath(path []string) *ModuleState {
+	for _, mod := range s.Modules {
+		if mod == nil {
+			continue
+		}
+		// A nil Path is an invariant violation (modules are always
+		// created with a path), so fail loudly rather than mis-match.
+		if mod.Path == nil {
+			panic("missing module path")
+		}
+		if reflect.DeepEqual(mod.Path, path) {
+			return mod
+		}
+	}
+	return nil
+}
+
+// ModuleOrphans returns all the module orphans in this state by
+// returning their full paths. These paths can be used with ModuleByPath
+// to return the actual state.
+func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string {
+	s.Lock()
+	defer s.Unlock()
+
+	return s.moduleOrphans(path, c)
+
+}
+
+// moduleOrphans is the lock-free implementation of ModuleOrphans; callers
+// must hold s.mu. An "orphan" is a module present in state but absent
+// from the configuration c (c may be nil, in which case every child is
+// an orphan). Nested orphans are reported by their shallowest path only.
+func (s *State) moduleOrphans(path []string, c *config.Config) [][]string {
+	// direct keeps track of what direct children we have both in our config
+	// and in our state. childrenKeys keeps track of what isn't an orphan.
+	direct := make(map[string]struct{})
+	childrenKeys := make(map[string]struct{})
+	if c != nil {
+		for _, m := range c.Modules {
+			childrenKeys[m.Name] = struct{}{}
+			direct[m.Name] = struct{}{}
+		}
+	}
+
+	// Go over the direct children and find any that aren't in our keys.
+	var orphans [][]string
+	for _, m := range s.children(path) {
+		key := m.Path[len(m.Path)-1]
+
+		// Record that we found this key as a direct child. We use this
+		// later to find orphan nested modules.
+		direct[key] = struct{}{}
+
+		// If we have a direct child still in our config, it is not an orphan
+		if _, ok := childrenKeys[key]; ok {
+			continue
+		}
+
+		orphans = append(orphans, m.Path)
+	}
+
+	// Find the orphans that are nested...
+	for _, m := range s.Modules {
+		if m == nil {
+			continue
+		}
+
+		// We only want modules that are at least grandchildren
+		if len(m.Path) < len(path)+2 {
+			continue
+		}
+
+		// If it isn't part of our tree, continue
+		if !reflect.DeepEqual(path, m.Path[:len(path)]) {
+			continue
+		}
+
+		// If we have the direct child, then just skip it.
+		key := m.Path[len(path)]
+		if _, ok := direct[key]; ok {
+			continue
+		}
+
+		// Report the orphan by the direct-child path under which it nests.
+		orphanPath := m.Path[:len(path)+1]
+
+		// Don't double-add if we've already added this orphan (which can happen if
+		// there are multiple nested sub-modules that get orphaned together).
+		alreadyAdded := false
+		for _, o := range orphans {
+			if reflect.DeepEqual(o, orphanPath) {
+				alreadyAdded = true
+				break
+			}
+		}
+		if alreadyAdded {
+			continue
+		}
+
+		// Add this orphan
+		orphans = append(orphans, orphanPath)
+	}
+
+	return orphans
+}
+
+// Empty returns true if the state is empty. Safe on a nil receiver.
+func (s *State) Empty() bool {
+	if s == nil {
+		return true
+	}
+	s.Lock()
+	defer s.Unlock()
+
+	return len(s.Modules) == 0
+}
+
+// HasResources returns true if the state contains any resources.
+//
+// This is similar to !s.Empty, but returns true also in the case where the
+// state has modules but all of them are devoid of resources.
+//
+// NOTE(review): unlike the other iterators in this file, this loop runs
+// without holding s.mu and without a nil-module guard; a nil entry in
+// s.Modules would panic here. Confirm callers only use this on
+// validated/pruned state.
+func (s *State) HasResources() bool {
+	if s.Empty() {
+		return false
+	}
+
+	for _, mod := range s.Modules {
+		if len(mod.Resources) > 0 {
+			return true
+		}
+	}
+
+	return false
+}
+
+// IsRemote returns true if State represents a state that exists and is
+// remote.
+func (s *State) IsRemote() bool {
+	if s == nil {
+		return false
+	}
+	s.Lock()
+	defer s.Unlock()
+
+	if s.Remote == nil {
+		return false
+	}
+	if s.Remote.Type == "" {
+		return false
+	}
+
+	return true
+}
+
+// Validate validates the integrity of this state file.
+//
+// Certain properties of the statefile are expected by Terraform in order
+// to behave properly. The core of Terraform will assume that once it
+// receives a State structure that it has been validated. This validation
+// check should be called to ensure that.
+//
+// If this returns an error, then the user should be notified. The error
+// response will include detailed information on the nature of the error.
+// Multiple problems are accumulated via multierror rather than failing
+// on the first one.
+func (s *State) Validate() error {
+	s.Lock()
+	defer s.Unlock()
+
+	var result error
+
+	// !!!! FOR DEVELOPERS !!!!
+	//
+	// Any errors returned from this Validate function will BLOCK TERRAFORM
+	// from loading a state file. Therefore, this should only contain checks
+	// that are only resolvable through manual intervention.
+	//
+	// !!!! FOR DEVELOPERS !!!!
+
+	// Make sure there are no duplicate module states. We open a new
+	// block here so we can use basic variable names and future validations
+	// can do the same.
+	{
+		found := make(map[string]struct{})
+		for _, ms := range s.Modules {
+			if ms == nil {
+				continue
+			}
+
+			// Module identity is its dotted path.
+			key := strings.Join(ms.Path, ".")
+			if _, ok := found[key]; ok {
+				result = multierror.Append(result, fmt.Errorf(
+					strings.TrimSpace(stateValidateErrMultiModule), key))
+				continue
+			}
+
+			found[key] = struct{}{}
+		}
+	}
+
+	return result
+}
+
+// Remove removes the item in the state at the given address, returning
+// any errors that may have occurred.
+//
+// If the address references a module state or resource, it will delete
+// all children as well. To check what will be deleted, use a StateFilter
+// first.
+func (s *State) Remove(addr ...string) error {
+	s.Lock()
+	defer s.Unlock()
+
+	// Filter out what we need to delete
+	filter := &StateFilter{State: s}
+	results, err := filter.Filter(addr...)
+	if err != nil {
+		return err
+	}
+
+	// If we have no results, just exit early, we're not going to do anything.
+	// While what happens below is fairly fast, this is an important early
+	// exit since the prune below might modify the state more and we don't
+	// want to modify the state if we don't have to.
+	if len(results) == 0 {
+		return nil
+	}
+
+	// Go through each result and grab what we need
+	removed := make(map[interface{}]struct{})
+	for _, r := range results {
+		// Convert the path to our own type
+		// (StateFilter paths omit the leading "root" component.)
+		path := append([]string{"root"}, r.Path...)
+
+		// If we removed this already, then ignore
+		if _, ok := removed[r.Value]; ok {
+			continue
+		}
+
+		// If we removed the parent already, then ignore
+		if r.Parent != nil {
+			if _, ok := removed[r.Parent.Value]; ok {
+				continue
+			}
+		}
+
+		// Add this to the removed list
+		removed[r.Value] = struct{}{}
+
+		switch v := r.Value.(type) {
+		case *ModuleState:
+			s.removeModule(path, v)
+		case *ResourceState:
+			s.removeResource(path, v)
+		case *InstanceState:
+			// An instance always has a *ResourceState parent in filter
+			// results; the assertion would panic otherwise.
+			s.removeInstance(path, r.Parent.Value.(*ResourceState), v)
+		default:
+			return fmt.Errorf("unknown type to delete: %T", r.Value)
+		}
+	}
+
+	// Prune since the removal functions often do the bare minimum to
+	// remove a thing and may leave around dangling empty modules, resources,
+	// etc. Prune will clean that all up.
+	s.prune()
+
+	return nil
+}
+
+// removeModule drops v from s.Modules by identity. The paired assignment
+// shifts the tail left and nils the vacated final slot so the removed
+// pointer is not retained by the backing array.
+func (s *State) removeModule(path []string, v *ModuleState) {
+	for i, m := range s.Modules {
+		if m == v {
+			s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil
+			return
+		}
+	}
+}
+
+// removeResource drops v (matched by identity) from the module at path.
+// Missing module or resource is a silent no-op.
+func (s *State) removeResource(path []string, v *ResourceState) {
+	// Get the module this resource lives in. If it doesn't exist, we're done.
+	mod := s.moduleByPath(path)
+	if mod == nil {
+		return
+	}
+
+	// Find this resource. This is a O(N) lookup when if we had the key
+	// it could be O(1) but even with thousands of resources this shouldn't
+	// matter right now. We can easily up performance here when the time comes.
+	for k, r := range mod.Resources {
+		if r == v {
+			// Found it
+			delete(mod.Resources, k)
+			return
+		}
+	}
+}
+
+// removeInstance detaches v from r, either as the primary instance or
+// from the deposed list.
+func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) {
+	// Go through the resource and find the instance that matches this
+	// (if any) and remove it.
+
+	// Check primary
+	if r.Primary == v {
+		r.Primary = nil
+		return
+	}
+
+	// Check lists
+	lists := [][]*InstanceState{r.Deposed}
+	for _, is := range lists {
+		for i, instance := range is {
+			if instance == v {
+				// Found it, remove it
+				// NOTE(review): this reassigns only the local slice
+				// header `is`; r.Deposed keeps its original length,
+				// with elements shifted left in the shared backing
+				// array and a trailing nil entry left behind. The
+				// later prune() pass is what actually cleans this up
+				// upstream — confirm before relying on Deposed length
+				// here.
+				is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil
+
+				// Done
+				return
+			}
+		}
+	}
+}
+
+// RootModule returns the ModuleState for the root module
+// It panics if the state has not been initialized (no root module).
+func (s *State) RootModule() *ModuleState {
+	root := s.ModuleByPath(rootModulePath)
+	if root == nil {
+		panic("missing root module")
+	}
+	return root
+}
+
+// Equal tests if one state is equal to another.
+func (s *State) Equal(other *State) bool {
+	// If one is nil, we do a direct check
+	if s == nil || other == nil {
+		return s == other
+	}
+
+	s.Lock()
+	defer s.Unlock()
+	return s.equal(other)
+}
+
+// equal is the lock-free implementation of Equal; callers must hold
+// s.mu (other is read without locking).
+func (s *State) equal(other *State) bool {
+	if s == nil || other == nil {
+		return s == other
+	}
+
+	// If the versions are different, they're certainly not equal
+	if s.Version != other.Version {
+		return false
+	}
+
+	// If any of the modules are not equal, then this state isn't equal
+	if len(s.Modules) != len(other.Modules) {
+		return false
+	}
+	for _, m := range s.Modules {
+		// This isn't very optimal currently but works.
+		otherM := other.moduleByPath(m.Path)
+		if otherM == nil {
+			return false
+		}
+
+		// If they're not equal, then we're not equal!
+		if !m.Equal(otherM) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// StateAgeComparison is the tri-state result of State.CompareAges.
+type StateAgeComparison int
+
+const (
+	StateAgeEqual         StateAgeComparison = 0
+	StateAgeReceiverNewer StateAgeComparison = 1
+	StateAgeReceiverOlder StateAgeComparison = -1
+)
+
+// CompareAges compares one state with another for which is "older".
+//
+// This is a simple check using the state's serial, and is thus only as
+// reliable as the serial itself. In the normal case, only one state
+// exists for a given combination of lineage/serial, but Terraform
+// does not guarantee this and so the result of this method should be
+// used with care.
+//
+// Returns an integer that is negative if the receiver is older than
+// the argument, positive if the converse, and zero if they are equal.
+// An error is returned if the two states are not of the same lineage,
+// in which case the integer returned has no meaning.
+//
+// NOTE(review): other.Serial is read without holding other's lock.
+func (s *State) CompareAges(other *State) (StateAgeComparison, error) {
+	// nil states are "older" than actual states
+	switch {
+	case s != nil && other == nil:
+		return StateAgeReceiverNewer, nil
+	case s == nil && other != nil:
+		return StateAgeReceiverOlder, nil
+	case s == nil && other == nil:
+		return StateAgeEqual, nil
+	}
+
+	if !s.SameLineage(other) {
+		return StateAgeEqual, fmt.Errorf(
+			"can't compare two states of differing lineage",
+		)
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	switch {
+	case s.Serial < other.Serial:
+		return StateAgeReceiverOlder, nil
+	case s.Serial > other.Serial:
+		return StateAgeReceiverNewer, nil
+	default:
+		return StateAgeEqual, nil
+	}
+}
+
+// SameLineage returns true only if the state given in argument belongs
+// to the same "lineage" of states as the receiver.
+func (s *State) SameLineage(other *State) bool {
+	s.Lock()
+	defer s.Unlock()
+
+	// If one of the states has no lineage then it is assumed to predate
+	// this concept, and so we'll accept it as belonging to any lineage
+	// so that a lineage string can be assigned to newer versions
+	// without breaking compatibility with older versions.
+	if s.Lineage == "" || other.Lineage == "" {
+		return true
+	}
+
+	return s.Lineage == other.Lineage
+}
+
+// DeepCopy performs a deep copy of the state structure and returns
+// a new structure. Lock:true makes copystructure take the Lock/Unlock
+// methods into account while copying. Panics if the copy fails.
+func (s *State) DeepCopy() *State {
+	copy, err := copystructure.Config{Lock: true}.Copy(s)
+	if err != nil {
+		panic(err)
+	}
+
+	return copy.(*State)
+}
+
+// IncrementSerialMaybe increments the serial number of this state
+// if it different from the other state. The receiver's serial is first
+// raised to at least other's, so the result is strictly greater than
+// both when a difference is detected. No-op if s already has the
+// higher serial or if either state is nil.
+func (s *State) IncrementSerialMaybe(other *State) {
+	if s == nil {
+		return
+	}
+	if other == nil {
+		return
+	}
+	s.Lock()
+	defer s.Unlock()
+
+	if s.Serial > other.Serial {
+		return
+	}
+	if other.TFVersion != s.TFVersion || !s.equal(other) {
+		if other.Serial > s.Serial {
+			s.Serial = other.Serial
+		}
+
+		s.Serial++
+	}
+}
+
+// FromFutureTerraform checks if this state was written by a Terraform
+// version from the future (i.e. newer than this binary's SemVersion).
+//
+// NOTE(review): version.Must panics on a malformed TFVersion string;
+// callers presumably only reach this after the state has been parsed
+// and validated — confirm.
+func (s *State) FromFutureTerraform() bool {
+	s.Lock()
+	defer s.Unlock()
+
+	// No TF version means it is certainly from the past
+	if s.TFVersion == "" {
+		return false
+	}
+
+	v := version.Must(version.NewVersion(s.TFVersion))
+	return SemVersion.LessThan(v)
+}
+
+// Init normalizes the state in place: fills the version, ensures a root
+// module and a lineage exist, and initializes nested structures.
+func (s *State) Init() {
+	s.Lock()
+	defer s.Unlock()
+	s.init()
+}
+
+// init is the lock-free implementation of Init; callers must hold s.mu.
+// It is idempotent.
+func (s *State) init() {
+	if s.Version == 0 {
+		s.Version = StateVersion
+	}
+	if s.moduleByPath(rootModulePath) == nil {
+		s.addModule(rootModulePath)
+	}
+	s.ensureHasLineage()
+
+	for _, mod := range s.Modules {
+		if mod != nil {
+			mod.init()
+		}
+	}
+
+	if s.Remote != nil {
+		s.Remote.init()
+	}
+
+}
+
+// EnsureHasLineage assigns a fresh lineage ID if the state has none.
+func (s *State) EnsureHasLineage() {
+	s.Lock()
+	defer s.Unlock()
+
+	s.ensureHasLineage()
+}
+
+// ensureHasLineage is the lock-free implementation of EnsureHasLineage;
+// callers must hold s.mu. The lineage is a random UUIDv4 string.
+func (s *State) ensureHasLineage() {
+	if s.Lineage == "" {
+		s.Lineage = uuid.NewV4().String()
+		log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage)
+	} else {
+		log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage)
+	}
+}
+
+// AddModuleState insert this module state and override any existing ModuleState
+// at the same path. mod is initialized before s.mu is taken (ModuleState
+// has its own lock).
+func (s *State) AddModuleState(mod *ModuleState) {
+	mod.init()
+	s.Lock()
+	defer s.Unlock()
+
+	s.addModuleState(mod)
+}
+
+// addModuleState is the lock-free implementation of AddModuleState;
+// callers must hold s.mu. Replaces in place on path match, otherwise
+// appends and re-sorts.
+func (s *State) addModuleState(mod *ModuleState) {
+	for i, m := range s.Modules {
+		if reflect.DeepEqual(m.Path, mod.Path) {
+			s.Modules[i] = mod
+			return
+		}
+	}
+
+	s.Modules = append(s.Modules, mod)
+	s.sort()
+}
+
+// prune is used to remove any resources that are no longer required
+func (s *State) prune() {
+	if s == nil {
+		return
+	}
+
+	// Filter out empty modules.
+	// A module is always assumed to have a path, and it's length isn't always
+	// bounds checked later on. Modules may be "emptied" during destroy, but we
+	// never want to store those in the state.
+	// (Index-based loop with i-- because elements are removed in place.)
+	for i := 0; i < len(s.Modules); i++ {
+		if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 {
+			s.Modules = append(s.Modules[:i], s.Modules[i+1:]...)
+			i--
+		}
+	}
+
+	for _, mod := range s.Modules {
+		mod.prune()
+	}
+	if s.Remote != nil && s.Remote.Empty() {
+		s.Remote = nil
+	}
+}
+
+// sort sorts the modules by path (breadth-first order), then sorts each
+// module's own contents. Callers must hold s.mu.
+func (s *State) sort() {
+	sort.Sort(moduleStateSort(s.Modules))
+
+	// Allow modules to be sorted
+	for _, m := range s.Modules {
+		if m != nil {
+			m.sort()
+		}
+	}
+}
+
+// String renders a human-readable summary of the whole state: the root
+// module verbatim, then each child module under a "module.<path>:"
+// heading with its lines indented. Safe on a nil receiver.
+func (s *State) String() string {
+	if s == nil {
+		return "<nil>"
+	}
+	s.Lock()
+	defer s.Unlock()
+
+	var buf bytes.Buffer
+	for _, m := range s.Modules {
+		mStr := m.String()
+
+		// If we're the root module, we just write the output directly.
+		if reflect.DeepEqual(m.Path, rootModulePath) {
+			buf.WriteString(mStr + "\n")
+			continue
+		}
+
+		buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], ".")))
+
+		// Indent each line of the module output by two spaces.
+		// (This inner `s` intentionally shadows the receiver.)
+		s := bufio.NewScanner(strings.NewReader(mStr))
+		for s.Scan() {
+			text := s.Text()
+			if text != "" {
+				text = "  " + text
+			}
+
+			buf.WriteString(fmt.Sprintf("%s\n", text))
+		}
+	}
+
+	return strings.TrimSpace(buf.String())
+}
+
+// BackendState stores the configuration to connect to a remote backend.
+type BackendState struct {
+	Type   string                 `json:"type"`   // Backend type
+	Config map[string]interface{} `json:"config"` // Backend raw config
+
+	// Hash is the hash code to uniquely identify the original source
+	// configuration. We use this to detect when there is a change in
+	// configuration even when "type" isn't changed.
+	Hash uint64 `json:"hash"`
+}
+
+// Empty returns true if BackendState has no state. Safe on nil.
+func (s *BackendState) Empty() bool {
+	return s == nil || s.Type == ""
+}
+
+// Rehash returns a unique content hash for this backend's configuration
+// as a uint64 value.
+// The Hash stored in the backend state needs to match the config itself, but
+// we need to compare the backend config after it has been combined with all
+// options.
+// This function must match the implementation used by config.Backend.
+// A nil receiver hashes to 0.
+func (s *BackendState) Rehash() uint64 {
+	if s == nil {
+		return 0
+	}
+
+	// Reconstruct a config.Backend so the hash is computed by exactly
+	// the same code path as the original configuration.
+	cfg := config.Backend{
+		Type: s.Type,
+		RawConfig: &config.RawConfig{
+			Raw: s.Config,
+		},
+	}
+
+	return cfg.Rehash()
+}
+
+// RemoteState is used to track the information about a remote
+// state store that we push/pull state to.
+type RemoteState struct {
+	// Type controls the client we use for the remote state
+	Type string `json:"type"`
+
+	// Config is used to store arbitrary configuration that
+	// is type specific
+	Config map[string]string `json:"config"`
+
+	// mu guards the fields above; RemoteState must not be copied by value.
+	mu sync.Mutex
+}
+
+// Lock and Unlock expose the internal mutex (also used by copystructure).
+func (s *RemoteState) Lock()   { s.mu.Lock() }
+func (s *RemoteState) Unlock() { s.mu.Unlock() }
+
+// init ensures Config is a non-nil map so later writes don't panic.
+func (r *RemoteState) init() {
+	r.Lock()
+	defer r.Unlock()
+
+	if r.Config == nil {
+		r.Config = make(map[string]string)
+	}
+}
+
+// deepcopy returns an independent copy of r (Config map is duplicated).
+func (r *RemoteState) deepcopy() *RemoteState {
+	r.Lock()
+	defer r.Unlock()
+
+	confCopy := make(map[string]string, len(r.Config))
+	for k, v := range r.Config {
+		confCopy[k] = v
+	}
+	return &RemoteState{
+		Type:   r.Type,
+		Config: confCopy,
+	}
+}
+
+// Empty reports whether no remote is configured. Safe on nil.
+func (r *RemoteState) Empty() bool {
+	if r == nil {
+		return true
+	}
+	r.Lock()
+	defer r.Unlock()
+
+	return r.Type == ""
+}
+
+// Equals reports whether r and other have the same type and config.
+// NOTE(review): other is read without holding its lock.
+func (r *RemoteState) Equals(other *RemoteState) bool {
+	r.Lock()
+	defer r.Unlock()
+
+	if r.Type != other.Type {
+		return false
+	}
+	if len(r.Config) != len(other.Config) {
+		return false
+	}
+	for k, v := range r.Config {
+		if other.Config[k] != v {
+			return false
+		}
+	}
+	return true
+}
+
+// OutputState is used to track the state relevant to a single output.
+type OutputState struct {
+	// Sensitive describes whether the output is considered sensitive,
+	// which may lead to masking the value on screen in some cases.
+	Sensitive bool `json:"sensitive"`
+	// Type describes the structure of Value. Valid values are "string",
+	// "map" and "list"
+	Type string `json:"type"`
+	// Value contains the value of the output, in the structure described
+	// by the Type field.
+	Value interface{} `json:"value"`
+
+	// mu guards the fields above; OutputState must not be copied by value.
+	mu sync.Mutex
+}
+
+// Lock and Unlock expose the internal mutex (also used by copystructure).
+func (s *OutputState) Lock()   { s.mu.Lock() }
+func (s *OutputState) Unlock() { s.mu.Unlock() }
+
+// String renders the output value in Go syntax (for debugging).
+func (s *OutputState) String() string {
+	return fmt.Sprintf("%#v", s.Value)
+}
+
+// Equal compares two OutputState structures for equality. nil values are
+// considered equal.
+func (s *OutputState) Equal(other *OutputState) bool {
+	if s == nil && other == nil {
+		return true
+	}
+
+	if s == nil || other == nil {
+		return false
+	}
+	s.Lock()
+	defer s.Unlock()
+
+	if s.Type != other.Type {
+		return false
+	}
+
+	if s.Sensitive != other.Sensitive {
+		return false
+	}
+
+	if !reflect.DeepEqual(s.Value, other.Value) {
+		return false
+	}
+
+	return true
+}
+
+// deepcopy returns an independent copy of s via copystructure; panics
+// if the copy fails. Safe on nil (returns nil).
+func (s *OutputState) deepcopy() *OutputState {
+	if s == nil {
+		return nil
+	}
+
+	stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
+	if err != nil {
+		panic(fmt.Errorf("Error copying output value: %s", err))
+	}
+
+	return stateCopy.(*OutputState)
+}
+
+// ModuleState is used to track all the state relevant to a single
+// module. Previous to Terraform 0.3, all state belonged to the "root"
+// module.
+type ModuleState struct {
+	// Path is the import path from the root module. Modules imports are
+	// always disjoint, so the path represents amodule tree
+	Path []string `json:"path"`
+
+	// Outputs declared by the module and maintained for each module
+	// even though only the root module technically needs to be kept.
+	// This allows operators to inspect values at the boundaries.
+	Outputs map[string]*OutputState `json:"outputs"`
+
+	// Resources is a mapping of the logically named resource to
+	// the state of the resource. Each resource may actually have
+	// N instances underneath, although a user only needs to think
+	// about the 1:1 case.
+	Resources map[string]*ResourceState `json:"resources"`
+
+	// Dependencies are a list of things that this module relies on
+	// existing to remain intact. For example: an module may depend
+	// on a VPC ID given by an aws_vpc resource.
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a module that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on"`
+
+	// mu guards the fields above; ModuleState must not be copied by value.
+	mu sync.Mutex
+}
+
+// Lock and Unlock expose the internal mutex (also used by copystructure).
+func (s *ModuleState) Lock()   { s.mu.Lock() }
+func (s *ModuleState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether one module state is equal to another.
+// NOTE(review): it sorts both Dependencies slices in place (including
+// other's) and reads other without its lock.
+func (m *ModuleState) Equal(other *ModuleState) bool {
+	m.Lock()
+	defer m.Unlock()
+
+	// Paths must be equal
+	if !reflect.DeepEqual(m.Path, other.Path) {
+		return false
+	}
+
+	// Outputs must be equal
+	if len(m.Outputs) != len(other.Outputs) {
+		return false
+	}
+	for k, v := range m.Outputs {
+		if !other.Outputs[k].Equal(v) {
+			return false
+		}
+	}
+
+	// Dependencies must be equal. This sorts these in place but
+	// this shouldn't cause any problems.
+	sort.Strings(m.Dependencies)
+	sort.Strings(other.Dependencies)
+	if len(m.Dependencies) != len(other.Dependencies) {
+		return false
+	}
+	for i, d := range m.Dependencies {
+		if other.Dependencies[i] != d {
+			return false
+		}
+	}
+
+	// Resources must be equal
+	if len(m.Resources) != len(other.Resources) {
+		return false
+	}
+	for k, r := range m.Resources {
+		otherR, ok := other.Resources[k]
+		if !ok {
+			return false
+		}
+
+		if !r.Equal(otherR) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// IsRoot says whether or not this module diff is for the root module.
+func (m *ModuleState) IsRoot() bool {
+	m.Lock()
+	defer m.Unlock()
+	return reflect.DeepEqual(m.Path, rootModulePath)
+}
+
+// IsDescendent returns true if other is a descendent of this module,
+// i.e. other's path is strictly longer and has m.Path as a prefix.
+func (m *ModuleState) IsDescendent(other *ModuleState) bool {
+	m.Lock()
+	defer m.Unlock()
+
+	i := len(m.Path)
+	return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path)
+}
+
+// Orphans returns a list of keys of resources that are in the State
+// but aren't present in the configuration itself. Hence, these keys
+// represent the state of resources that are orphans.
+func (m *ModuleState) Orphans(c *config.Config) []string {
+	m.Lock()
+	defer m.Unlock()
+
+	// Start with every resource key as a candidate orphan...
+	keys := make(map[string]struct{})
+	for k, _ := range m.Resources {
+		keys[k] = struct{}{}
+	}
+
+	// ...then remove anything the config still declares, including
+	// counted instances ("<id>.<n>") of each declared resource.
+	if c != nil {
+		for _, r := range c.Resources {
+			delete(keys, r.Id())
+
+			for k, _ := range keys {
+				if strings.HasPrefix(k, r.Id()+".") {
+					delete(keys, k)
+				}
+			}
+		}
+	}
+
+	// Map iteration order is random, so the result order is unspecified.
+	result := make([]string, 0, len(keys))
+	for k, _ := range keys {
+		result = append(result, k)
+	}
+
+	return result
+}
+
+// View returns a view with the given resource prefix: a deep copy of m
+// containing only resources whose key is id or "id.<suffix>". Safe on
+// a nil receiver.
+func (m *ModuleState) View(id string) *ModuleState {
+	if m == nil {
+		return m
+	}
+
+	r := m.deepcopy()
+	for k, _ := range r.Resources {
+		if id == k || strings.HasPrefix(k, id+".") {
+			continue
+		}
+
+		delete(r.Resources, k)
+	}
+
+	return r
+}
+
+// init normalizes the module in place: nil collections become empty
+// ones (so JSON round-trips and later writes are safe) and each
+// resource is initialized. Idempotent.
+func (m *ModuleState) init() {
+	m.Lock()
+	defer m.Unlock()
+
+	if m.Path == nil {
+		m.Path = []string{}
+	}
+	if m.Outputs == nil {
+		m.Outputs = make(map[string]*OutputState)
+	}
+	if m.Resources == nil {
+		m.Resources = make(map[string]*ResourceState)
+	}
+
+	if m.Dependencies == nil {
+		m.Dependencies = make([]string, 0)
+	}
+
+	for _, rs := range m.Resources {
+		rs.init()
+	}
+}
+
+// deepcopy returns an independent copy of m via copystructure; panics
+// if the copy fails. Safe on nil (returns nil).
+func (m *ModuleState) deepcopy() *ModuleState {
+	if m == nil {
+		return nil
+	}
+
+	stateCopy, err := copystructure.Config{Lock: true}.Copy(m)
+	if err != nil {
+		panic(err)
+	}
+
+	return stateCopy.(*ModuleState)
+}
+
+// prune is used to remove any resources that are no longer required:
+// resources with no primary ID and no deposed instances, outputs whose
+// value is still unknown, and duplicate dependency entries.
+func (m *ModuleState) prune() {
+	m.Lock()
+	defer m.Unlock()
+
+	for k, v := range m.Resources {
+		if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 {
+			delete(m.Resources, k)
+			continue
+		}
+
+		v.prune()
+	}
+
+	for k, v := range m.Outputs {
+		if v.Value == config.UnknownVariableValue {
+			delete(m.Outputs, k)
+		}
+	}
+
+	m.Dependencies = uniqueStrings(m.Dependencies)
+}
+
+// sort sorts each resource's internal collections; map ordering itself
+// is handled at render time.
+func (m *ModuleState) sort() {
+	for _, v := range m.Resources {
+		v.sort()
+	}
+}
+
+// String renders a human-readable summary of the module: each resource
+// (sorted) with its ID, provider, attributes and deposed instances,
+// followed by sorted outputs.
+func (m *ModuleState) String() string {
+	m.Lock()
+	defer m.Unlock()
+
+	var buf bytes.Buffer
+
+	if len(m.Resources) == 0 {
+		buf.WriteString("<no state>")
+	}
+
+	// Sort resource keys for deterministic output.
+	names := make([]string, 0, len(m.Resources))
+	for name, _ := range m.Resources {
+		names = append(names, name)
+	}
+
+	sort.Sort(resourceNameSort(names))
+
+	for _, k := range names {
+		rs := m.Resources[k]
+		var id string
+		if rs.Primary != nil {
+			id = rs.Primary.ID
+		}
+		if id == "" {
+			id = "<not created>"
+		}
+
+		taintStr := ""
+		// NOTE(review): rs.Primary is dereferenced here without the nil
+		// guard applied just above for id — a resource with a nil
+		// Primary (deposed-only) would panic. Confirm prune() always
+		// runs before String().
+		if rs.Primary.Tainted {
+			taintStr = " (tainted)"
+		}
+
+		deposedStr := ""
+		if len(rs.Deposed) > 0 {
+			deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed))
+		}
+
+		buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
+		buf.WriteString(fmt.Sprintf("  ID = %s\n", id))
+		if rs.Provider != "" {
+			buf.WriteString(fmt.Sprintf("  provider = %s\n", rs.Provider))
+		}
+
+		// Attributes are printed sorted, with "id" suppressed since it
+		// is already shown above.
+		var attributes map[string]string
+		if rs.Primary != nil {
+			attributes = rs.Primary.Attributes
+		}
+		attrKeys := make([]string, 0, len(attributes))
+		for ak, _ := range attributes {
+			if ak == "id" {
+				continue
+			}
+
+			attrKeys = append(attrKeys, ak)
+		}
+
+		sort.Strings(attrKeys)
+
+		for _, ak := range attrKeys {
+			av := attributes[ak]
+			buf.WriteString(fmt.Sprintf("  %s = %s\n", ak, av))
+		}
+
+		// Deposed instances are numbered from 1 for display.
+		for idx, t := range rs.Deposed {
+			taintStr := ""
+			if t.Tainted {
+				taintStr = " (tainted)"
+			}
+			buf.WriteString(fmt.Sprintf("  Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr))
+		}
+
+		if len(rs.Dependencies) > 0 {
+			buf.WriteString(fmt.Sprintf("\n  Dependencies:\n"))
+			for _, dep := range rs.Dependencies {
+				buf.WriteString(fmt.Sprintf("    %s\n", dep))
+			}
+		}
+	}
+
+	if len(m.Outputs) > 0 {
+		buf.WriteString("\nOutputs:\n\n")
+
+		ks := make([]string, 0, len(m.Outputs))
+		for k, _ := range m.Outputs {
+			ks = append(ks, k)
+		}
+
+		sort.Strings(ks)
+
+		for _, k := range ks {
+			v := m.Outputs[k]
+			// Outputs of types other than string/list/map are silently
+			// skipped by this switch.
+			switch vTyped := v.Value.(type) {
+			case string:
+				buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+			case []interface{}:
+				buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+			case map[string]interface{}:
+				var mapKeys []string
+				for key, _ := range vTyped {
+					mapKeys = append(mapKeys, key)
+				}
+				sort.Strings(mapKeys)
+
+				var mapBuf bytes.Buffer
+				mapBuf.WriteString("{")
+				for _, key := range mapKeys {
+					mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key]))
+				}
+				mapBuf.WriteString("}")
+
+				buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String()))
+			}
+		}
+	}
+
+	return buf.String()
+}
+
+// ResourceStateKey is a structured representation of the key used for the
+// ModuleState.Resources mapping
+type ResourceStateKey struct {
+	Name string
+	Type string
+	Mode config.ResourceMode
+	// Index is the count index, or -1 for an uncounted resource.
+	Index int
+}
+
+// Equal determines whether two ResourceStateKeys are the same.
+// A nil key on either side compares unequal (even nil vs nil).
+func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool {
+	if rsk == nil || other == nil {
+		return false
+	}
+	if rsk.Mode != other.Mode {
+		return false
+	}
+	if rsk.Type != other.Type {
+		return false
+	}
+	if rsk.Name != other.Name {
+		return false
+	}
+	if rsk.Index != other.Index {
+		return false
+	}
+	return true
+}
+
+// String renders the key in map-key form: "[data.]type.name[.index]".
+// A nil key renders as "". Panics on an unknown resource mode.
+func (rsk *ResourceStateKey) String() string {
+	if rsk == nil {
+		return ""
+	}
+	var prefix string
+	switch rsk.Mode {
+	case config.ManagedResourceMode:
+		prefix = ""
+	case config.DataResourceMode:
+		prefix = "data."
+	default:
+		panic(fmt.Errorf("unknown resource mode %s", rsk.Mode))
+	}
+	if rsk.Index == -1 {
+		return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name)
+	}
+	return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index)
+}
+
+// ParseResourceStateKey accepts a key in the format used by
+// ModuleState.Resources and returns a resource name and resource index. In the
+// state, a resource has the format "type.name.index" or "type.name". In the
+// latter case, the index is returned as -1. An optional leading "data."
+// selects DataResourceMode. Malformed keys return an error.
+func ParseResourceStateKey(k string) (*ResourceStateKey, error) {
+	parts := strings.Split(k, ".")
+	mode := config.ManagedResourceMode
+	if len(parts) > 0 && parts[0] == "data" {
+		mode = config.DataResourceMode
+		// Don't need the constant "data" prefix for parsing
+		// now that we've figured out the mode.
+		parts = parts[1:]
+	}
+	// After stripping any "data." prefix we need type.name or
+	// type.name.index — i.e. 2 or 3 dot-separated parts.
+	if len(parts) < 2 || len(parts) > 3 {
+		return nil, fmt.Errorf("Malformed resource state key: %s", k)
+	}
+	rsk := &ResourceStateKey{
+		Mode:  mode,
+		Type:  parts[0],
+		Name:  parts[1],
+		Index: -1,
+	}
+	if len(parts) == 3 {
+		index, err := strconv.Atoi(parts[2])
+		if err != nil {
+			return nil, fmt.Errorf("Malformed resource state key index: %s", k)
+		}
+		rsk.Index = index
+	}
+	return rsk, nil
+}
+
+// ResourceState holds the state of a resource that is used so that
+// a provider can find and manage an existing resource as well as for
+// storing attributes that are used to populate variables of child
+// resources.
+//
+// Attributes has attributes about the created resource that are
+// queryable in interpolation: "${type.id.attr}"
+//
+// Extra is just extra data that a provider can return that we store
+// for later, but is not exposed in any way to the user.
+//
+type ResourceState struct {
+ // This is filled in and managed by Terraform, and is the resource
+ // type itself such as "mycloud_instance". If a resource provider sets
+ // this value, it won't be persisted.
+ Type string `json:"type"`
+
+ // Dependencies are a list of things that this resource relies on
+ // existing to remain intact. For example: an AWS instance might
+ // depend on a subnet (which itself might depend on a VPC, and so
+ // on).
+ //
+ // Terraform uses this information to build valid destruction
+ // orders and to warn the user if they're destroying a resource that
+ // another resource depends on.
+ //
+ // Things can be put into this list that may not be managed by
+ // Terraform. If Terraform doesn't find a matching ID in the
+ // overall state, then it assumes it isn't managed and doesn't
+ // worry about it.
+ Dependencies []string `json:"depends_on"`
+
+ // Primary is the current active instance for this resource.
+ // It can be replaced but only after a successful creation.
+ // This is the instances on which providers will act.
+ Primary *InstanceState `json:"primary"`
+
+ // Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+ // Primary is Deposed to get it out of the way for the replacement Primary to
+ // be created by Apply. If the replacement Primary creates successfully, the
+ // Deposed instance is cleaned up.
+ //
+ // If there were problems creating the replacement Primary, the Deposed
+ // instance and the (now tainted) replacement Primary will be swapped so the
+ // tainted replacement will be cleaned up instead.
+ //
+ // An instance will remain in the Deposed list until it is successfully
+ // destroyed and purged.
+ Deposed []*InstanceState `json:"deposed"`
+
+ // Provider is used when a resource is connected to a provider with an alias.
+ // If this string is empty, the resource is connected to the default provider,
+ // e.g. "aws_instance" goes with the "aws" provider.
+ // If the resource block contained a "provider" key, that value will be set here.
+ Provider string `json:"provider"`
+
+ mu sync.Mutex
+}
+
+func (s *ResourceState) Lock() { s.mu.Lock() }
+func (s *ResourceState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether two ResourceStates are equal.
+func (s *ResourceState) Equal(other *ResourceState) bool {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Type != other.Type {
+ return false
+ }
+
+ if s.Provider != other.Provider {
+ return false
+ }
+
+ // Dependencies must be equal
+ sort.Strings(s.Dependencies)
+ sort.Strings(other.Dependencies)
+ if len(s.Dependencies) != len(other.Dependencies) {
+ return false
+ }
+ for i, d := range s.Dependencies {
+ if other.Dependencies[i] != d {
+ return false
+ }
+ }
+
+ // States must be equal
+ if !s.Primary.Equal(other.Primary) {
+ return false
+ }
+
+ return true
+}
+
+// Taint marks a resource as tainted.
+func (s *ResourceState) Taint() {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Primary != nil {
+ s.Primary.Tainted = true
+ }
+}
+
+// Untaint unmarks a resource as tainted.
+func (s *ResourceState) Untaint() {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Primary != nil {
+ s.Primary.Tainted = false
+ }
+}
+
+func (s *ResourceState) init() {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Primary == nil {
+ s.Primary = &InstanceState{}
+ }
+ s.Primary.init()
+
+ if s.Dependencies == nil {
+ s.Dependencies = []string{}
+ }
+
+ if s.Deposed == nil {
+ s.Deposed = make([]*InstanceState, 0)
+ }
+}
+
+func (s *ResourceState) deepcopy() *ResourceState {
+ copy, err := copystructure.Config{Lock: true}.Copy(s)
+ if err != nil {
+ panic(err)
+ }
+
+ return copy.(*ResourceState)
+}
+
+// prune is used to remove any instances that are no longer required
+func (s *ResourceState) prune() {
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.Deposed)
+ for i := 0; i < n; i++ {
+ inst := s.Deposed[i]
+ if inst == nil || inst.ID == "" {
+ copy(s.Deposed[i:], s.Deposed[i+1:])
+ s.Deposed[n-1] = nil
+ n--
+ i--
+ }
+ }
+ s.Deposed = s.Deposed[:n]
+
+ s.Dependencies = uniqueStrings(s.Dependencies)
+}
+
+func (s *ResourceState) sort() {
+ s.Lock()
+ defer s.Unlock()
+
+ sort.Strings(s.Dependencies)
+}
+
+func (s *ResourceState) String() string {
+ s.Lock()
+ defer s.Unlock()
+
+ var buf bytes.Buffer
+ buf.WriteString(fmt.Sprintf("Type = %s", s.Type))
+ return buf.String()
+}
+
+// InstanceState is used to track the unique state information belonging
+// to a given instance.
+type InstanceState struct {
+ // A unique ID for this resource. This is opaque to Terraform
+ // and is only meant as a lookup mechanism for the providers.
+ ID string `json:"id"`
+
+ // Attributes are basic information about the resource. Any keys here
+ // are accessible in variable format within Terraform configurations:
+ // ${resourcetype.name.attribute}.
+ Attributes map[string]string `json:"attributes"`
+
+ // Ephemeral is used to store any state associated with this instance
+ // that is necessary for the Terraform run to complete, but is not
+ // persisted to a state file.
+ Ephemeral EphemeralState `json:"-"`
+
+ // Meta is a simple K/V map that is persisted to the State but otherwise
+ // ignored by Terraform core. It's meant to be used for accounting by
+ // external client code. The value here must only contain Go primitives
+ // and collections.
+ Meta map[string]interface{} `json:"meta"`
+
+ // Tainted is used to mark a resource for recreation.
+ Tainted bool `json:"tainted"`
+
+ mu sync.Mutex
+}
+
+func (s *InstanceState) Lock() { s.mu.Lock() }
+func (s *InstanceState) Unlock() { s.mu.Unlock() }
+
+func (s *InstanceState) init() {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Attributes == nil {
+ s.Attributes = make(map[string]string)
+ }
+ if s.Meta == nil {
+ s.Meta = make(map[string]interface{})
+ }
+ s.Ephemeral.init()
+}
+
+// Copy all the Fields from another InstanceState
+func (s *InstanceState) Set(from *InstanceState) {
+ s.Lock()
+ defer s.Unlock()
+
+ from.Lock()
+ defer from.Unlock()
+
+ s.ID = from.ID
+ s.Attributes = from.Attributes
+ s.Ephemeral = from.Ephemeral
+ s.Meta = from.Meta
+ s.Tainted = from.Tainted
+}
+
+func (s *InstanceState) DeepCopy() *InstanceState {
+ copy, err := copystructure.Config{Lock: true}.Copy(s)
+ if err != nil {
+ panic(err)
+ }
+
+ return copy.(*InstanceState)
+}
+
+func (s *InstanceState) Empty() bool {
+ if s == nil {
+ return true
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ return s.ID == ""
+}
+
+func (s *InstanceState) Equal(other *InstanceState) bool {
+ // Short circuit some nil checks
+ if s == nil || other == nil {
+ return s == other
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ // IDs must be equal
+ if s.ID != other.ID {
+ return false
+ }
+
+ // Attributes must be equal
+ if len(s.Attributes) != len(other.Attributes) {
+ return false
+ }
+ for k, v := range s.Attributes {
+ otherV, ok := other.Attributes[k]
+ if !ok {
+ return false
+ }
+
+ if v != otherV {
+ return false
+ }
+ }
+
+ // Meta must be equal
+ if len(s.Meta) != len(other.Meta) {
+ return false
+ }
+ if s.Meta != nil && other.Meta != nil {
+ // We only do the deep check if both are non-nil. If one is nil
+ // we treat it as equal since their lengths are both zero (check
+ // above).
+ if !reflect.DeepEqual(s.Meta, other.Meta) {
+ return false
+ }
+ }
+
+ if s.Tainted != other.Tainted {
+ return false
+ }
+
+ return true
+}
+
+// MergeDiff takes a ResourceDiff and merges the attributes into
+// this resource state in order to generate a new state. This new
+// state can be used to provide updated attribute lookups for
+// variable interpolation.
+//
+// If the diff attribute requires computing the value, and hence
+// won't be available until apply, the value is replaced with the
+// computeID.
+func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
+ result := s.DeepCopy()
+ if result == nil {
+ result = new(InstanceState)
+ }
+ result.init()
+
+ if s != nil {
+ s.Lock()
+ defer s.Unlock()
+ for k, v := range s.Attributes {
+ result.Attributes[k] = v
+ }
+ }
+ if d != nil {
+ for k, diff := range d.CopyAttributes() {
+ if diff.NewRemoved {
+ delete(result.Attributes, k)
+ continue
+ }
+ if diff.NewComputed {
+ result.Attributes[k] = config.UnknownVariableValue
+ continue
+ }
+
+ result.Attributes[k] = diff.New
+ }
+ }
+
+ return result
+}
+
+func (s *InstanceState) String() string {
+ s.Lock()
+ defer s.Unlock()
+
+ var buf bytes.Buffer
+
+ if s == nil || s.ID == "" {
+ return "<not created>"
+ }
+
+ buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID))
+
+ attributes := s.Attributes
+ attrKeys := make([]string, 0, len(attributes))
+ for ak, _ := range attributes {
+ if ak == "id" {
+ continue
+ }
+
+ attrKeys = append(attrKeys, ak)
+ }
+ sort.Strings(attrKeys)
+
+ for _, ak := range attrKeys {
+ av := attributes[ak]
+ buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av))
+ }
+
+ buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted))
+
+ return buf.String()
+}
+
+// EphemeralState is used for transient state that is only kept in-memory
+type EphemeralState struct {
+ // ConnInfo is used for the providers to export information which is
+ // used to connect to the resource for provisioning. For example,
+ // this could contain SSH or WinRM credentials.
+ ConnInfo map[string]string `json:"-"`
+
+ // Type is used to specify the resource type for this instance. This is only
+ // required for import operations (as documented). If the documentation
+ // doesn't state that you need to set this, then don't worry about
+ // setting it.
+ Type string `json:"-"`
+}
+
+func (e *EphemeralState) init() {
+ if e.ConnInfo == nil {
+ e.ConnInfo = make(map[string]string)
+ }
+}
+
+func (e *EphemeralState) DeepCopy() *EphemeralState {
+ copy, err := copystructure.Config{Lock: true}.Copy(e)
+ if err != nil {
+ panic(err)
+ }
+
+ return copy.(*EphemeralState)
+}
+
+type jsonStateVersionIdentifier struct {
+ Version int `json:"version"`
+}
+
+// Check if this is a V0 format - the magic bytes at the start of the file
+// should be "tfstate" if so. We no longer support upgrading this type of
+// state but return an error message explaining to a user how they can
+// upgrade via the 0.6.x series.
+func testForV0State(buf *bufio.Reader) error {
+ start, err := buf.Peek(len("tfstate"))
+ if err != nil {
+ return fmt.Errorf("Failed to check for magic bytes: %v", err)
+ }
+ if string(start) == "tfstate" {
+ return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" +
+ "format which was used prior to Terraform 0.3. Please upgrade\n" +
+ "this state file using Terraform 0.6.16 prior to using it with\n" +
+ "Terraform 0.7.")
+ }
+
+ return nil
+}
+
+// ErrNoState is returned by ReadState when the io.Reader contains no data
+var ErrNoState = errors.New("no state")
+
+// ReadState reads a state structure out of a reader in the format that
+// was written by WriteState.
+func ReadState(src io.Reader) (*State, error) {
+ buf := bufio.NewReader(src)
+ if _, err := buf.Peek(1); err != nil {
+ // the error is either io.EOF or "invalid argument", and both are from
+ // an empty state.
+ return nil, ErrNoState
+ }
+
+ if err := testForV0State(buf); err != nil {
+ return nil, err
+ }
+
+ // If we are JSON we buffer the whole thing in memory so we can read it twice.
+ // This is suboptimal, but will work for now.
+ jsonBytes, err := ioutil.ReadAll(buf)
+ if err != nil {
+ return nil, fmt.Errorf("Reading state file failed: %v", err)
+ }
+
+ versionIdentifier := &jsonStateVersionIdentifier{}
+ if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil {
+ return nil, fmt.Errorf("Decoding state file version failed: %v", err)
+ }
+
+ var result *State
+ switch versionIdentifier.Version {
+ case 0:
+ return nil, fmt.Errorf("State version 0 is not supported as JSON.")
+ case 1:
+ v1State, err := ReadStateV1(jsonBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ v2State, err := upgradeStateV1ToV2(v1State)
+ if err != nil {
+ return nil, err
+ }
+
+ v3State, err := upgradeStateV2ToV3(v2State)
+ if err != nil {
+ return nil, err
+ }
+
+ // increment the Serial whenever we upgrade state
+ v3State.Serial++
+ result = v3State
+ case 2:
+ v2State, err := ReadStateV2(jsonBytes)
+ if err != nil {
+ return nil, err
+ }
+ v3State, err := upgradeStateV2ToV3(v2State)
+ if err != nil {
+ return nil, err
+ }
+
+ v3State.Serial++
+ result = v3State
+ case 3:
+ v3State, err := ReadStateV3(jsonBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ result = v3State
+ default:
+ return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+ SemVersion.String(), versionIdentifier.Version)
+ }
+
+ // If we reached this place we must have a result set
+ if result == nil {
+ panic("resulting state in load not set, assertion failed")
+ }
+
+ // Prune the state when we read it. It's possible to write unpruned states or
+ // for a user to make a state unpruned (nil-ing a module state for example).
+ result.prune()
+
+ // Validate the state file is valid
+ if err := result.Validate(); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+func ReadStateV1(jsonBytes []byte) (*stateV1, error) {
+ v1State := &stateV1{}
+ if err := json.Unmarshal(jsonBytes, v1State); err != nil {
+ return nil, fmt.Errorf("Decoding state file failed: %v", err)
+ }
+
+ if v1State.Version != 1 {
+ return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+
+ "read %d, expected 1", v1State.Version)
+ }
+
+ return v1State, nil
+}
+
+func ReadStateV2(jsonBytes []byte) (*State, error) {
+ state := &State{}
+ if err := json.Unmarshal(jsonBytes, state); err != nil {
+ return nil, fmt.Errorf("Decoding state file failed: %v", err)
+ }
+
+ // Check the version, this to ensure we don't read a future
+ // version that we don't understand
+ if state.Version > StateVersion {
+ return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+ SemVersion.String(), state.Version)
+ }
+
+ // Make sure the version is semantic
+ if state.TFVersion != "" {
+ if _, err := version.NewVersion(state.TFVersion); err != nil {
+ return nil, fmt.Errorf(
+ "State contains invalid version: %s\n\n"+
+ "Terraform validates the version format prior to writing it. This\n"+
+ "means that this is invalid of the state becoming corrupted through\n"+
+ "some external means. Please manually modify the Terraform version\n"+
+ "field to be a proper semantic version.",
+ state.TFVersion)
+ }
+ }
+
+ // catch any uninitialized fields in the state
+ state.init()
+
+ // Sort it
+ state.sort()
+
+ return state, nil
+}
+
+func ReadStateV3(jsonBytes []byte) (*State, error) {
+ state := &State{}
+ if err := json.Unmarshal(jsonBytes, state); err != nil {
+ return nil, fmt.Errorf("Decoding state file failed: %v", err)
+ }
+
+ // Check the version, this to ensure we don't read a future
+ // version that we don't understand
+ if state.Version > StateVersion {
+ return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+ SemVersion.String(), state.Version)
+ }
+
+ // Make sure the version is semantic
+ if state.TFVersion != "" {
+ if _, err := version.NewVersion(state.TFVersion); err != nil {
+ return nil, fmt.Errorf(
+ "State contains invalid version: %s\n\n"+
+ "Terraform validates the version format prior to writing it. This\n"+
+ "means that this is invalid of the state becoming corrupted through\n"+
+ "some external means. Please manually modify the Terraform version\n"+
+ "field to be a proper semantic version.",
+ state.TFVersion)
+ }
+ }
+
+ // catch any uninitialized fields in the state
+ state.init()
+
+ // Sort it
+ state.sort()
+
+ // Now we write the state back out to detect any changes in normalization.
+ // If our state is now written out differently, bump the serial number to
+ // prevent conflicts.
+ var buf bytes.Buffer
+ err := WriteState(state, &buf)
+ if err != nil {
+ return nil, err
+ }
+
+ if !bytes.Equal(jsonBytes, buf.Bytes()) {
+ log.Println("[INFO] state modified during read or write. incrementing serial number")
+ state.Serial++
+ }
+
+ return state, nil
+}
+
+// WriteState writes a state somewhere in a binary format.
+func WriteState(d *State, dst io.Writer) error {
+ // writing a nil state is a noop.
+ if d == nil {
+ return nil
+ }
+
+ // make sure we have no uninitialized fields
+ d.init()
+
+ // Make sure it is sorted
+ d.sort()
+
+ // Ensure the version is set
+ d.Version = StateVersion
+
+ // If the TFVersion is set, verify it. We used to just set the version
+ // here, but this isn't safe since it changes the MD5 sum on some remote
+ // state storage backends such as Atlas. We now leave it be if needed.
+ if d.TFVersion != "" {
+ if _, err := version.NewVersion(d.TFVersion); err != nil {
+ return fmt.Errorf(
+ "Error writing state, invalid version: %s\n\n"+
+ "The Terraform version when writing the state must be a semantic\n"+
+ "version.",
+ d.TFVersion)
+ }
+ }
+
+ // Encode the data in a human-friendly way
+ data, err := json.MarshalIndent(d, "", " ")
+ if err != nil {
+ return fmt.Errorf("Failed to encode state: %s", err)
+ }
+
+ // We append a newline to the data because MarshalIndent doesn't
+ data = append(data, '\n')
+
+ // Write the data out to the dst
+ if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil {
+ return fmt.Errorf("Failed to write state: %v", err)
+ }
+
+ return nil
+}
+
+// resourceNameSort implements the sort.Interface to sort name parts lexically for
+// strings and numerically for integer indexes.
+type resourceNameSort []string
+
+func (r resourceNameSort) Len() int { return len(r) }
+func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
+
+func (r resourceNameSort) Less(i, j int) bool {
+ iParts := strings.Split(r[i], ".")
+ jParts := strings.Split(r[j], ".")
+
+ end := len(iParts)
+ if len(jParts) < end {
+ end = len(jParts)
+ }
+
+ for idx := 0; idx < end; idx++ {
+ if iParts[idx] == jParts[idx] {
+ continue
+ }
+
+ // sort on the first non-matching part
+ iInt, iIntErr := strconv.Atoi(iParts[idx])
+ jInt, jIntErr := strconv.Atoi(jParts[idx])
+
+ switch {
+ case iIntErr == nil && jIntErr == nil:
+ // sort numerically if both parts are integers
+ return iInt < jInt
+ case iIntErr == nil:
+ // numbers sort before strings
+ return true
+ case jIntErr == nil:
+ return false
+ default:
+ return iParts[idx] < jParts[idx]
+ }
+ }
+
+ return r[i] < r[j]
+}
+
+// moduleStateSort implements sort.Interface to sort module states
+type moduleStateSort []*ModuleState
+
+func (s moduleStateSort) Len() int {
+ return len(s)
+}
+
+func (s moduleStateSort) Less(i, j int) bool {
+ a := s[i]
+ b := s[j]
+
+ // If either is nil, then the nil one is "less" than
+ if a == nil || b == nil {
+ return a == nil
+ }
+
+ // If the lengths are different, then the shorter one always wins
+ if len(a.Path) != len(b.Path) {
+ return len(a.Path) < len(b.Path)
+ }
+
+ // Otherwise, compare lexically
+ return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
+}
+
+func (s moduleStateSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+const stateValidateErrMultiModule = `
+Multiple modules with the same path: %s
+
+This means that there are multiple entries in the "modules" field
+in your state file that point to the same module. This will cause Terraform
+to behave in unexpected and error prone ways and is invalid. Please back up
+and modify your state file manually to resolve this.
+`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
new file mode 100644
index 00000000..11637303
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
@@ -0,0 +1,374 @@
+package terraform
+
+import "fmt"
+
+// Add adds the item in the state at the given address.
+//
+// The item can be a ModuleState, ResourceState, or InstanceState. Depending
+// on the item type, the address may or may not be valid. For example, a
+// module cannot be moved to a resource address, however a resource can be
+// moved to a module address (it retains the same name, under that resource).
+//
+// The item can also be a []*ModuleState, which is the case for nested
+// modules. In this case, Add will expect the zero-index to be the top-most
+// module to add and will only nest children from there. For semantics, this
+// is equivalent to module => module.
+//
+// The full semantics of Add:
+//
+// ┌───────────────────┬───────────────────┬───────────────────┐
+// │ Module Address │ Resource Address │ Instance Address │
+// ┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤
+// │ ModuleState │ ✓ │ x │ x │
+// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
+// │ ResourceState │ ✓ │ ✓ │ maybe* │
+// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
+// │ Instance State │ ✓ │ ✓ │ ✓ │
+// └─────────────────┴───────────────────┴───────────────────┴───────────────────┘
+//
+// *maybe - Resources can be added at an instance address only if the resource
+// represents a single instance (primary). Example:
+// "aws_instance.foo" can be moved to "aws_instance.bar.tainted"
+//
+func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error {
+ // Parse the address
+
+ toAddr, err := ParseResourceAddress(toAddrRaw)
+ if err != nil {
+ return err
+ }
+
+ // Parse the from address
+ fromAddr, err := ParseResourceAddress(fromAddrRaw)
+ if err != nil {
+ return err
+ }
+
+ // Determine the types
+ from := detectValueAddLoc(raw)
+ to := detectAddrAddLoc(toAddr)
+
+ // Find the function to do this
+ fromMap, ok := stateAddFuncs[from]
+ if !ok {
+ return fmt.Errorf("invalid source to add to state: %T", raw)
+ }
+ f, ok := fromMap[to]
+ if !ok {
+ return fmt.Errorf("invalid destination: %s (%d)", toAddr, to)
+ }
+
+ // Call the migrator
+ if err := f(s, fromAddr, toAddr, raw); err != nil {
+ return err
+ }
+
+ // Prune the state
+ s.prune()
+ return nil
+}
+
+func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+ // raw can be either *ModuleState or []*ModuleState. The former means
+ // we're moving just one module. The latter means we're moving a module
+ // and children.
+ root := raw
+ var rest []*ModuleState
+ if list, ok := raw.([]*ModuleState); ok {
+ // We need at least one item
+ if len(list) == 0 {
+ return fmt.Errorf("module move with no value to: %s", addr)
+ }
+
+ // The first item is always the root
+ root = list[0]
+ if len(list) > 1 {
+ rest = list[1:]
+ }
+ }
+
+ // Get the actual module state
+ src := root.(*ModuleState).deepcopy()
+
+ // If the target module exists, it is an error
+ path := append([]string{"root"}, addr.Path...)
+ if s.ModuleByPath(path) != nil {
+ return fmt.Errorf("module target is not empty: %s", addr)
+ }
+
+ // Create it and copy our outputs and dependencies
+ mod := s.AddModule(path)
+ mod.Outputs = src.Outputs
+ mod.Dependencies = src.Dependencies
+
+ // Go through the resources perform an add for each of those
+ for k, v := range src.Resources {
+ resourceKey, err := ParseResourceStateKey(k)
+ if err != nil {
+ return err
+ }
+
+ // Update the resource address for this
+ addrCopy := *addr
+ addrCopy.Type = resourceKey.Type
+ addrCopy.Name = resourceKey.Name
+ addrCopy.Index = resourceKey.Index
+ addrCopy.Mode = resourceKey.Mode
+
+ // Perform an add
+ if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil {
+ return err
+ }
+ }
+
+ // Add all the children if we have them
+ for _, item := range rest {
+ // If item isn't a descendent of our root, then ignore it
+ if !src.IsDescendent(item) {
+ continue
+ }
+
+ // It is! Strip the leading prefix and attach that to our address
+ extra := item.Path[len(src.Path):]
+ addrCopy := addr.Copy()
+ addrCopy.Path = append(addrCopy.Path, extra...)
+
+ // Add it
+ s.Add(fromAddr.String(), addrCopy.String(), item)
+ }
+
+ return nil
+}
+
+func stateAddFunc_Resource_Module(
+ s *State, from, to *ResourceAddress, raw interface{}) error {
+ // Build the more specific to addr
+ addr := *to
+ addr.Type = from.Type
+ addr.Name = from.Name
+
+ return s.Add(from.String(), addr.String(), raw)
+}
+
+func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+ // raw can be either *ResourceState or []*ResourceState. The former means
+ // we're moving just one resource. The latter means we're moving a count
+ // of resources.
+ if list, ok := raw.([]*ResourceState); ok {
+ // We need at least one item
+ if len(list) == 0 {
+ return fmt.Errorf("resource move with no value to: %s", addr)
+ }
+
+ // If there is an index, this is an error since we can't assign
+ // a set of resources to a single index
+ if addr.Index >= 0 && len(list) > 1 {
+ return fmt.Errorf(
+ "multiple resources can't be moved to a single index: "+
+ "%s => %s", fromAddr, addr)
+ }
+
+ // Add each with a specific index
+ for i, rs := range list {
+ addrCopy := addr.Copy()
+ addrCopy.Index = i
+
+ if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+
+ src := raw.(*ResourceState).deepcopy()
+
+ // Initialize the resource
+ resourceRaw, exists := stateAddInitAddr(s, addr)
+ if exists {
+ return fmt.Errorf("resource exists and not empty: %s", addr)
+ }
+ resource := resourceRaw.(*ResourceState)
+ resource.Type = src.Type
+ resource.Dependencies = src.Dependencies
+ resource.Provider = src.Provider
+
+ // Move the primary
+ if src.Primary != nil {
+ addrCopy := *addr
+ addrCopy.InstanceType = TypePrimary
+ addrCopy.InstanceTypeSet = true
+ if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil {
+ return err
+ }
+ }
+
+ // Move all deposed
+ if len(src.Deposed) > 0 {
+ resource.Deposed = src.Deposed
+ }
+
+ return nil
+}
+
+func stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+ src := raw.(*InstanceState).DeepCopy()
+
+ // Create the instance
+ instanceRaw, _ := stateAddInitAddr(s, addr)
+ instance := instanceRaw.(*InstanceState)
+
+ // Set it
+ instance.Set(src)
+
+ return nil
+}
+
+func stateAddFunc_Instance_Module(
+ s *State, from, to *ResourceAddress, raw interface{}) error {
+ addr := *to
+ addr.Type = from.Type
+ addr.Name = from.Name
+
+ return s.Add(from.String(), addr.String(), raw)
+}
+
+func stateAddFunc_Instance_Resource(
+ s *State, from, to *ResourceAddress, raw interface{}) error {
+ addr := *to
+ addr.InstanceType = TypePrimary
+ addr.InstanceTypeSet = true
+
+ return s.Add(from.String(), addr.String(), raw)
+}
+
+// stateAddFunc is the type of function for adding an item to a state
+type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error
+
+// stateAddFuncs has the full matrix mapping of the state adders.
+var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc
+
+func init() {
+ stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{
+ stateAddModule: {
+ stateAddModule: stateAddFunc_Module_Module,
+ },
+ stateAddResource: {
+ stateAddModule: stateAddFunc_Resource_Module,
+ stateAddResource: stateAddFunc_Resource_Resource,
+ },
+ stateAddInstance: {
+ stateAddInstance: stateAddFunc_Instance_Instance,
+ stateAddModule: stateAddFunc_Instance_Module,
+ stateAddResource: stateAddFunc_Instance_Resource,
+ },
+ }
+}
+
+// stateAddLoc is an enum to represent the location where state is being
+// moved from/to. We use this for quick lookups in a function map.
+type stateAddLoc uint
+
+const (
+ stateAddInvalid stateAddLoc = iota
+ stateAddModule
+ stateAddResource
+ stateAddInstance
+)
+
+// detectAddrAddLoc detects the state type for the given address. This
+// function is specifically not unit tested since we consider the State.Add
+// functionality to be comprehensive enough to cover this.
+func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc {
+ if addr.Name == "" {
+ return stateAddModule
+ }
+
+ if !addr.InstanceTypeSet {
+ return stateAddResource
+ }
+
+ return stateAddInstance
+}
+
+// detectValueAddLoc determines the stateAddLoc value from the raw value
+// that is some State structure.
+func detectValueAddLoc(raw interface{}) stateAddLoc {
+ switch raw.(type) {
+ case *ModuleState:
+ return stateAddModule
+ case []*ModuleState:
+ return stateAddModule
+ case *ResourceState:
+ return stateAddResource
+ case []*ResourceState:
+ return stateAddResource
+ case *InstanceState:
+ return stateAddInstance
+ default:
+ return stateAddInvalid
+ }
+}
+
+// stateAddInitAddr takes a ResourceAddress and creates the non-existing
+// resources up to that point, returning the empty (or existing) interface
+// at that address.
+func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) {
+ addType := detectAddrAddLoc(addr)
+
+ // Get the module
+ path := append([]string{"root"}, addr.Path...)
+ exists := true
+ mod := s.ModuleByPath(path)
+ if mod == nil {
+ mod = s.AddModule(path)
+ exists = false
+ }
+ if addType == stateAddModule {
+ return mod, exists
+ }
+
+ // Add the resource
+ resourceKey := (&ResourceStateKey{
+ Name: addr.Name,
+ Type: addr.Type,
+ Index: addr.Index,
+ Mode: addr.Mode,
+ }).String()
+ exists = true
+ resource, ok := mod.Resources[resourceKey]
+ if !ok {
+ resource = &ResourceState{Type: addr.Type}
+ resource.init()
+ mod.Resources[resourceKey] = resource
+ exists = false
+ }
+ if addType == stateAddResource {
+ return resource, exists
+ }
+
+ // Get the instance
+ exists = true
+ instance := &InstanceState{}
+ switch addr.InstanceType {
+ case TypePrimary, TypeTainted:
+ if v := resource.Primary; v != nil {
+ instance = resource.Primary
+ } else {
+ exists = false
+ }
+ case TypeDeposed:
+ idx := addr.Index
+ if addr.Index < 0 {
+ idx = 0
+ }
+ if len(resource.Deposed) > idx {
+ instance = resource.Deposed[idx]
+ } else {
+ resource.Deposed = append(resource.Deposed, instance)
+ exists = false
+ }
+ }
+
+ return instance, exists
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go
new file mode 100644
index 00000000..2dcb11b7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go
@@ -0,0 +1,267 @@
+package terraform
+
+import (
+ "fmt"
+ "sort"
+)
+
// StateFilter is responsible for filtering and searching a state.
//
// This is a separate struct from State rather than a method on State
// because StateFilter might create sidecar data structures to optimize
// filtering on the state.
//
// If you change the State, the filter created is invalid and either
// Reset should be called or a new one should be allocated. StateFilter
// will not watch State for changes and do this for you. If you filter after
// changing the State without calling Reset, the behavior is not defined.
type StateFilter struct {
	// State is the state that filtering operations run against.
	State *State
}
+
+// Filter takes the addresses specified by fs and finds all the matches.
+// The values of fs are resource addressing syntax that can be parsed by
+// ParseResourceAddress.
+func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) {
+ // Parse all the addresses
+ as := make([]*ResourceAddress, len(fs))
+ for i, v := range fs {
+ a, err := ParseResourceAddress(v)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing address '%s': %s", v, err)
+ }
+
+ as[i] = a
+ }
+
+ // If we weren't given any filters, then we list all
+ if len(fs) == 0 {
+ as = append(as, &ResourceAddress{Index: -1})
+ }
+
+ // Filter each of the address. We keep track of this in a map to
+ // strip duplicates.
+ resultSet := make(map[string]*StateFilterResult)
+ for _, a := range as {
+ for _, r := range f.filterSingle(a) {
+ resultSet[r.String()] = r
+ }
+ }
+
+ // Make the result list
+ results := make([]*StateFilterResult, 0, len(resultSet))
+ for _, v := range resultSet {
+ results = append(results, v)
+ }
+
+ // Sort them and return
+ sort.Sort(StateFilterResultSlice(results))
+ return results, nil
+}
+
+func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult {
+ // The slice to keep track of results
+ var results []*StateFilterResult
+
+ // Go through modules first.
+ modules := make([]*ModuleState, 0, len(f.State.Modules))
+ for _, m := range f.State.Modules {
+ if f.relevant(a, m) {
+ modules = append(modules, m)
+
+ // Only add the module to the results if we haven't specified a type.
+ // We also ignore the root module.
+ if a.Type == "" && len(m.Path) > 1 {
+ results = append(results, &StateFilterResult{
+ Path: m.Path[1:],
+ Address: (&ResourceAddress{Path: m.Path[1:]}).String(),
+ Value: m,
+ })
+ }
+ }
+ }
+
+ // With the modules set, go through all the resources within
+ // the modules to find relevant resources.
+ for _, m := range modules {
+ for n, r := range m.Resources {
+ // The name in the state contains valuable information. Parse.
+ key, err := ParseResourceStateKey(n)
+ if err != nil {
+ // If we get an error parsing, then just ignore it
+ // out of the state.
+ continue
+ }
+
+ // Older states and test fixtures often don't contain the
+ // type directly on the ResourceState. We add this so StateFilter
+ // is a bit more robust.
+ if r.Type == "" {
+ r.Type = key.Type
+ }
+
+ if f.relevant(a, r) {
+ if a.Name != "" && a.Name != key.Name {
+ // Name doesn't match
+ continue
+ }
+
+ if a.Index >= 0 && key.Index != a.Index {
+ // Index doesn't match
+ continue
+ }
+
+ if a.Name != "" && a.Name != key.Name {
+ continue
+ }
+
+ // Build the address for this resource
+ addr := &ResourceAddress{
+ Path: m.Path[1:],
+ Name: key.Name,
+ Type: key.Type,
+ Index: key.Index,
+ }
+
+ // Add the resource level result
+ resourceResult := &StateFilterResult{
+ Path: addr.Path,
+ Address: addr.String(),
+ Value: r,
+ }
+ if !a.InstanceTypeSet {
+ results = append(results, resourceResult)
+ }
+
+ // Add the instances
+ if r.Primary != nil {
+ addr.InstanceType = TypePrimary
+ addr.InstanceTypeSet = false
+ results = append(results, &StateFilterResult{
+ Path: addr.Path,
+ Address: addr.String(),
+ Parent: resourceResult,
+ Value: r.Primary,
+ })
+ }
+
+ for _, instance := range r.Deposed {
+ if f.relevant(a, instance) {
+ addr.InstanceType = TypeDeposed
+ addr.InstanceTypeSet = true
+ results = append(results, &StateFilterResult{
+ Path: addr.Path,
+ Address: addr.String(),
+ Parent: resourceResult,
+ Value: instance,
+ })
+ }
+ }
+ }
+ }
+ }
+
+ return results
+}
+
+// relevant checks for relevance of this address against the given value.
+func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool {
+ switch v := raw.(type) {
+ case *ModuleState:
+ path := v.Path[1:]
+
+ if len(addr.Path) > len(path) {
+ // Longer path in address means there is no way we match.
+ return false
+ }
+
+ // Check for a prefix match
+ for i, p := range addr.Path {
+ if path[i] != p {
+ // Any mismatches don't match.
+ return false
+ }
+ }
+
+ return true
+ case *ResourceState:
+ if addr.Type == "" {
+ // If we have no resource type, then we're interested in all!
+ return true
+ }
+
+ // If the type doesn't match we fail immediately
+ if v.Type != addr.Type {
+ return false
+ }
+
+ return true
+ default:
+ // If we don't know about it, let's just say no
+ return false
+ }
+}
+
// StateFilterResult is a single result from a filter operation. Filter
// can match multiple things within a state (module, resource, instance, etc.)
// and this unifies that.
type StateFilterResult struct {
	// Module path of the result
	Path []string

	// Address is the address that can be used to reference this exact result.
	Address string

	// Parent, if non-nil, is a parent of this result. For instances, the
	// parent would be a resource. For resources, the parent would be
	// a module. For modules, this is currently nil.
	Parent *StateFilterResult

	// Value is the actual value. This must be type switched on. It can be
	// any data structures that `State` can hold: `ModuleState`,
	// `ResourceState`, `InstanceState`.
	Value interface{}
}

// String returns a unique textual form of the result, combining the
// dynamic type of Value with the result's address. Filter uses this as
// a deduplication key.
func (r *StateFilterResult) String() string {
	return fmt.Sprintf("%T: %s", r.Value, r.Address)
}
+
// sortedType returns a sort weight for the result's value type so that
// modules sort before resources, which sort before instances. Unknown
// types sort last.
func (r *StateFilterResult) sortedType() int {
	switch r.Value.(type) {
	case *ModuleState:
		return 0
	case *ResourceState:
		return 1
	case *InstanceState:
		return 2
	default:
		return 50
	}
}
+
// StateFilterResultSlice is a slice of results that implements
// sort.Interface. The sorting goal is what is most appealing to
// human output.
type StateFilterResultSlice []*StateFilterResult

func (s StateFilterResultSlice) Len() int      { return len(s) }
func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s StateFilterResultSlice) Less(i, j int) bool {
	a, b := s[i], s[j]

	// If these addresses contain an index, we want to sort by index
	// rather than name.
	addrA, errA := ParseResourceAddress(a.Address)
	addrB, errB := ParseResourceAddress(b.Address)
	if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index {
		return addrA.Index < addrB.Index
	}

	// If the addresses are different it is just lexicographic sorting
	if a.Address != b.Address {
		return a.Address < b.Address
	}

	// Addresses are the same, which means it matters on the type
	return a.sortedType() < b.sortedType()
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
new file mode 100644
index 00000000..aa13cce8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
@@ -0,0 +1,189 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/mitchellh/copystructure"
+)
+
// upgradeStateV1ToV2 is used to upgrade a V1 state representation
// into a V2 state representation. A nil input upgrades to a nil
// state with no error.
func upgradeStateV1ToV2(old *stateV1) (*State, error) {
	if old == nil {
		return nil, nil
	}

	// Upgrade the remote state metadata, if any.
	remote, err := old.Remote.upgradeToV2()
	if err != nil {
		return nil, fmt.Errorf("Error upgrading State V1: %v", err)
	}

	// Upgrade each module in turn, preserving order.
	modules := make([]*ModuleState, len(old.Modules))
	for i, module := range old.Modules {
		upgraded, err := module.upgradeToV2()
		if err != nil {
			return nil, fmt.Errorf("Error upgrading State V1: %v", err)
		}
		modules[i] = upgraded
	}
	// Normalize an empty module list to nil.
	if len(modules) == 0 {
		modules = nil
	}

	newState := &State{
		Version: 2,
		Serial:  old.Serial,
		Remote:  remote,
		Modules: modules,
	}

	// Sort and initialize internal structures before handing it back.
	newState.sort()
	newState.init()

	return newState, nil
}
+
+func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) {
+ if old == nil {
+ return nil, nil
+ }
+
+ config, err := copystructure.Copy(old.Config)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err)
+ }
+
+ return &RemoteState{
+ Type: old.Type,
+ Config: config.(map[string]string),
+ }, nil
+}
+
// upgradeToV2 converts a V1 module state into its V2 equivalent.
// A nil receiver upgrades to a nil *ModuleState with no error.
func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) {
	if old == nil {
		return nil, nil
	}

	// Deep-copy the path so the new state does not alias the old one.
	pathRaw, err := copystructure.Copy(old.Path)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
	}
	path, ok := pathRaw.([]string)
	if !ok {
		return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings")
	}
	if len(path) == 0 {
		// We found some V1 states with a nil path. Assume root and catch
		// duplicate path errors later (as part of Validate).
		path = rootModulePath
	}

	// Outputs needs upgrading to use the new structure: V1 stored plain
	// strings, V2 wraps each value in an OutputState.
	outputs := make(map[string]*OutputState)
	for key, output := range old.Outputs {
		outputs[key] = &OutputState{
			Type:      "string",
			Value:     output,
			Sensitive: false,
		}
	}

	// Upgrade each resource in the module.
	resources := make(map[string]*ResourceState)
	for key, oldResource := range old.Resources {
		upgraded, err := oldResource.upgradeToV2()
		if err != nil {
			return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
		}
		resources[key] = upgraded
	}

	dependencies, err := copystructure.Copy(old.Dependencies)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
	}

	return &ModuleState{
		Path:         path,
		Outputs:      outputs,
		Resources:    resources,
		Dependencies: dependencies.([]string),
	}, nil
}
+
// upgradeToV2 converts a V1 resource state into its V2 equivalent.
// A nil receiver upgrades to a nil *ResourceState with no error.
//
// NOTE(review): old.Tainted is not migrated by this function —
// presumably it is handled elsewhere in the upgrade; verify against
// the caller.
func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) {
	if old == nil {
		return nil, nil
	}

	// Deep-copy dependencies so the new state does not alias the old one.
	dependencies, err := copystructure.Copy(old.Dependencies)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
	}

	primary, err := old.Primary.upgradeToV2()
	if err != nil {
		return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
	}

	// Upgrade each deposed instance, normalizing empty to nil.
	deposed := make([]*InstanceState, len(old.Deposed))
	for i, v := range old.Deposed {
		upgraded, err := v.upgradeToV2()
		if err != nil {
			return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
		}
		deposed[i] = upgraded
	}
	if len(deposed) == 0 {
		deposed = nil
	}

	return &ResourceState{
		Type:         old.Type,
		Dependencies: dependencies.([]string),
		Primary:      primary,
		Deposed:      deposed,
		Provider:     old.Provider,
	}, nil
}
+
// upgradeToV2 converts a V1 instance state into its V2 equivalent.
// A nil receiver upgrades to a nil *InstanceState with no error.
func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) {
	if old == nil {
		return nil, nil
	}

	// Deep-copy attributes so the new state does not alias the old one.
	attributes, err := copystructure.Copy(old.Attributes)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
	}
	ephemeral, err := old.Ephemeral.upgradeToV2()
	if err != nil {
		return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
	}

	meta, err := copystructure.Copy(old.Meta)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
	}

	// V1 meta was map[string]string; V2 widens the value type to
	// interface{}, so re-box each entry.
	newMeta := make(map[string]interface{})
	for k, v := range meta.(map[string]string) {
		newMeta[k] = v
	}

	return &InstanceState{
		ID:         old.ID,
		Attributes: attributes.(map[string]string),
		Ephemeral:  *ephemeral,
		Meta:       newMeta,
	}, nil
}
+
+func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) {
+ connInfo, err := copystructure.Copy(old.ConnInfo)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err)
+ }
+ return &EphemeralState{
+ ConnInfo: connInfo.(map[string]string),
+ }, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
new file mode 100644
index 00000000..e52d35fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
@@ -0,0 +1,142 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// The upgrade process from V2 to V3 state does not affect the structure,
+// so we do not need to redeclare all of the structs involved - we just
+// take a deep copy of the old structure and assert the version number is
+// as we expect.
+func upgradeStateV2ToV3(old *State) (*State, error) {
+ new := old.DeepCopy()
+
+ // Ensure the copied version is v2 before attempting to upgrade
+ if new.Version != 2 {
+ return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " +
+ "a state which is not version 2.")
+ }
+
+ // Set the new version number
+ new.Version = 3
+
+ // Change the counts for things which look like maps to use the %
+ // syntax. Remove counts for empty collections - they will be added
+ // back in later.
+ for _, module := range new.Modules {
+ for _, resource := range module.Resources {
+ // Upgrade Primary
+ if resource.Primary != nil {
+ upgradeAttributesV2ToV3(resource.Primary)
+ }
+
+ // Upgrade Deposed
+ if resource.Deposed != nil {
+ for _, deposed := range resource.Deposed {
+ upgradeAttributesV2ToV3(deposed)
+ }
+ }
+ }
+ }
+
+ return new, nil
+}
+
// upgradeAttributesV2ToV3 rewrites the flatmap attributes of a single
// instance in place: counts for collections that look like maps move
// from the ".#" suffix to ".%", and counts for empty collections are
// deleted entirely. The returned error is currently always nil.
func upgradeAttributesV2ToV3(instanceState *InstanceState) error {
	// Matches e.g. "tags.#" and captures the "tags." prefix.
	collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`)
	// Captures the first path element of a key relative to its prefix.
	collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`)

	// Identify the key prefix of anything which is a collection
	var collectionKeyPrefixes []string
	for key := range instanceState.Attributes {
		if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
			collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1])
		}
	}
	sort.Strings(collectionKeyPrefixes)

	log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes)

	// This could be rolled into fewer loops, but it is somewhat clearer this way, and will not
	// run very often.
	for _, prefix := range collectionKeyPrefixes {
		// First get the actual keys that belong to this prefix
		var potentialKeysMatching []string
		for key := range instanceState.Attributes {
			if strings.HasPrefix(key, prefix) {
				potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix))
			}
		}
		sort.Strings(potentialKeysMatching)

		// Keep only the first path element of each key; drop the count
		// key ("#") itself.
		var actualKeysMatching []string
		for _, key := range potentialKeysMatching {
			if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
				actualKeysMatching = append(actualKeysMatching, submatches[0][1])
			} else {
				if key != "#" {
					actualKeysMatching = append(actualKeysMatching, key)
				}
			}
		}
		actualKeysMatching = uniqueSortedStrings(actualKeysMatching)

		// Now inspect the keys in order to determine whether this is most likely to be
		// a map, list or set. There is room for error here, so we log in each case. If
		// there is no method of telling, we remove the key from the InstanceState in
		// order that it will be recreated. Again, this could be rolled into fewer loops
		// but we prefer clarity.

		oldCountKey := fmt.Sprintf("%s#", prefix)

		// First, detect "obvious" maps - which have non-numeric keys (mostly).
		hasNonNumericKeys := false
		for _, key := range actualKeysMatching {
			if _, err := strconv.Atoi(key); err != nil {
				hasNonNumericKeys = true
			}
		}
		if hasNonNumericKeys {
			// Move the count from "prefix#" to "prefix%".
			newCountKey := fmt.Sprintf("%s%%", prefix)

			instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey]
			delete(instanceState.Attributes, oldCountKey)
			log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s",
				strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey])
		}

		// Now detect empty collections and remove them from state.
		if len(actualKeysMatching) == 0 {
			delete(instanceState.Attributes, oldCountKey)
			log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
				strings.TrimSuffix(prefix, "."))
		}
	}

	return nil
}
+
// uniqueSortedStrings removes duplicates from a slice of strings and returns
// a sorted slice of the unique strings.
func uniqueSortedStrings(input []string) []string {
	// Use a set to collapse duplicate entries.
	seen := make(map[string]struct{}, len(input))
	for _, s := range input {
		seen[s] = struct{}{}
	}

	// Copy the unique values out and sort them.
	output := make([]string, 0, len(seen))
	for s := range seen {
		output = append(output, s)
	}
	sort.Strings(output)
	return output
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
new file mode 100644
index 00000000..68cffb41
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
@@ -0,0 +1,145 @@
+package terraform
+
// stateV1 keeps track of a snapshot state-of-the-world that Terraform
// can use to keep track of what real world resources it is actually
// managing.
//
// stateV1 is _only_ used for the purposes of backwards compatibility
// and is no longer used in Terraform.
//
// For the upgrade process, see state_upgrade_v1_to_v2.go
type stateV1 struct {
	// Version is the protocol version. "1" for a StateV1.
	Version int `json:"version"`

	// Serial is incremented on any operation that modifies
	// the State file. It is used to detect potentially conflicting
	// updates.
	Serial int64 `json:"serial"`

	// Remote is used to track the metadata required to
	// pull and push state files from a remote storage endpoint.
	Remote *remoteStateV1 `json:"remote,omitempty"`

	// Modules contains all the modules in a breadth-first order.
	Modules []*moduleStateV1 `json:"modules"`
}
+
// remoteStateV1 is the V1 representation of remote state storage
// metadata; see remoteStateV1.upgradeToV2 for the conversion.
type remoteStateV1 struct {
	// Type controls the client we use for the remote state
	Type string `json:"type"`

	// Config is used to store arbitrary configuration that
	// is type specific
	Config map[string]string `json:"config"`
}
+
// moduleStateV1 is the V1 representation of a single module's state;
// see moduleStateV1.upgradeToV2 for the conversion.
type moduleStateV1 struct {
	// Path is the import path from the root module. Module imports are
	// always disjoint, so the path represents a module tree
	Path []string `json:"path"`

	// Outputs declared by the module and maintained for each module
	// even though only the root module technically needs to be kept.
	// This allows operators to inspect values at the boundaries.
	Outputs map[string]string `json:"outputs"`

	// Resources is a mapping of the logically named resource to
	// the state of the resource. Each resource may actually have
	// N instances underneath, although a user only needs to think
	// about the 1:1 case.
	Resources map[string]*resourceStateV1 `json:"resources"`

	// Dependencies are a list of things that this module relies on
	// existing to remain intact. For example: a module may depend
	// on a VPC ID given by an aws_vpc resource.
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a module that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on,omitempty"`
}
+
// resourceStateV1 is the V1 representation of a single resource's state;
// see resourceStateV1.upgradeToV2 for the conversion.
type resourceStateV1 struct {
	// This is filled in and managed by Terraform, and is the resource
	// type itself such as "mycloud_instance". If a resource provider sets
	// this value, it won't be persisted.
	Type string `json:"type"`

	// Dependencies are a list of things that this resource relies on
	// existing to remain intact. For example: an AWS instance might
	// depend on a subnet (which itself might depend on a VPC, and so
	// on).
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a resource that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on,omitempty"`

	// Primary is the current active instance for this resource.
	// It can be replaced but only after a successful creation.
	// These are the instances on which providers will act.
	Primary *instanceStateV1 `json:"primary"`

	// Tainted is used to track any underlying instances that
	// have been created but are in a bad or unknown state and
	// need to be cleaned up subsequently. In the
	// standard case, there is only at most a single instance.
	// However, in pathological cases, it is possible for the number
	// of instances to accumulate.
	Tainted []*instanceStateV1 `json:"tainted,omitempty"`

	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
	// Primary is Deposed to get it out of the way for the replacement Primary to
	// be created by Apply. If the replacement Primary creates successfully, the
	// Deposed instance is cleaned up. If there were problems creating the
	// replacement, the instance remains in the Deposed list so it can be
	// destroyed in a future run. Functionally, Deposed instances are very
	// similar to Tainted instances in that Terraform is only tracking them in
	// order to remember to destroy them.
	Deposed []*instanceStateV1 `json:"deposed,omitempty"`

	// Provider is used when a resource is connected to a provider with an alias.
	// If this string is empty, the resource is connected to the default provider,
	// e.g. "aws_instance" goes with the "aws" provider.
	// If the resource block contained a "provider" key, that value will be set here.
	Provider string `json:"provider,omitempty"`
}
+
// instanceStateV1 is the V1 representation of a single instance's state;
// see instanceStateV1.upgradeToV2 for the conversion.
type instanceStateV1 struct {
	// A unique ID for this resource. This is opaque to Terraform
	// and is only meant as a lookup mechanism for the providers.
	ID string `json:"id"`

	// Attributes are basic information about the resource. Any keys here
	// are accessible in variable format within Terraform configurations:
	// ${resourcetype.name.attribute}.
	Attributes map[string]string `json:"attributes,omitempty"`

	// Ephemeral is used to store any state associated with this instance
	// that is necessary for the Terraform run to complete, but is not
	// persisted to a state file.
	Ephemeral ephemeralStateV1 `json:"-"`

	// Meta is a simple K/V map that is persisted to the State but otherwise
	// ignored by Terraform core. It's meant to be used for accounting by
	// external client code.
	Meta map[string]string `json:"meta,omitempty"`
}
+
// ephemeralStateV1 is the V1 representation of per-run instance state
// that is never persisted; see ephemeralStateV1.upgradeToV2.
type ephemeralStateV1 struct {
	// ConnInfo is used for the providers to export information which is
	// used to connect to the resource for provisioning. For example,
	// this could contain SSH or WinRM credentials.
	ConnInfo map[string]string `json:"-"`
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/testing.go b/vendor/github.com/hashicorp/terraform/terraform/testing.go
new file mode 100644
index 00000000..3f0418d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/testing.go
@@ -0,0 +1,19 @@
+package terraform
+
+import (
+ "os"
+ "testing"
+)
+
+// TestStateFile writes the given state to the path.
+func TestStateFile(t *testing.T, path string, state *State) {
+ f, err := os.Create(path)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer f.Close()
+
+ if err := WriteState(state, f); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go
new file mode 100644
index 00000000..f4a431a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go
@@ -0,0 +1,52 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
// GraphTransformer is the interface that transformers implement. This
// interface is only for transforms that need entire graph visibility.
type GraphTransformer interface {
	Transform(*Graph) error
}

// GraphVertexTransformer is an interface that transforms a single
// Vertex within the graph. This is a specialization of GraphTransformer
// that makes it easy to do vertex replacement.
//
// The GraphTransformer that runs through the GraphVertexTransformers is
// VertexTransformer.
type GraphVertexTransformer interface {
	Transform(dag.Vertex) (dag.Vertex, error)
}

// GraphTransformIf is a helper function that conditionally returns a
// GraphTransformer given. This is useful for calling inline a sequence
// of transforms without having to split it up into multiple append() calls.
//
// Note that when f() is false this returns a nil GraphTransformer;
// callers must be prepared to skip nil entries.
func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer {
	if f() {
		return then
	}

	return nil
}
+
+type graphTransformerMulti struct {
+ Transforms []GraphTransformer
+}
+
+func (t *graphTransformerMulti) Transform(g *Graph) error {
+ for _, t := range t.Transforms {
+ if err := t.Transform(g); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GraphTransformMulti combines multiple graph transformers into a single
+// GraphTransformer that runs all the individual graph transformers.
+func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer {
+ return &graphTransformerMulti{Transforms: ts}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
new file mode 100644
index 00000000..10506ea0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
@@ -0,0 +1,80 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
// GraphNodeAttachProvider is an interface that must be implemented by nodes
// that want provider configurations attached.
type GraphNodeAttachProvider interface {
	// Must be implemented to determine the path for the configuration
	GraphNodeSubPath

	// ProviderName with no module prefix. Example: "aws".
	ProviderName() string

	// AttachProvider sets the provider configuration for this node.
	AttachProvider(*config.ProviderConfig)
}

// AttachProviderConfigTransformer goes through the graph and attaches
// provider configuration structures to nodes that implement the interfaces
// above.
//
// The attached configuration structures are directly from the configuration.
// If they're going to be modified, a copy should be made.
type AttachProviderConfigTransformer struct {
	Module *module.Tree // Module is the root module for the config
}
+
+func (t *AttachProviderConfigTransformer) Transform(g *Graph) error {
+ if err := t.attachProviders(g); err != nil {
+ return err
+ }
+
+ return nil
+}
+
// attachProviders finds each GraphNodeAttachProvider vertex in the graph
// and attaches the matching provider configuration from the module tree.
// Vertices with no matching module or provider config are skipped.
func (t *AttachProviderConfigTransformer) attachProviders(g *Graph) error {
	// Go through and find GraphNodeAttachProvider
	for _, v := range g.Vertices() {
		// Only care about GraphNodeAttachProvider implementations
		apn, ok := v.(GraphNodeAttachProvider)
		if !ok {
			continue
		}

		// Determine what we're looking for
		path := normalizeModulePath(apn.Path())
		path = path[1:] // drop the implicit root element
		name := apn.ProviderName()
		log.Printf("[TRACE] Attach provider request: %#v %s", path, name)

		// Get the configuration.
		tree := t.Module.Child(path)
		if tree == nil {
			continue
		}

		// Go through the provider configs to find the matching config
		for _, p := range tree.Config().ProviderConfigs {
			// Build the name, which is "name.alias" if an alias exists
			current := p.Name
			if p.Alias != "" {
				current += "." + p.Alias
			}

			// If the configs match then attach!
			if current == name {
				log.Printf("[TRACE] Attaching provider config: %#v", p)
				apn.AttachProvider(p)
				break
			}
		}
	}

	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
new file mode 100644
index 00000000..f2ee37e5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
@@ -0,0 +1,78 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes
// that want resource configurations attached.
type GraphNodeAttachResourceConfig interface {
	// ResourceAddr is the address to the resource
	ResourceAddr() *ResourceAddress

	// AttachResourceConfig sets the resource configuration for this node.
	AttachResourceConfig(*config.Resource)
}

// AttachResourceConfigTransformer goes through the graph and attaches
// resource configuration structures to nodes that implement the interfaces
// above.
//
// The attached configuration structures are directly from the configuration.
// If they're going to be modified, a copy should be made.
type AttachResourceConfigTransformer struct {
	Module *module.Tree // Module is the root module for the config
}
+
// Transform attaches resource configuration structures to any graph
// vertices that implement GraphNodeAttachResourceConfig. Vertices whose
// module or resource cannot be found in the config are left untouched.
func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
	log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...")

	// Go through and find GraphNodeAttachResource
	for _, v := range g.Vertices() {
		// Only care about GraphNodeAttachResource implementations
		arn, ok := v.(GraphNodeAttachResourceConfig)
		if !ok {
			continue
		}

		// Determine what we're looking for
		addr := arn.ResourceAddr()
		log.Printf(
			"[TRACE] AttachResourceConfigTransformer: Attach resource "+
				"config request: %s", addr)

		// Get the configuration.
		path := normalizeModulePath(addr.Path)
		path = path[1:] // drop the implicit root element
		tree := t.Module.Child(path)
		if tree == nil {
			continue
		}

		// Go through the resource configs to find the matching config
		for _, r := range tree.Config().Resources {
			// Get a resource address so we can compare
			a, err := parseResourceAddressConfig(r)
			if err != nil {
				// A config that fails to parse here indicates a bug in
				// the address parser, not bad user input.
				panic(fmt.Sprintf(
					"Error parsing config address, this is a bug: %#v", r))
			}
			a.Path = addr.Path

			// If this is not the same resource, then continue
			if !a.Equals(addr) {
				continue
			}

			log.Printf("[TRACE] Attaching resource config: %#v", r)
			arn.AttachResourceConfig(r)
			break
		}
	}

	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
new file mode 100644
index 00000000..564ff08f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
@@ -0,0 +1,68 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
// GraphNodeAttachResourceState is an interface that can be implemented
// to request that a ResourceState is attached to the node.
type GraphNodeAttachResourceState interface {
	// The address to the resource for the state
	ResourceAddr() *ResourceAddress

	// AttachResourceState sets the resource state for this node.
	AttachResourceState(*ResourceState)
}

// AttachStateTransformer goes through the graph and attaches
// state to nodes that implement the interfaces above.
type AttachStateTransformer struct {
	State *State // State is the root state
}
+
// Transform attaches resource state to every vertex implementing
// GraphNodeAttachResourceState. With a nil State this is a no-op.
func (t *AttachStateTransformer) Transform(g *Graph) error {
	// If no state, then nothing to do
	if t.State == nil {
		log.Printf("[DEBUG] Not attaching any state: state is nil")
		return nil
	}

	// NOTE(review): the filter is built once and reused across vertices;
	// it assumes the state is not mutated while this transform runs.
	filter := &StateFilter{State: t.State}
	for _, v := range g.Vertices() {
		// Only care about nodes requesting we're adding state
		an, ok := v.(GraphNodeAttachResourceState)
		if !ok {
			continue
		}
		addr := an.ResourceAddr()

		// Get the module state
		results, err := filter.Filter(addr.String())
		if err != nil {
			return err
		}

		// Attach the first resource state we get
		found := false
		for _, result := range results {
			if rs, ok := result.Value.(*ResourceState); ok {
				log.Printf(
					"[DEBUG] Attaching resource state to %q: %#v",
					dag.VertexName(v), rs)
				an.AttachResourceState(rs)
				found = true
				break
			}
		}

		if !found {
			// Not an error: a node may simply have no state yet.
			log.Printf(
				"[DEBUG] Resource state not found for %q: %s",
				dag.VertexName(v), addr)
		}
	}

	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
new file mode 100644
index 00000000..61bce853
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
@@ -0,0 +1,135 @@
+package terraform
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "sync"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ConfigTransformer is a GraphTransformer that adds all the resources
+// from the configuration to the graph.
+//
+// The module used to configure this transformer must be the root module.
+//
+// Only resources are added to the graph. Variables, outputs, and
+// providers must be added via other transforms.
+//
+// Unlike ConfigTransformerOld, this transformer creates a graph with
+// all resources including module resources, rather than creating module
+// nodes that are then "flattened".
+type ConfigTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ // Module is the module to add resources from.
+ Module *module.Tree
+
+ // Unique will only add resources that aren't already present in the graph.
+ Unique bool
+
+ // Mode will only add resources that match the given mode
+ ModeFilter bool
+ Mode config.ResourceMode
+
+ l sync.Mutex
+ uniqueMap map[string]struct{}
+}
+
+func (t *ConfigTransformer) Transform(g *Graph) error {
+ // Lock since we use some internal state
+ t.l.Lock()
+ defer t.l.Unlock()
+
+ // If no module is given, we don't do anything
+ if t.Module == nil {
+ return nil
+ }
+
+ // If the module isn't loaded, that is simply an error
+ if !t.Module.Loaded() {
+ return errors.New("module must be loaded for ConfigTransformer")
+ }
+
+ // Reset the uniqueness map. If we're tracking uniques, then populate
+ // it with addresses.
+ t.uniqueMap = make(map[string]struct{})
+ defer func() { t.uniqueMap = nil }()
+ if t.Unique {
+ for _, v := range g.Vertices() {
+ if rn, ok := v.(GraphNodeResource); ok {
+ t.uniqueMap[rn.ResourceAddr().String()] = struct{}{}
+ }
+ }
+ }
+
+ // Start the transformation process
+ return t.transform(g, t.Module)
+}
+
+func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error {
+ // If no config, do nothing
+ if m == nil {
+ return nil
+ }
+
+ // Add our resources
+ if err := t.transformSingle(g, m); err != nil {
+ return err
+ }
+
+ // Transform all the children.
+ for _, c := range m.Children() {
+ if err := t.transform(g, c); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error {
+ log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path())
+
+ // Get the configuration for this module
+ conf := m.Config()
+
+ // Build the path we're at
+ path := m.Path()
+
+ // Write all the resources out
+ for _, r := range conf.Resources {
+ // Build the resource address
+ addr, err := parseResourceAddressConfig(r)
+ if err != nil {
+ panic(fmt.Sprintf(
+ "Error parsing config address, this is a bug: %#v", r))
+ }
+ addr.Path = path
+
+ // If this is already in our uniqueness map, don't add it again
+ if _, ok := t.uniqueMap[addr.String()]; ok {
+ continue
+ }
+
+ // Remove non-matching modes
+ if t.ModeFilter && addr.Mode != t.Mode {
+ continue
+ }
+
+ // Build the abstract node and the concrete one
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ // Add it to the graph
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
new file mode 100644
index 00000000..92f9888d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
@@ -0,0 +1,80 @@
+package terraform
+
+import (
+ "errors"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// FlatConfigTransformer is a GraphTransformer that adds the configuration
+// to the graph. The module used to configure this transformer must be
+// the root module.
+//
+// This transform adds the nodes but doesn't connect any of the references.
+// The ReferenceTransformer should be used for that.
+//
+// NOTE: In relation to ConfigTransformer: this is a newer generation config
+// transformer. It puts the _entire_ config into the graph (there is no
+// "flattening" step as before).
+type FlatConfigTransformer struct {
+ Concrete ConcreteResourceNodeFunc // What to turn resources into
+
+ Module *module.Tree
+}
+
+func (t *FlatConfigTransformer) Transform(g *Graph) error {
+ // If no module, we do nothing
+ if t.Module == nil {
+ return nil
+ }
+
+ // If the module is not loaded, that is an error
+ if !t.Module.Loaded() {
+ return errors.New("module must be loaded")
+ }
+
+ return t.transform(g, t.Module)
+}
+
+func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error {
+ // If no module, no problem
+ if m == nil {
+ return nil
+ }
+
+ // Transform all the children.
+ for _, c := range m.Children() {
+ if err := t.transform(g, c); err != nil {
+ return err
+ }
+ }
+
+ // Get the configuration for this module
+ config := m.Config()
+
+ // Write all the resources out
+ for _, r := range config.Resources {
+ // Grab the address for this resource
+ addr, err := parseResourceAddressConfig(r)
+ if err != nil {
+ return err
+ }
+ addr.Path = m.Path()
+
+ // Build the abstract resource. We have the config already so
+ // we'll just pre-populate that.
+ abstract := &NodeAbstractResource{
+ Addr: addr,
+ Config: r,
+ }
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
new file mode 100644
index 00000000..ec412582
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
@@ -0,0 +1,23 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// varNameForVar returns the VarName value for an interpolated variable.
+// This value is compared to the VarName() value for the nodes within the
+// graph to build the graph edges.
+func varNameForVar(raw config.InterpolatedVariable) string {
+ switch v := raw.(type) {
+ case *config.ModuleVariable:
+ return fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)
+ case *config.ResourceVariable:
+ return v.ResourceId()
+ case *config.UserVariable:
+ return fmt.Sprintf("var.%s", v.Name)
+ default:
+ return ""
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
new file mode 100644
index 00000000..83415f35
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
@@ -0,0 +1,28 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// CountBoundaryTransformer adds a node that depends on everything else
+// so that it runs last in order to clean up the state for nodes that
+// are on the "count boundary": "foo.0" when only one exists becomes "foo"
+type CountBoundaryTransformer struct{}
+
+func (t *CountBoundaryTransformer) Transform(g *Graph) error {
+ node := &NodeCountBoundary{}
+ g.Add(node)
+
+ // Depends on everything
+ for _, v := range g.Vertices() {
+ // Don't connect to ourselves
+ if v == node {
+ continue
+ }
+
+ // Connect!
+ g.Connect(dag.BasicEdge(node, v))
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
new file mode 100644
index 00000000..2148cef4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
@@ -0,0 +1,168 @@
+package terraform
+
+import "fmt"
+
+// DeposedTransformer is a GraphTransformer that adds deposed resources
+// to the graph.
+type DeposedTransformer struct {
+ // State is the global state. We'll automatically find the correct
+ // ModuleState based on the Graph.Path that is being transformed.
+ State *State
+
+ // View, if non-empty, is the ModuleState.View used around the state
+ // to find deposed resources.
+ View string
+}
+
+func (t *DeposedTransformer) Transform(g *Graph) error {
+ state := t.State.ModuleByPath(g.Path)
+ if state == nil {
+ // If there is no state for our module there can't be any deposed
+ // resources, since they live in the state.
+ return nil
+ }
+
+ // If we have a view, apply it now
+ if t.View != "" {
+ state = state.View(t.View)
+ }
+
+ // Go through all the resources in our state to look for deposed resources
+ for k, rs := range state.Resources {
+ // If we have no deposed resources, then move on
+ if len(rs.Deposed) == 0 {
+ continue
+ }
+ deposed := rs.Deposed
+
+ for i, _ := range deposed {
+ g.Add(&graphNodeDeposedResource{
+ Index: i,
+ ResourceName: k,
+ ResourceType: rs.Type,
+ Provider: rs.Provider,
+ })
+ }
+ }
+
+ return nil
+}
+
+// graphNodeDeposedResource is the graph vertex representing a deposed resource.
+type graphNodeDeposedResource struct {
+ Index int
+ ResourceName string
+ ResourceType string
+ Provider string
+}
+
+func (n *graphNodeDeposedResource) Name() string {
+ return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index)
+}
+
+func (n *graphNodeDeposedResource) ProvidedBy() []string {
+ return []string{resourceProvider(n.ResourceName, n.Provider)}
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeDeposedResource) EvalTree() EvalNode {
+ var provider ResourceProvider
+ var state *InstanceState
+
+ seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)}
+
+ // Build instance info
+ info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType}
+ seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info})
+
+ // Refresh the resource
+ seq.Nodes = append(seq.Nodes, &EvalOpFilter{
+ Ops: []walkOperation{walkRefresh},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadStateDeposed{
+ Name: n.ResourceName,
+ Output: &state,
+ Index: n.Index,
+ },
+ &EvalRefresh{
+ Info: info,
+ Provider: &provider,
+ State: &state,
+ Output: &state,
+ },
+ &EvalWriteStateDeposed{
+ Name: n.ResourceName,
+ ResourceType: n.ResourceType,
+ Provider: n.Provider,
+ State: &state,
+ Index: n.Index,
+ },
+ },
+ },
+ })
+
+ // Apply
+ var diff *InstanceDiff
+ var err error
+ seq.Nodes = append(seq.Nodes, &EvalOpFilter{
+ Ops: []walkOperation{walkApply, walkDestroy},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadStateDeposed{
+ Name: n.ResourceName,
+ Output: &state,
+ Index: n.Index,
+ },
+ &EvalDiffDestroy{
+ Info: info,
+ State: &state,
+ Output: &diff,
+ },
+ // Call pre-apply hook
+ &EvalApplyPre{
+ Info: info,
+ State: &state,
+ Diff: &diff,
+ },
+ &EvalApply{
+ Info: info,
+ State: &state,
+ Diff: &diff,
+ Provider: &provider,
+ Output: &state,
+ Error: &err,
+ },
+ // Always write the resource back to the state deposed... if it
+ // was successfully destroyed it will be pruned. If it was not, it will
+ // be caught on the next run.
+ &EvalWriteStateDeposed{
+ Name: n.ResourceName,
+ ResourceType: n.ResourceType,
+ Provider: n.Provider,
+ State: &state,
+ Index: n.Index,
+ },
+ &EvalApplyPost{
+ Info: info,
+ State: &state,
+ Error: &err,
+ },
+ &EvalReturnError{
+ Error: &err,
+ },
+ &EvalUpdateStateHook{},
+ },
+ },
+ })
+
+ return seq
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
new file mode 100644
index 00000000..edfb460b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
@@ -0,0 +1,257 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeDestroyerCBD must be implemented by nodes that might be
+// create-before-destroy destroyers.
+type GraphNodeDestroyerCBD interface {
+ GraphNodeDestroyer
+
+ // CreateBeforeDestroy returns true if this node represents a node
+ // that is doing a CBD.
+ CreateBeforeDestroy() bool
+
+ // ModifyCreateBeforeDestroy is called when the CBD state of a node
+ // is changed dynamically. This can return an error if this isn't
+ // allowed.
+ ModifyCreateBeforeDestroy(bool) error
+}
+
+// CBDEdgeTransformer modifies the edges of CBD nodes that went through
+// the DestroyEdgeTransformer to have the right dependencies. There are
+// two real tasks here:
+//
+// 1. With CBD, the destroy edge is inverted: the destroy depends on
+// the creation.
+//
+// 2. A_d must depend on resources that depend on A. This is to enable
+// the destroy to only happen once nodes that depend on A successfully
+// update to A. Example: adding a web server updates the load balancer
+// before deleting the old web server.
+//
+type CBDEdgeTransformer struct {
+ // Module and State are only needed to look up dependencies in
+ // any way possible. Either can be nil if not available.
+ Module *module.Tree
+ State *State
+}
+
+func (t *CBDEdgeTransformer) Transform(g *Graph) error {
+ log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...")
+
+ // Go through and reverse any destroy edges
+ destroyMap := make(map[string][]dag.Vertex)
+ for _, v := range g.Vertices() {
+ dn, ok := v.(GraphNodeDestroyerCBD)
+ if !ok {
+ continue
+ }
+
+ if !dn.CreateBeforeDestroy() {
+ // If there are no CBD ancestors (dependent nodes), then we
+ // do nothing here.
+ if !t.hasCBDAncestor(g, v) {
+ continue
+ }
+
+ // If this isn't naturally a CBD node, this means that an ancestor is
+ // and we need to auto-upgrade this node to CBD. We do this because
+ // a CBD node depending on non-CBD will result in cycles. To avoid this,
+ // we always attempt to upgrade it.
+ if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
+ return fmt.Errorf(
+ "%s: must have create before destroy enabled because "+
+ "a dependent resource has CBD enabled. However, when "+
+ "attempting to automatically do this, an error occurred: %s",
+ dag.VertexName(v), err)
+ }
+ }
+
+ // Find the destroy edge. There should only be one.
+ for _, e := range g.EdgesTo(v) {
+ // Not a destroy edge, ignore it
+ de, ok := e.(*DestroyEdge)
+ if !ok {
+ continue
+ }
+
+ log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s",
+ dag.VertexName(de.Source()), dag.VertexName(de.Target()))
+
+ // Found it! Invert.
+ g.RemoveEdge(de)
+ g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()})
+ }
+
+ // If the address has an index, we strip that. Our depMap creation
+ // graph doesn't expand counts so we don't currently get _exact_
+ // dependencies. One day when we limit dependencies more exactly
+ // this will have to change. We have a test case covering this
+ // (depNonCBDCountBoth) so it'll be caught.
+ addr := dn.DestroyAddr()
+ if addr.Index >= 0 {
+ addr = addr.Copy() // Copy so that we don't modify any pointers
+ addr.Index = -1
+ }
+
+ // Add this to the list of nodes that we need to fix up
+ // the edges for (step 2 above in the docs).
+ key := addr.String()
+ destroyMap[key] = append(destroyMap[key], v)
+ }
+
+ // If we have no CBD nodes, then our work here is done
+ if len(destroyMap) == 0 {
+ return nil
+ }
+
+ // We have CBD nodes. We now have to move on to the much more difficult
+ // task of connecting dependencies of the creation side of the destroy
+ // to the destruction node. The easiest way to explain this is an example:
+ //
+ // Given a pre-destroy dependence of: A => B
+ // And A has CBD set.
+ //
+ // The resulting graph should be: A => B => A_d
+ //
+ // The key here is that B happens before A is destroyed. This is to
+ // facilitate the primary purpose for CBD: making sure that downstreams
+ // are properly updated to avoid downtime before the resource is destroyed.
+ //
+ // We can't trust that the resource being destroyed or anything that
+ // depends on it is actually in our current graph so we make a new
+ // graph in order to determine those dependencies and add them in.
+ log.Printf("[TRACE] CBDEdgeTransformer: building graph to find dependencies...")
+ depMap, err := t.depMap(destroyMap)
+ if err != nil {
+ return err
+ }
+
+ // We now have the mapping of resource addresses to the destroy
+ // nodes they need to depend on. We now go through our own vertices to
+ // find any matching these addresses and make the connection.
+ for _, v := range g.Vertices() {
+ // We're looking for creators
+ rn, ok := v.(GraphNodeCreator)
+ if !ok {
+ continue
+ }
+
+ // Get the address
+ addr := rn.CreateAddr()
+
+ // If the address has an index, we strip that. Our depMap creation
+ // graph doesn't expand counts so we don't currently get _exact_
+ // dependencies. One day when we limit dependencies more exactly
+ // this will have to change. We have a test case covering this
+ // (depNonCBDCount) so it'll be caught.
+ if addr.Index >= 0 {
+ addr = addr.Copy() // Copy so that we don't modify any pointers
+ addr.Index = -1
+ }
+
+ // If there is nothing this resource should depend on, ignore it
+ key := addr.String()
+ dns, ok := depMap[key]
+ if !ok {
+ continue
+ }
+
+ // We have nodes! Make the connection
+ for _, dn := range dns {
+ log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s",
+ dag.VertexName(dn), dag.VertexName(v))
+ g.Connect(dag.BasicEdge(dn, v))
+ }
+ }
+
+ return nil
+}
+
+func (t *CBDEdgeTransformer) depMap(
+ destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
+ // Build the graph of our config, this ensures that all resources
+ // are present in the graph.
+ g, err := (&BasicGraphBuilder{
+ Steps: []GraphTransformer{
+ &FlatConfigTransformer{Module: t.Module},
+ &AttachResourceConfigTransformer{Module: t.Module},
+ &AttachStateTransformer{State: t.State},
+ &ReferenceTransformer{},
+ },
+ Name: "CBDEdgeTransformer",
+ }).Build(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Using this graph, build the list of destroy nodes that each resource
+ // address should depend on. For example, when we find B, we map the
+ // address of B to A_d in the "depMap" variable below.
+ depMap := make(map[string][]dag.Vertex)
+ for _, v := range g.Vertices() {
+ // We're looking for resources.
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ // Get the address
+ addr := rn.ResourceAddr()
+ key := addr.String()
+
+ // Get the destroy nodes that are destroying this resource.
+ // If there aren't any, then we don't need to worry about
+ // any connections.
+ dns, ok := destroyMap[key]
+ if !ok {
+ continue
+ }
+
+ // Get the nodes that depend on this one. In the example above:
+ // finding B in A => B.
+ for _, v := range g.UpEdges(v).List() {
+ // We're looking for resources.
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ // Keep track of the destroy nodes that this address
+ // needs to depend on.
+ key := rn.ResourceAddr().String()
+ depMap[key] = append(depMap[key], dns...)
+ }
+ }
+
+ return depMap, nil
+}
+
+// hasCBDAncestor returns true if any ancestor (node that depends on this)
+// has CBD set.
+func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool {
+ s, _ := g.Ancestors(v)
+ if s == nil {
+ return true
+ }
+
+ for _, v := range s.List() {
+ dn, ok := v.(GraphNodeDestroyerCBD)
+ if !ok {
+ continue
+ }
+
+ if dn.CreateBeforeDestroy() {
+ // some ancestor is CreateBeforeDestroy, so we need to follow suit
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
new file mode 100644
index 00000000..22be1ab6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
@@ -0,0 +1,269 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeDestroyer must be implemented by nodes that destroy resources.
+type GraphNodeDestroyer interface {
+ dag.Vertex
+
+ // DestroyAddr is the address of the resource that is being
+ // destroyed by this node. If this returns nil, then this node
+ // is not destroying anything.
+ DestroyAddr() *ResourceAddress
+}
+
+// GraphNodeCreator must be implemented by nodes that create OR update resources.
+type GraphNodeCreator interface {
+ // CreateAddr is the address of the resource being created or updated
+ CreateAddr() *ResourceAddress
+}
+
+// DestroyEdgeTransformer is a GraphTransformer that creates the proper
+// references for destroy resources. Destroy resources are more complex
+// in that they must depend on the destruction of resources that
+// in turn depend on the CREATION of the node being destroyed.
+//
+// That is complicated. Visually:
+//
+// B_d -> A_d -> A -> B
+//
+// Notice that A destroy depends on B destroy, while B create depends on
+// A create. They're inverted. This must be done for example because often
+// dependent resources will block parent resources from deleting. Concrete
+// example: VPC with subnets, the VPC can't be deleted while there are
+// still subnets.
+type DestroyEdgeTransformer struct {
+ // These are needed to properly build the graph of dependencies
+ // to determine what a destroy node depends on. Any of these can be nil.
+ Module *module.Tree
+ State *State
+}
+
+func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
+ log.Printf("[TRACE] DestroyEdgeTransformer: Beginning destroy edge transformation...")
+
+ // Build a map of what is being destroyed (by address string) to
+ // the list of destroyers. In general there will only be one destroyer
+ // but to make it more robust we support multiple.
+ destroyers := make(map[string][]GraphNodeDestroyer)
+ for _, v := range g.Vertices() {
+ dn, ok := v.(GraphNodeDestroyer)
+ if !ok {
+ continue
+ }
+
+ addr := dn.DestroyAddr()
+ if addr == nil {
+ continue
+ }
+
+ key := addr.String()
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: %s destroying %q",
+ dag.VertexName(dn), key)
+ destroyers[key] = append(destroyers[key], dn)
+ }
+
+ // If we aren't destroying anything, there will be no edges to make
+ // so just exit early and avoid future work.
+ if len(destroyers) == 0 {
+ return nil
+ }
+
+ // Go through and connect creators to destroyers. Going along with
+ // our example, this makes: A_d => A
+ for _, v := range g.Vertices() {
+ cn, ok := v.(GraphNodeCreator)
+ if !ok {
+ continue
+ }
+
+ addr := cn.CreateAddr()
+ if addr == nil {
+ continue
+ }
+
+ key := addr.String()
+ ds := destroyers[key]
+ if len(ds) == 0 {
+ continue
+ }
+
+ for _, d := range ds {
+ // For illustrating our example
+ a_d := d.(dag.Vertex)
+ a := v
+
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s",
+ dag.VertexName(a), dag.VertexName(a_d))
+
+ g.Connect(&DestroyEdge{S: a, T: a_d})
+ }
+ }
+
+ // This is strange but is the easiest way to get the dependencies
+ // of a node that is being destroyed. We use another graph to make sure
+ // the resource is in the graph and ask for references. We have to do this
+ // because the node that is being destroyed may NOT be in the graph.
+ //
+ // Example: resource A is force new, then destroy A AND create A are
+ // in the graph. BUT if resource A is just pure destroy, then only
+ // destroy A is in the graph, and create A is not.
+ providerFn := func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{NodeAbstractProvider: a}
+ }
+ steps := []GraphTransformer{
+ // Add outputs and metadata
+ &OutputTransformer{Module: t.Module},
+ &AttachResourceConfigTransformer{Module: t.Module},
+ &AttachStateTransformer{State: t.State},
+
+ // Add providers since they can affect destroy order as well
+ &MissingProviderTransformer{AllowAny: true, Concrete: providerFn},
+ &ProviderTransformer{},
+ &DisableProviderTransformer{},
+ &ParentProviderTransformer{},
+ &AttachProviderConfigTransformer{Module: t.Module},
+
+ // Add all the variables. We can depend on resources through
+ // variables due to module parameters, and we need to properly
+ // determine that.
+ &RootVariableTransformer{Module: t.Module},
+ &ModuleVariableTransformer{Module: t.Module},
+
+ &ReferenceTransformer{},
+ }
+
+ // Go through all the nodes being destroyed and create a graph.
+ // The resulting graph is only of things being CREATED. For example,
+ // following our example, the resulting graph would be:
+ //
+ // A, B (with no edges)
+ //
+ var tempG Graph
+ var tempDestroyed []dag.Vertex
+ for d, _ := range destroyers {
+ // d is what is being destroyed. We parse the resource address
+ // which it came from; it is a panic if this fails.
+ addr, err := ParseResourceAddress(d)
+ if err != nil {
+ panic(err)
+ }
+
+ // This part is a little bit weird but is the best way to
+ // find the dependencies we need to: build a graph and use the
+ // attach config and state transformers then ask for references.
+ abstract := &NodeAbstractResource{Addr: addr}
+ tempG.Add(abstract)
+ tempDestroyed = append(tempDestroyed, abstract)
+
+ // We also add the destroy version here since the destroy can
+ // depend on things that the creation doesn't (destroy provisioners).
+ destroy := &NodeDestroyResource{NodeAbstractResource: abstract}
+ tempG.Add(destroy)
+ tempDestroyed = append(tempDestroyed, destroy)
+ }
+
+ // Run the graph transforms so we have the information we need to
+ // build references.
+ for _, s := range steps {
+ if err := s.Transform(&tempG); err != nil {
+ return err
+ }
+ }
+
+ log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String())
+
+ // Go through all the nodes in the graph and determine what they
+ // depend on.
+ for _, v := range tempDestroyed {
+ // Find all ancestors of this to determine the edges we'll depend on
+ vs, err := tempG.Ancestors(v)
+ if err != nil {
+ return err
+ }
+
+ refs := make([]dag.Vertex, 0, vs.Len())
+ for _, raw := range vs.List() {
+ refs = append(refs, raw.(dag.Vertex))
+ }
+
+ refNames := make([]string, len(refs))
+ for i, ref := range refs {
+ refNames[i] = dag.VertexName(ref)
+ }
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: creation node %q references %s",
+ dag.VertexName(v), refNames)
+
+ // If we have no references, then we won't need to do anything
+ if len(refs) == 0 {
+ continue
+ }
+
+ // Get the destroy node for this. In the example of our struct,
+ // we are currently at B and we're looking for B_d.
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ addr := rn.ResourceAddr()
+ if addr == nil {
+ continue
+ }
+
+ dns := destroyers[addr.String()]
+
+ // We have dependencies, check if any are being destroyed
+ // to build the list of things that we must depend on!
+ //
+ // In the example of the struct, if we have:
+ //
+ // B_d => A_d => A => B
+ //
+ // Then at this point in the algorithm we started with B_d,
+ // we built B (to get dependencies), and we found A. We're now looking
+ // to see if A_d exists.
+ var depDestroyers []dag.Vertex
+ for _, v := range refs {
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ addr := rn.ResourceAddr()
+ if addr == nil {
+ continue
+ }
+
+ key := addr.String()
+ if ds, ok := destroyers[key]; ok {
+ for _, d := range ds {
+ depDestroyers = append(depDestroyers, d.(dag.Vertex))
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s",
+ key, dag.VertexName(d))
+ }
+ }
+ }
+
+ // Go through and make the connections. Use the variable
+ // names "a_d" and "b_d" to reference our example.
+ for _, a_d := range dns {
+ for _, b_d := range depDestroyers {
+ if b_d != a_d {
+ g.Connect(dag.BasicEdge(b_d, a_d))
+ }
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
new file mode 100644
index 00000000..ad46d3c6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
@@ -0,0 +1,86 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// DiffTransformer is a GraphTransformer that adds the elements of
+// the diff to the graph.
+//
+// This transform is used for example by the ApplyGraphBuilder to ensure
+// that only resources that are being modified are represented in the graph.
+//
+// Module and State is still required for the DiffTransformer for annotations
+// since the Diff doesn't contain all the information required to build the
+// complete graph (such as create-before-destroy information). The graph
+// is built based on the diff first, though, ensuring that only resources
+// that are being modified are present in the graph.
+type DiffTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ Diff *Diff
+ Module *module.Tree
+ State *State
+}
+
+func (t *DiffTransformer) Transform(g *Graph) error {
+ // If the diff is nil or empty (nil is empty) then do nothing
+ if t.Diff.Empty() {
+ return nil
+ }
+
+ // Go through all the modules in the diff.
+ log.Printf("[TRACE] DiffTransformer: starting")
+ var nodes []dag.Vertex
+ for _, m := range t.Diff.Modules {
+ log.Printf("[TRACE] DiffTransformer: Module: %s", m)
+ // TODO: If this is a destroy diff then add a module destroy node
+
+ // Go through all the resources in this module.
+ for name, inst := range m.Resources {
+ log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst)
+
+ // We have changes! This is a create or update operation.
+ // First grab the address so we have a unique way to
+ // reference this resource.
+ addr, err := parseResourceAddressInternal(name)
+ if err != nil {
+ panic(fmt.Sprintf(
+ "Error parsing internal name, this is a bug: %q", name))
+ }
+
+ // Very important: add the module path for this resource to
+ // the address. Remove "root" from it.
+ addr.Path = m.Path[1:]
+
+ // If we're destroying, add the destroy node
+ if inst.Destroy || inst.GetDestroyDeposed() {
+ abstract := &NodeAbstractResource{Addr: addr}
+ g.Add(&NodeDestroyResource{NodeAbstractResource: abstract})
+ }
+
+ // If we have changes, then add the applyable version
+ if len(inst.Attributes) > 0 {
+ // Add the resource to the graph
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ nodes = append(nodes, node)
+ }
+ }
+ }
+
+ // Add all the nodes to the graph
+ for _, n := range nodes {
+ g.Add(n)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
new file mode 100644
index 00000000..982c098b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
@@ -0,0 +1,48 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeExpandable is an interface that nodes can implement to
+// signal that they can be expanded. Expanded nodes turn into
+// GraphNodeSubgraph nodes within the graph.
+type GraphNodeExpandable interface {
+ Expand(GraphBuilder) (GraphNodeSubgraph, error)
+}
+
+// GraphNodeDynamicExpandable is an interface that nodes can implement
+// to signal that they can be expanded at eval-time (hence dynamic).
+// These nodes are given the eval context and are expected to return
+// a new subgraph.
+type GraphNodeDynamicExpandable interface {
+ DynamicExpand(EvalContext) (*Graph, error)
+}
+
+// GraphNodeSubgraph is an interface a node can implement if it has
+// a larger subgraph that should be walked.
+type GraphNodeSubgraph interface {
+ Subgraph() dag.Grapher
+}
+
+// ExpandTransform is a transformer that does a subgraph expansion
+// at graph transform time (vs. at eval time). The benefit of earlier
+// subgraph expansion is that errors with the graph build can be detected
+// at an earlier stage.
+type ExpandTransform struct {
+ Builder GraphBuilder
+}
+
+func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) {
+ ev, ok := v.(GraphNodeExpandable)
+ if !ok {
+ // This isn't an expandable vertex, so just ignore it.
+ return v, nil
+ }
+
+ // Expand the subgraph!
+ log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev))
+ return ev.Expand(t.Builder)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
new file mode 100644
index 00000000..3673771c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
@@ -0,0 +1,38 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ImportProviderValidateTransformer is a GraphTransformer that goes through
+// the providers in the graph and validates that they only depend on variables.
+type ImportProviderValidateTransformer struct{}
+
+func (t *ImportProviderValidateTransformer) Transform(g *Graph) error {
+ for _, v := range g.Vertices() {
+ // We only care about providers
+ pv, ok := v.(GraphNodeProvider)
+ if !ok {
+ continue
+ }
+
+ // We only care about providers that reference things
+ rn, ok := pv.(GraphNodeReferencer)
+ if !ok {
+ continue
+ }
+
+ for _, ref := range rn.References() {
+ if !strings.HasPrefix(ref, "var.") {
+ return fmt.Errorf(
+ "Provider %q depends on non-var %q. Providers for import can currently\n"+
+ "only depend on variables or must be hardcoded. You can stop import\n"+
+ "from loading configurations by specifying `-config=\"\"`.",
+ pv.ProviderName(), ref)
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
new file mode 100644
index 00000000..081df2f8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
@@ -0,0 +1,241 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// ImportStateTransformer is a GraphTransformer that adds nodes to the
+// graph to represent the imports we want to do for resources.
+type ImportStateTransformer struct {
+ Targets []*ImportTarget
+}
+
+func (t *ImportStateTransformer) Transform(g *Graph) error {
+ nodes := make([]*graphNodeImportState, 0, len(t.Targets))
+ for _, target := range t.Targets {
+ addr, err := ParseResourceAddress(target.Addr)
+ if err != nil {
+ return fmt.Errorf(
+ "failed to parse resource address '%s': %s",
+ target.Addr, err)
+ }
+
+ nodes = append(nodes, &graphNodeImportState{
+ Addr: addr,
+ ID: target.ID,
+ Provider: target.Provider,
+ })
+ }
+
+ // Build the graph vertices
+ for _, n := range nodes {
+ g.Add(n)
+ }
+
+ return nil
+}
+
+type graphNodeImportState struct {
+ Addr *ResourceAddress // Addr is the resource address to import to
+ ID string // ID is the ID to import as
+ Provider string // Provider string
+
+ states []*InstanceState
+}
+
+func (n *graphNodeImportState) Name() string {
+ return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID)
+}
+
+func (n *graphNodeImportState) ProvidedBy() []string {
+ return []string{resourceProvider(n.Addr.Type, n.Provider)}
+}
+
+// GraphNodeSubPath
+func (n *graphNodeImportState) Path() []string {
+ return normalizeModulePath(n.Addr.Path)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeImportState) EvalTree() EvalNode {
+ var provider ResourceProvider
+ info := &InstanceInfo{
+ Id: fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name),
+ ModulePath: n.Path(),
+ Type: n.Addr.Type,
+ }
+
+ // Reset our states
+ n.states = nil
+
+ // Return our sequence
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalImportState{
+ Provider: &provider,
+ Info: info,
+ Id: n.ID,
+ Output: &n.states,
+ },
+ },
+ }
+}
+
+// GraphNodeDynamicExpandable impl.
+//
+// We use DynamicExpand as a way to generate the subgraph of refreshes
+// and state inserts we need to do for our import state. Since they're new
+// resources they don't depend on anything else and refreshes are isolated
+// so this is nearly a perfect use case for dynamic expand.
+func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ g := &Graph{Path: ctx.Path()}
+
+ // nameCounter is used to de-dup names in the state.
+ nameCounter := make(map[string]int)
+
+ // Compile the list of addresses that we'll be inserting into the state.
+ // We do this ahead of time so we can verify that we aren't importing
+ // something that already exists.
+ addrs := make([]*ResourceAddress, len(n.states))
+ for i, state := range n.states {
+ addr := *n.Addr
+ if t := state.Ephemeral.Type; t != "" {
+ addr.Type = t
+ }
+
+ // Determine if we need to suffix the name to de-dup
+ key := addr.String()
+ count, ok := nameCounter[key]
+ if ok {
+ count++
+ addr.Name += fmt.Sprintf("-%d", count)
+ }
+ nameCounter[key] = count
+
+ // Add it to our list
+ addrs[i] = &addr
+ }
+
+ // Verify that all the addresses are clear
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+ filter := &StateFilter{State: state}
+ for _, addr := range addrs {
+ result, err := filter.Filter(addr.String())
+ if err != nil {
+ return nil, fmt.Errorf("Error verifying address %s: %s", addr, err)
+ }
+
+ // Go through the filter results and it is an error if we find
+ // a matching InstanceState, meaning that we would have a collision.
+ for _, r := range result {
+ if _, ok := r.Value.(*InstanceState); ok {
+ return nil, fmt.Errorf(
+ "Can't import %s, would collide with an existing resource.\n\n"+
+ "Please remove or rename this resource before continuing.",
+ addr)
+ }
+ }
+ }
+
+ // For each of the states, we add a node to handle the refresh/add to state.
+ // "n.states" is populated by our own EvalTree with the result of
+ // ImportState. Since DynamicExpand is always called after EvalTree, this
+ // is safe.
+ for i, state := range n.states {
+ g.Add(&graphNodeImportStateSub{
+ Target: addrs[i],
+ Path_: n.Path(),
+ State: state,
+ Provider: n.Provider,
+ })
+ }
+
+ // Root transform for a single root
+ t := &RootTransformer{}
+ if err := t.Transform(g); err != nil {
+ return nil, err
+ }
+
+ // Done!
+ return g, nil
+}
+
+// graphNodeImportStateSub is the sub-node of graphNodeImportState
+// and is part of the subgraph. This node is responsible for refreshing
+// and adding a resource to the state once it is imported.
+type graphNodeImportStateSub struct {
+ Target *ResourceAddress
+ State *InstanceState
+ Path_ []string
+ Provider string
+}
+
+func (n *graphNodeImportStateSub) Name() string {
+ return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID)
+}
+
+func (n *graphNodeImportStateSub) Path() []string {
+ return n.Path_
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeImportStateSub) EvalTree() EvalNode {
+ // If the Ephemeral type isn't set, then it is an error
+ if n.State.Ephemeral.Type == "" {
+ err := fmt.Errorf(
+ "import of %s didn't set type for %s",
+ n.Target.String(), n.State.ID)
+ return &EvalReturnError{Error: &err}
+ }
+
+ // DeepCopy so we're only modifying our local copy
+ state := n.State.DeepCopy()
+
+ // Build the resource info
+ info := &InstanceInfo{
+ Id: fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name),
+ ModulePath: n.Path_,
+ Type: n.State.Ephemeral.Type,
+ }
+
+ // Key is the resource key
+ key := &ResourceStateKey{
+ Name: n.Target.Name,
+ Type: info.Type,
+ Index: n.Target.Index,
+ }
+
+ // The eval sequence
+ var provider ResourceProvider
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: resourceProvider(info.Type, n.Provider),
+ Output: &provider,
+ },
+ &EvalRefresh{
+ Provider: &provider,
+ State: &state,
+ Info: info,
+ Output: &state,
+ },
+ &EvalImportStateVerify{
+ Info: info,
+ Id: n.State.ID,
+ State: &state,
+ },
+ &EvalWriteState{
+ Name: key.String(),
+ ResourceType: info.Type,
+ Provider: resourceProvider(info.Type, n.Provider),
+ State: &state,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
new file mode 100644
index 00000000..467950bd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
@@ -0,0 +1,120 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ModuleVariableTransformer is a GraphTransformer that adds all the variables
+// in the configuration to the graph.
+//
+// This only adds variables that are referenced by other things in the graph.
+// If a module variable is not referenced, it won't be added to the graph.
+type ModuleVariableTransformer struct {
+ Module *module.Tree
+
+ DisablePrune bool // True if pruning unreferenced should be disabled
+}
+
+func (t *ModuleVariableTransformer) Transform(g *Graph) error {
+ return t.transform(g, nil, t.Module)
+}
+
+func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error {
+ // If no config, no variables
+ if m == nil {
+ return nil
+ }
+
+ // Transform all the children. This must be done BEFORE the transform
+ // above since child module variables can reference parent module variables.
+ for _, c := range m.Children() {
+ if err := t.transform(g, m, c); err != nil {
+ return err
+ }
+ }
+
+ // If we have a parent, we can determine if a module variable is being
+ // used, so we transform this.
+ if parent != nil {
+ if err := t.transformSingle(g, parent, m); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error {
+ // If we have no vars, we're done!
+ vars := m.Config().Variables
+ if len(vars) == 0 {
+ log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path())
+ return nil
+ }
+
+ // Look for usage of this module
+ var mod *config.Module
+ for _, modUse := range parent.Config().Modules {
+ if modUse.Name == m.Name() {
+ mod = modUse
+ break
+ }
+ }
+ if mod == nil {
+ log.Printf("[INFO] Module %#v not used, not adding variables", m.Path())
+ return nil
+ }
+
+ // Build the reference map so we can determine if we're referencing things.
+ refMap := NewReferenceMap(g.Vertices())
+
+ // Add all variables here
+ for _, v := range vars {
+ // Determine the value of the variable. If it isn't in the
+ // configuration then it was never set and that's not a problem.
+ var value *config.RawConfig
+ if raw, ok := mod.RawConfig.Raw[v.Name]; ok {
+ var err error
+ value, err = config.NewRawConfig(map[string]interface{}{
+ v.Name: raw,
+ })
+ if err != nil {
+ // This shouldn't happen because it is already in
+ // a RawConfig above meaning it worked once before.
+ panic(err)
+ }
+ }
+
+ // Build the node.
+ //
+ // NOTE: For now this is just an "applyable" variable. As we build
+ // new graph builders for the other operations I suspect we'll
+ // find a way to parameterize this, require new transforms, etc.
+ node := &NodeApplyableModuleVariable{
+ PathValue: normalizeModulePath(m.Path()),
+ Config: v,
+ Value: value,
+ Module: t.Module,
+ }
+
+ if !t.DisablePrune {
+ // If the node is not referenced by anything, then we don't need
+ // to include it since it won't be used.
+ if matches := refMap.ReferencedBy(node); len(matches) == 0 {
+ log.Printf(
+ "[INFO] Not including %q in graph, nothing depends on it",
+ dag.VertexName(node))
+ continue
+ }
+ }
+
+ // Add it!
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
new file mode 100644
index 00000000..b256a25b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
@@ -0,0 +1,110 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// OrphanResourceCountTransformer is a GraphTransformer that adds orphans
+// for an expanded count to the graph. The determination of this depends
+// on the count argument given.
+//
+// Orphans are found by comparing the count to what is found in the state.
+// This transform assumes that if an element in the state is within the count
+// bounds given, that it is not an orphan.
+type OrphanResourceCountTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ Count int // Actual count of the resource
+ Addr *ResourceAddress // Addr of the resource to look for orphans
+ State *State // Full global state
+}
+
+func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
+ log.Printf("[TRACE] OrphanResourceCount: Starting...")
+
+ // Grab the module in the state just for this resource address
+ ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path))
+ if ms == nil {
+ // If no state, there can't be orphans
+ return nil
+ }
+
+ orphanIndex := -1
+ if t.Count == 1 {
+ orphanIndex = 0
+ }
+
+ // Go through the orphans and add them all to the state
+ for key, _ := range ms.Resources {
+ // Build the address
+ addr, err := parseResourceAddressInternal(key)
+ if err != nil {
+ return err
+ }
+ addr.Path = ms.Path[1:]
+
+ // Copy the address for comparison. If we aren't looking at
+ // the same resource, then just ignore it.
+ addrCopy := addr.Copy()
+ addrCopy.Index = -1
+ if !addrCopy.Equals(t.Addr) {
+ continue
+ }
+
+ log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr)
+
+ idx := addr.Index
+
+ // If we have zero and the index here is 0 or 1, then we
+ // change the index to a high number so that we treat it as
+ // an orphan.
+ if t.Count <= 0 && idx <= 0 {
+ idx = t.Count + 1
+ }
+
+ // If we have a count greater than 0 and we're at the zero index,
+ // we do a special case check to see if our state also has a
+ // -1 index value. If so, this is an orphan because our rules are
+ // that if both a -1 and 0 are in the state, the 0 is destroyed.
+ if t.Count > 0 && idx == orphanIndex {
+ // This is a piece of cleverness (beware), but it's simple:
+ // if orphanIndex is 0, then check -1, else check 0.
+ checkIndex := (orphanIndex + 1) * -1
+
+ key := &ResourceStateKey{
+ Name: addr.Name,
+ Type: addr.Type,
+ Mode: addr.Mode,
+ Index: checkIndex,
+ }
+
+ if _, ok := ms.Resources[key.String()]; ok {
+ // We have a -1 index, too. Make an arbitrarily high
+ // index so that we always mark this as an orphan.
+ log.Printf(
+ "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d",
+ addr, orphanIndex)
+ idx = t.Count + 1
+ }
+ }
+
+ // If the index is within the count bounds, it is not an orphan
+ if idx < t.Count {
+ continue
+ }
+
+ // Build the abstract node and the concrete one
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ // Add it to the graph
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
new file mode 100644
index 00000000..49568d5b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
@@ -0,0 +1,64 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// OrphanOutputTransformer finds the outputs that aren't present
+// in the given config that are in the state and adds them to the graph
+// for deletion.
+type OrphanOutputTransformer struct {
+ Module *module.Tree // Root module
+ State *State // State is the root state
+}
+
+func (t *OrphanOutputTransformer) Transform(g *Graph) error {
+ if t.State == nil {
+ log.Printf("[DEBUG] No state, no orphan outputs")
+ return nil
+ }
+
+ return t.transform(g, t.Module)
+}
+
+func (t *OrphanOutputTransformer) transform(g *Graph, m *module.Tree) error {
+ // Get our configuration, and recurse into children
+ var c *config.Config
+ if m != nil {
+ c = m.Config()
+ for _, child := range m.Children() {
+ if err := t.transform(g, child); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Get the state. If there is no state, then we have no orphans!
+ path := normalizeModulePath(m.Path())
+ state := t.State.ModuleByPath(path)
+ if state == nil {
+ return nil
+ }
+
+ // Make a map of the valid outputs
+ valid := make(map[string]struct{})
+ for _, o := range c.Outputs {
+ valid[o.Name] = struct{}{}
+ }
+
+ // Go through the outputs and find the ones that aren't in our config.
+ for n, _ := range state.Outputs {
+ // If it is in the valid map, then ignore
+ if _, ok := valid[n]; ok {
+ continue
+ }
+
+ // Orphan!
+ g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path})
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
new file mode 100644
index 00000000..e42d3c84
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
@@ -0,0 +1,78 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// OrphanResourceTransformer is a GraphTransformer that adds resource
+// orphans to the graph. A resource orphan is a resource that is
+// represented in the state but not in the configuration.
+//
+// This only adds orphans that have no representation at all in the
+// configuration.
+type OrphanResourceTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ // State is the global state. We require the global state to
+ // properly find module orphans at our path.
+ State *State
+
+ // Module is the root module. We'll look up the proper configuration
+ // using the graph path.
+ Module *module.Tree
+}
+
+func (t *OrphanResourceTransformer) Transform(g *Graph) error {
+ if t.State == nil {
+ // If the entire state is nil, there can't be any orphans
+ return nil
+ }
+
+ // Go through the modules and for each module transform in order
+ // to add the orphan.
+ for _, ms := range t.State.Modules {
+ if err := t.transform(g, ms); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error {
+ if ms == nil {
+ return nil
+ }
+
+ // Get the configuration for this path. The configuration might be
+ // nil if the module was removed from the configuration. This is okay,
+ // this just means that every resource is an orphan.
+ var c *config.Config
+ if m := t.Module.Child(ms.Path[1:]); m != nil {
+ c = m.Config()
+ }
+
+ // Go through the orphans and add them all to the state
+ for _, key := range ms.Orphans(c) {
+ // Build the abstract resource
+ addr, err := parseResourceAddressInternal(key)
+ if err != nil {
+ return err
+ }
+ addr.Path = ms.Path[1:]
+
+ // Build the abstract node and the concrete one
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ // Add it to the graph
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
new file mode 100644
index 00000000..b260f4ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
@@ -0,0 +1,59 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// OutputTransformer is a GraphTransformer that adds all the outputs
+// in the configuration to the graph.
+//
+// This is done for the apply graph builder even if dependent nodes
+// aren't changing since there is no downside: the state will be available
+// even if the dependent items aren't changing.
+type OutputTransformer struct {
+ Module *module.Tree
+}
+
+func (t *OutputTransformer) Transform(g *Graph) error {
+ return t.transform(g, t.Module)
+}
+
+func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error {
+ // If no config, no outputs
+ if m == nil {
+ return nil
+ }
+
+ // Transform all the children. We must do this first because
+ // we can reference module outputs and they must show up in the
+ // reference map.
+ for _, c := range m.Children() {
+ if err := t.transform(g, c); err != nil {
+ return err
+ }
+ }
+
+ // If we have no outputs, we're done!
+ os := m.Config().Outputs
+ if len(os) == 0 {
+ return nil
+ }
+
+ // Add all outputs here
+ for _, o := range os {
+ // Build the node.
+ //
+ // NOTE: For now this is just an "applyable" output. As we build
+ // new graph builders for the other operations I suspect we'll
+ // find a way to parameterize this, require new transforms, etc.
+ node := &NodeApplyableOutput{
+ PathValue: normalizeModulePath(m.Path()),
+ Config: o,
+ }
+
+ // Add it!
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
new file mode 100644
index 00000000..b9695d52
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
@@ -0,0 +1,380 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeProvider is an interface that nodes that can be a provider
+// must implement. The ProviderName returned is the name of the provider
+// they satisfy.
+type GraphNodeProvider interface {
+ ProviderName() string
+}
+
+// GraphNodeCloseProvider is an interface that nodes that can be a close
+// provider must implement. The CloseProviderName returned is the name of
+// the provider they satisfy.
+type GraphNodeCloseProvider interface {
+ CloseProviderName() string
+}
+
+// GraphNodeProviderConsumer is an interface that nodes that require
+// a provider must implement. ProvidedBy must return the name of the provider
+// to use.
+type GraphNodeProviderConsumer interface {
+ ProvidedBy() []string
+}
+
+// ProviderTransformer is a GraphTransformer that maps resources to
+// providers within the graph. This will error if there are any resources
+// that don't map to proper resources.
+type ProviderTransformer struct{}
+
+func (t *ProviderTransformer) Transform(g *Graph) error {
+ // Go through the other nodes and match them to providers they need
+ var err error
+ m := providerVertexMap(g)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProviderConsumer); ok {
+ for _, p := range pv.ProvidedBy() {
+ target := m[providerMapKey(p, pv)]
+ if target == nil {
+ println(fmt.Sprintf("%#v\n\n%#v", m, providerMapKey(p, pv)))
+ err = multierror.Append(err, fmt.Errorf(
+ "%s: provider %s couldn't be found",
+ dag.VertexName(v), p))
+ continue
+ }
+
+ g.Connect(dag.BasicEdge(v, target))
+ }
+ }
+ }
+
+ return err
+}
+
+// CloseProviderTransformer is a GraphTransformer that adds nodes to the
+// graph that will close open provider connections that aren't needed anymore.
+// A provider connection is not needed anymore once all depended resources
+// in the graph are evaluated.
+type CloseProviderTransformer struct{}
+
+func (t *CloseProviderTransformer) Transform(g *Graph) error {
+ pm := providerVertexMap(g)
+ cpm := closeProviderVertexMap(g)
+ var err error
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProviderConsumer); ok {
+ for _, p := range pv.ProvidedBy() {
+ key := p
+ source := cpm[key]
+
+ if source == nil {
+ // Create a new graphNodeCloseProvider and add it to the graph
+ source = &graphNodeCloseProvider{ProviderNameValue: p}
+ g.Add(source)
+
+ // Close node needs to depend on provider
+ provider, ok := pm[key]
+ if !ok {
+ err = multierror.Append(err, fmt.Errorf(
+ "%s: provider %s couldn't be found for closing",
+ dag.VertexName(v), p))
+ continue
+ }
+ g.Connect(dag.BasicEdge(source, provider))
+
+ // Make sure we also add the new graphNodeCloseProvider to the map
+ // so we don't create and add any duplicate graphNodeCloseProviders.
+ cpm[key] = source
+ }
+
+ // Close node depends on all nodes provided by the provider
+ g.Connect(dag.BasicEdge(source, v))
+ }
+ }
+ }
+
+ return err
+}
+
+// MissingProviderTransformer is a GraphTransformer that adds nodes
+// for missing providers into the graph. Specifically, it creates provider
+// configuration nodes for all the providers that we support. These are
+// pruned later during an optimization pass.
+type MissingProviderTransformer struct {
+ // Providers is the list of providers we support.
+ Providers []string
+
+ // AllowAny will not check that a provider is supported before adding
+ // it to the graph.
+ AllowAny bool
+
+ // Concrete, if set, overrides how the providers are made.
+ Concrete ConcreteProviderNodeFunc
+}
+
+func (t *MissingProviderTransformer) Transform(g *Graph) error {
+ // Initialize factory
+ if t.Concrete == nil {
+ t.Concrete = func(a *NodeAbstractProvider) dag.Vertex {
+ return a
+ }
+ }
+
+ // Create a set of our supported providers
+ supported := make(map[string]struct{}, len(t.Providers))
+ for _, v := range t.Providers {
+ supported[v] = struct{}{}
+ }
+
+ // Get the map of providers we already have in our graph
+ m := providerVertexMap(g)
+
+ // Go through all the provider consumers and make sure we add
+ // that provider if it is missing. We use a for loop here instead
+ // of "range" since we'll modify check as we go to add more to check.
+ check := g.Vertices()
+ for i := 0; i < len(check); i++ {
+ v := check[i]
+
+ pv, ok := v.(GraphNodeProviderConsumer)
+ if !ok {
+ continue
+ }
+
+ // If this node has a subpath, then we use that as a prefix
+ // into our map to check for an existing provider.
+ var path []string
+ if sp, ok := pv.(GraphNodeSubPath); ok {
+ raw := normalizeModulePath(sp.Path())
+ if len(raw) > len(rootModulePath) {
+ path = raw
+ }
+ }
+
+ for _, p := range pv.ProvidedBy() {
+ key := providerMapKey(p, pv)
+ if _, ok := m[key]; ok {
+ // This provider already exists as a configure node
+ continue
+ }
+
+ // If the provider has an alias in it, we just want the type
+ ptype := p
+ if idx := strings.IndexRune(p, '.'); idx != -1 {
+ ptype = p[:idx]
+ }
+
+ if !t.AllowAny {
+ if _, ok := supported[ptype]; !ok {
+ // If we don't support the provider type, skip it.
+ // Validation later will catch this as an error.
+ continue
+ }
+ }
+
+ // Add the missing provider node to the graph
+ v := t.Concrete(&NodeAbstractProvider{
+ NameValue: p,
+ PathValue: path,
+ }).(dag.Vertex)
+ if len(path) > 0 {
+ // We'll need the parent provider as well, so let's
+ // add a dummy node to check to make sure that we add
+ // that parent provider.
+ check = append(check, &graphNodeProviderConsumerDummy{
+ ProviderValue: p,
+ PathValue: path[:len(path)-1],
+ })
+ }
+
+ m[key] = g.Add(v)
+ }
+ }
+
+ return nil
+}
+
+// ParentProviderTransformer connects provider nodes to their parents.
+//
+// This works by finding nodes that are both GraphNodeProviders and
+// GraphNodeSubPath. It then connects the providers to their parent
+// path.
+type ParentProviderTransformer struct{}
+
+func (t *ParentProviderTransformer) Transform(g *Graph) error {
+ // Make a mapping of path to dag.Vertex, where path is: "path.name"
+ m := make(map[string]dag.Vertex)
+
+ // Also create a map that maps a provider to its parent
+ parentMap := make(map[dag.Vertex]string)
+ for _, raw := range g.Vertices() {
+ // If it is the flat version, then make it the non-flat version.
+ // We eventually want to get rid of the flat version entirely so
+ // this is a stop-gap while it still exists.
+ var v dag.Vertex = raw
+
+ // Only care about providers
+ pn, ok := v.(GraphNodeProvider)
+ if !ok || pn.ProviderName() == "" {
+ continue
+ }
+
+ // Also require a subpath, if there is no subpath then we
+ // just totally ignore it. The expectation of this transform is
+ // that it is used with a graph builder that is already flattened.
+ var path []string
+ if pn, ok := raw.(GraphNodeSubPath); ok {
+ path = pn.Path()
+ }
+ path = normalizeModulePath(path)
+
+ // Build the key with path.name i.e. "child.subchild.aws"
+ key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
+ m[key] = raw
+
+ // Determine the parent if we're non-root. This is length 1 since
+ // the 0 index should be "root" since we normalize above.
+ if len(path) > 1 {
+ path = path[:len(path)-1]
+ key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
+ parentMap[raw] = key
+ }
+ }
+
+ // Connect!
+ for v, key := range parentMap {
+ if parent, ok := m[key]; ok {
+ g.Connect(dag.BasicEdge(v, parent))
+ }
+ }
+
+ return nil
+}
+
+// PruneProviderTransformer is a GraphTransformer that prunes all the
+// providers that aren't needed from the graph. A provider is unneeded if
+// no resource or module is using that provider.
+type PruneProviderTransformer struct{}
+
+func (t *PruneProviderTransformer) Transform(g *Graph) error {
+ for _, v := range g.Vertices() {
+ // We only care about the providers
+ if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" {
+ continue
+ }
+ // Does anything depend on this? If not, then prune it.
+ if s := g.UpEdges(v); s.Len() == 0 {
+ if nv, ok := v.(dag.NamedVertex); ok {
+ log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name())
+ }
+ g.Remove(v)
+ }
+ }
+
+ return nil
+}
+
+// providerMapKey is a helper that gives us the key to use for the
+// maps returned by things such as providerVertexMap.
+func providerMapKey(k string, v dag.Vertex) string {
+ pathPrefix := ""
+ if sp, ok := v.(GraphNodeSubPath); ok {
+ raw := normalizeModulePath(sp.Path())
+ if len(raw) > len(rootModulePath) {
+ pathPrefix = modulePrefixStr(raw) + "."
+ }
+ }
+
+ return pathPrefix + k
+}
+
+func providerVertexMap(g *Graph) map[string]dag.Vertex {
+ m := make(map[string]dag.Vertex)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProvider); ok {
+ key := providerMapKey(pv.ProviderName(), v)
+ m[key] = v
+ }
+ }
+
+ return m
+}
+
+func closeProviderVertexMap(g *Graph) map[string]dag.Vertex {
+ m := make(map[string]dag.Vertex)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeCloseProvider); ok {
+ m[pv.CloseProviderName()] = v
+ }
+ }
+
+ return m
+}
+
+type graphNodeCloseProvider struct {
+ ProviderNameValue string
+}
+
+func (n *graphNodeCloseProvider) Name() string {
+ return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeCloseProvider) EvalTree() EvalNode {
+ return CloseProviderEvalTree(n.ProviderNameValue)
+}
+
+// GraphNodeDependable impl.
+func (n *graphNodeCloseProvider) DependableName() []string {
+ return []string{n.Name()}
+}
+
+func (n *graphNodeCloseProvider) CloseProviderName() string {
+ return n.ProviderNameValue
+}
+
+// GraphNodeDotter impl.
+func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+ if !opts.Verbose {
+ return nil
+ }
+ return &dag.DotNode{
+ Name: name,
+ Attrs: map[string]string{
+ "label": n.Name(),
+ "shape": "diamond",
+ },
+ }
+}
+
+// RemovableIfNotTargeted
+func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool {
+ // We need to add this so that this node will be removed if
+ // it isn't targeted or a dependency of a target.
+ return true
+}
+
+// graphNodeProviderConsumerDummy is a struct that never enters the real
+// graph (though it could to no ill effect). It implements
+// GraphNodeProviderConsumer and GraphNodeSubPath as a way to force
+// certain transformations.
+type graphNodeProviderConsumerDummy struct {
+ ProviderValue string
+ PathValue []string
+}
+
+func (n *graphNodeProviderConsumerDummy) Path() []string {
+ return n.PathValue
+}
+
+func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string {
+ return []string{n.ProviderValue}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
new file mode 100644
index 00000000..d9919f3a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
@@ -0,0 +1,50 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// DisableProviderTransformer "disables" any providers that are not actually
+// used by anything. This avoids the provider being initialized and configured.
+// This both saves resources but also avoids errors since configuration
+// may imply initialization which may require auth.
+type DisableProviderTransformer struct{}
+
+func (t *DisableProviderTransformer) Transform(g *Graph) error {
+ for _, v := range g.Vertices() {
+ // We only care about providers
+ pn, ok := v.(GraphNodeProvider)
+ if !ok || pn.ProviderName() == "" {
+ continue
+ }
+
+ // If we have dependencies, then don't disable
+ if g.UpEdges(v).Len() > 0 {
+ continue
+ }
+
+ // Get the path
+ var path []string
+ if pn, ok := v.(GraphNodeSubPath); ok {
+ path = pn.Path()
+ }
+
+ // Disable the provider by replacing it with a "disabled" provider
+ disabled := &NodeDisabledProvider{
+ NodeAbstractProvider: &NodeAbstractProvider{
+ NameValue: pn.ProviderName(),
+ PathValue: path,
+ },
+ }
+
+ if !g.Replace(v, disabled) {
+ panic(fmt.Sprintf(
+ "vertex disappeared from under us: %s",
+ dag.VertexName(v)))
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
new file mode 100644
index 00000000..f49d8241
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
@@ -0,0 +1,206 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/dag"
+)
+
// GraphNodeProvisioner is an interface that nodes that can be a provisioner
// must implement. The ProvisionerName returned is the name of the provisioner
// they satisfy.
type GraphNodeProvisioner interface {
	// ProvisionerName returns the name of the provisioner this node provides.
	ProvisionerName() string
}

// GraphNodeCloseProvisioner is an interface that nodes that can be a close
// provisioner must implement. The CloseProvisionerName returned is the name
// of the provisioner they satisfy.
type GraphNodeCloseProvisioner interface {
	// CloseProvisionerName returns the name of the provisioner this node
	// closes.
	CloseProvisionerName() string
}

// GraphNodeProvisionerConsumer is an interface that nodes that require
// a provisioner must implement. ProvisionedBy must return the name of the
// provisioner to use.
type GraphNodeProvisionerConsumer interface {
	// ProvisionedBy returns the names of all provisioners this node uses.
	ProvisionedBy() []string
}
+
+// ProvisionerTransformer is a GraphTransformer that maps resources to
+// provisioners within the graph. This will error if there are any resources
+// that don't map to proper resources.
+type ProvisionerTransformer struct{}
+
+func (t *ProvisionerTransformer) Transform(g *Graph) error {
+ // Go through the other nodes and match them to provisioners they need
+ var err error
+ m := provisionerVertexMap(g)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
+ for _, p := range pv.ProvisionedBy() {
+ key := provisionerMapKey(p, pv)
+ if m[key] == nil {
+ err = multierror.Append(err, fmt.Errorf(
+ "%s: provisioner %s couldn't be found",
+ dag.VertexName(v), p))
+ continue
+ }
+
+ g.Connect(dag.BasicEdge(v, m[key]))
+ }
+ }
+ }
+
+ return err
+}
+
+// MissingProvisionerTransformer is a GraphTransformer that adds nodes
+// for missing provisioners into the graph.
+type MissingProvisionerTransformer struct {
+ // Provisioners is the list of provisioners we support.
+ Provisioners []string
+}
+
+func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
+ // Create a set of our supported provisioners
+ supported := make(map[string]struct{}, len(t.Provisioners))
+ for _, v := range t.Provisioners {
+ supported[v] = struct{}{}
+ }
+
+ // Get the map of provisioners we already have in our graph
+ m := provisionerVertexMap(g)
+
+ // Go through all the provisioner consumers and make sure we add
+ // that provisioner if it is missing.
+ for _, v := range g.Vertices() {
+ pv, ok := v.(GraphNodeProvisionerConsumer)
+ if !ok {
+ continue
+ }
+
+ // If this node has a subpath, then we use that as a prefix
+ // into our map to check for an existing provider.
+ var path []string
+ if sp, ok := pv.(GraphNodeSubPath); ok {
+ raw := normalizeModulePath(sp.Path())
+ if len(raw) > len(rootModulePath) {
+ path = raw
+ }
+ }
+
+ for _, p := range pv.ProvisionedBy() {
+ // Build the key for storing in the map
+ key := provisionerMapKey(p, pv)
+
+ if _, ok := m[key]; ok {
+ // This provisioner already exists as a configure node
+ continue
+ }
+
+ if _, ok := supported[p]; !ok {
+ // If we don't support the provisioner type, skip it.
+ // Validation later will catch this as an error.
+ continue
+ }
+
+ // Build the vertex
+ var newV dag.Vertex = &NodeProvisioner{
+ NameValue: p,
+ PathValue: path,
+ }
+
+ // Add the missing provisioner node to the graph
+ m[key] = g.Add(newV)
+ }
+ }
+
+ return nil
+}
+
+// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the
+// graph that will close open provisioner connections that aren't needed
+// anymore. A provisioner connection is not needed anymore once all depended
+// resources in the graph are evaluated.
+type CloseProvisionerTransformer struct{}
+
+func (t *CloseProvisionerTransformer) Transform(g *Graph) error {
+ m := closeProvisionerVertexMap(g)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
+ for _, p := range pv.ProvisionedBy() {
+ source := m[p]
+
+ if source == nil {
+ // Create a new graphNodeCloseProvisioner and add it to the graph
+ source = &graphNodeCloseProvisioner{ProvisionerNameValue: p}
+ g.Add(source)
+
+ // Make sure we also add the new graphNodeCloseProvisioner to the map
+ // so we don't create and add any duplicate graphNodeCloseProvisioners.
+ m[p] = source
+ }
+
+ g.Connect(dag.BasicEdge(source, v))
+ }
+ }
+ }
+
+ return nil
+}
+
+// provisionerMapKey is a helper that gives us the key to use for the
+// maps returned by things such as provisionerVertexMap.
+func provisionerMapKey(k string, v dag.Vertex) string {
+ pathPrefix := ""
+ if sp, ok := v.(GraphNodeSubPath); ok {
+ raw := normalizeModulePath(sp.Path())
+ if len(raw) > len(rootModulePath) {
+ pathPrefix = modulePrefixStr(raw) + "."
+ }
+ }
+
+ return pathPrefix + k
+}
+
+func provisionerVertexMap(g *Graph) map[string]dag.Vertex {
+ m := make(map[string]dag.Vertex)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProvisioner); ok {
+ key := provisionerMapKey(pv.ProvisionerName(), v)
+ m[key] = v
+ }
+ }
+
+ return m
+}
+
+func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex {
+ m := make(map[string]dag.Vertex)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeCloseProvisioner); ok {
+ m[pv.CloseProvisionerName()] = v
+ }
+ }
+
+ return m
+}
+
+type graphNodeCloseProvisioner struct {
+ ProvisionerNameValue string
+}
+
+func (n *graphNodeCloseProvisioner) Name() string {
+ return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeCloseProvisioner) EvalTree() EvalNode {
+ return &EvalCloseProvisioner{Name: n.ProvisionerNameValue}
+}
+
+func (n *graphNodeCloseProvisioner) CloseProvisionerName() string {
+ return n.ProvisionerNameValue
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
new file mode 100644
index 00000000..c5452354
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
@@ -0,0 +1,321 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/dag"
+)
+
// GraphNodeReferenceable must be implemented by any node that represents
// a Terraform thing that can be referenced (resource, module, etc.).
//
// Even if the thing has no name, this should return an empty list. By
// implementing this and returning a non-nil result, you say that this CAN
// be referenced and other methods of referencing may still be possible (such
// as by path!)
type GraphNodeReferenceable interface {
	// ReferenceableName is the name by which this can be referenced.
	// This can be either just the type, or include the field. Example:
	// "aws_instance.bar" or "aws_instance.bar.id".
	ReferenceableName() []string
}

// GraphNodeReferencer must be implemented by nodes that reference other
// Terraform items and therefore depend on them.
type GraphNodeReferencer interface {
	// References are the list of things that this node references. This
	// can include fields or just the type, just like GraphNodeReferenceable
	// above.
	References() []string
}

// GraphNodeReferenceGlobal is an interface that can optionally be
// implemented. If ReferenceGlobal returns true, then the References()
// and ReferenceableName() must be _fully qualified_ with "module.foo.bar"
// etc.
//
// This allows a node to reference and be referenced by a specific name
// that may cross module boundaries. This can be very dangerous so use
// this wisely.
//
// The primary use case for this is module boundaries (variables coming in).
type GraphNodeReferenceGlobal interface {
	// Set to true to signal that references and name are fully
	// qualified. See the above docs for more information.
	// When true, ReferenceMap.prefix returns "" for this node.
	ReferenceGlobal() bool
}
+
+// ReferenceTransformer is a GraphTransformer that connects all the
+// nodes that reference each other in order to form the proper ordering.
+type ReferenceTransformer struct{}
+
+func (t *ReferenceTransformer) Transform(g *Graph) error {
+ // Build a reference map so we can efficiently look up the references
+ vs := g.Vertices()
+ m := NewReferenceMap(vs)
+
+ // Find the things that reference things and connect them
+ for _, v := range vs {
+ parents, _ := m.References(v)
+ parentsDbg := make([]string, len(parents))
+ for i, v := range parents {
+ parentsDbg[i] = dag.VertexName(v)
+ }
+ log.Printf(
+ "[DEBUG] ReferenceTransformer: %q references: %v",
+ dag.VertexName(v), parentsDbg)
+
+ for _, parent := range parents {
+ g.Connect(dag.BasicEdge(v, parent))
+ }
+ }
+
+ return nil
+}
+
// ReferenceMap is a structure that can be used to efficiently check
// for references on a graph.
type ReferenceMap struct {
	// references is the mapping of referenceable name to list of vertices
	// that implement that name. This is built on initialization.
	references map[string][]dag.Vertex
	// referencedBy is the inverse: referenceable name to the list of
	// vertices that reference that name.
	referencedBy map[string][]dag.Vertex
}
+
// References returns the list of vertices that this vertex
// references along with any missing references.
//
// Each entry returned by rn.References() may contain several
// "/"-separated alternative names; the first alternative that resolves
// wins. A name that resolves only to the vertex itself counts as found
// but produces no match (self references are excluded).
func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) {
	rn, ok := v.(GraphNodeReferencer)
	if !ok {
		// Non-referencing vertices reference nothing.
		return nil, nil
	}

	var matches []dag.Vertex
	var missing []string
	prefix := m.prefix(v)
	for _, ns := range rn.References() {
		found := false
		for _, n := range strings.Split(ns, "/") {
			n = prefix + n
			parents, ok := m.references[n]
			if !ok {
				// This alternative doesn't resolve; try the next one.
				continue
			}

			// Mark that we found a match
			found = true

			// Make sure this isn't a self reference, which isn't included
			selfRef := false
			for _, p := range parents {
				if p == v {
					selfRef = true
					break
				}
			}
			if selfRef {
				// Self reference: keep looking at the other alternatives.
				continue
			}

			matches = append(matches, parents...)
			break
		}

		if !found {
			missing = append(missing, ns)
		}
	}

	return matches, missing
}
+
+// ReferencedBy returns the list of vertices that reference the
+// vertex passed in.
+func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex {
+ rn, ok := v.(GraphNodeReferenceable)
+ if !ok {
+ return nil
+ }
+
+ var matches []dag.Vertex
+ prefix := m.prefix(v)
+ for _, n := range rn.ReferenceableName() {
+ n = prefix + n
+ children, ok := m.referencedBy[n]
+ if !ok {
+ continue
+ }
+
+ // Make sure this isn't a self reference, which isn't included
+ selfRef := false
+ for _, p := range children {
+ if p == v {
+ selfRef = true
+ break
+ }
+ }
+ if selfRef {
+ continue
+ }
+
+ matches = append(matches, children...)
+ }
+
+ return matches
+}
+
+func (m *ReferenceMap) prefix(v dag.Vertex) string {
+ // If the node is stating it is already fully qualified then
+ // we don't have to create the prefix!
+ if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() {
+ return ""
+ }
+
+ // Create the prefix based on the path
+ var prefix string
+ if pn, ok := v.(GraphNodeSubPath); ok {
+ if path := normalizeModulePath(pn.Path()); len(path) > 1 {
+ prefix = modulePrefixStr(path) + "."
+ }
+ }
+
+ return prefix
+}
+
// NewReferenceMap is used to create a new reference map for the
// given set of vertices.
func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
	var m ReferenceMap

	// Build the lookup table
	refMap := make(map[string][]dag.Vertex)
	for _, v := range vs {
		// We're only looking for referenceable nodes
		rn, ok := v.(GraphNodeReferenceable)
		if !ok {
			continue
		}

		// Go through and cache them.
		// Note: prefix only inspects the vertex, so calling it on the
		// not-yet-populated map m is safe here.
		prefix := m.prefix(v)
		for _, n := range rn.ReferenceableName() {
			n = prefix + n
			refMap[n] = append(refMap[n], v)
		}

		// If there is a path, it is always referenceable by that. For
		// example, if this is a referenceable thing at path []string{"foo"},
		// then it can be referenced at "module.foo"
		if pn, ok := v.(GraphNodeSubPath); ok {
			for _, p := range ReferenceModulePath(pn.Path()) {
				refMap[p] = append(refMap[p], v)
			}
		}
	}

	// Build the lookup table for referenced by
	refByMap := make(map[string][]dag.Vertex)
	for _, v := range vs {
		// We're only looking for nodes that reference something else.
		rn, ok := v.(GraphNodeReferencer)
		if !ok {
			continue
		}

		// Go through and cache them
		prefix := m.prefix(v)
		for _, n := range rn.References() {
			n = prefix + n
			refByMap[n] = append(refByMap[n], v)
		}
	}

	m.references = refMap
	m.referencedBy = refByMap
	return &m
}
+
+// Returns the reference name for a module path. The path "foo" would return
+// "module.foo". If this is a deeply nested module, it will be every parent
+// as well. For example: ["foo", "bar"] would return both "module.foo" and
+// "module.foo.module.bar"
+func ReferenceModulePath(p []string) []string {
+ p = normalizeModulePath(p)
+ if len(p) == 1 {
+ // Root, no name
+ return nil
+ }
+
+ result := make([]string, 0, len(p)-1)
+ for i := len(p); i > 1; i-- {
+ result = append(result, modulePrefixStr(p[:i]))
+ }
+
+ return result
+}
+
+// ReferencesFromConfig returns the references that a configuration has
+// based on the interpolated variables in a configuration.
+func ReferencesFromConfig(c *config.RawConfig) []string {
+ var result []string
+ for _, v := range c.Variables {
+ if r := ReferenceFromInterpolatedVar(v); len(r) > 0 {
+ result = append(result, r...)
+ }
+ }
+
+ return result
+}
+
// ReferenceFromInterpolatedVar returns the reference from this variable,
// or an empty string if there is no reference.
func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string {
	switch v := v.(type) {
	case *config.ModuleVariable:
		// Module output reference: "module.<name>.output.<field>".
		return []string{fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)}
	case *config.ResourceVariable:
		id := v.ResourceId()

		// If we have a multi-reference (splat), then we depend on ALL
		// resources with this type/name.
		if v.Multi && v.Index == -1 {
			return []string{fmt.Sprintf("%s.*", id)}
		}

		// Otherwise, we depend on a specific index.
		idx := v.Index
		if !v.Multi || v.Index == -1 {
			idx = 0
		}

		// Depend on the index, as well as "N" which represents the
		// un-expanded set of resources. The "/" separates alternatives
		// for the lookup in ReferenceMap.References.
		return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)}
	case *config.UserVariable:
		// Input variable reference: "var.<name>".
		return []string{fmt.Sprintf("var.%s", v.Name)}
	default:
		// Unknown variable types produce no reference.
		return nil
	}
}
+
// modulePrefixStr converts a normalized module path (whose first element
// is the root) into the dotted "module.<name>..." reference prefix. The
// root element is skipped, so a root-only path yields "".
func modulePrefixStr(p []string) string {
	parts := make([]string, 0, len(p)*2)
	for _, name := range p[1:] {
		parts = append(parts, "module", name)
	}

	return strings.Join(parts, ".")
}
+
// modulePrefixList prefixes each element of result, in place, with
// "<prefix>.". An empty prefix leaves the slice untouched. The (possibly
// modified) slice is returned for convenience.
func modulePrefixList(result []string, prefix string) []string {
	if prefix != "" {
		for i, v := range result {
			// Plain concatenation; fmt.Sprintf boxing is unnecessary here.
			result[i] = prefix + "." + v
		}
	}

	return result
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
new file mode 100644
index 00000000..cda35cb7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
@@ -0,0 +1,51 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
// ResourceCountTransformer is a GraphTransformer that expands the count
// out for a specific resource.
//
// This assumes that the count is already interpolated.
type ResourceCountTransformer struct {
	// Concrete, when non-nil, wraps each expanded abstract resource node
	// before it is added to the graph.
	Concrete ConcreteResourceNodeFunc

	// Count is the already-interpolated count; Transform errors if negative.
	Count int
	// Addr is the resource address to expand; each node gets a copy with
	// its own index.
	Addr *ResourceAddress
}
+
+func (t *ResourceCountTransformer) Transform(g *Graph) error {
+ // Don't allow the count to be negative
+ if t.Count < 0 {
+ return fmt.Errorf("negative count: %d", t.Count)
+ }
+
+ // For each count, build and add the node
+ for i := 0; i < t.Count; i++ {
+ // Set the index. If our count is 1 we special case it so that
+ // we handle the "resource.0" and "resource" boundary properly.
+ index := i
+ if t.Count == 1 {
+ index = -1
+ }
+
+ // Build the resource address
+ addr := t.Addr.Copy()
+ addr.Index = index
+
+ // Build the abstract node and the concrete one
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ // Add it to the graph
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
new file mode 100644
index 00000000..aee053d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
@@ -0,0 +1,38 @@
+package terraform
+
+import "github.com/hashicorp/terraform/dag"
+
// rootNodeName is the name of the synthetic root vertex added by
// RootTransformer.
const rootNodeName = "root"

// RootTransformer is a GraphTransformer that adds a root to the graph.
type RootTransformer struct{}
+
+func (t *RootTransformer) Transform(g *Graph) error {
+ // If we already have a good root, we're done
+ if _, err := g.Root(); err == nil {
+ return nil
+ }
+
+ // Add a root
+ var root graphNodeRoot
+ g.Add(root)
+
+ // Connect the root to all the edges that need it
+ for _, v := range g.Vertices() {
+ if v == root {
+ continue
+ }
+
+ if g.UpEdges(v).Len() == 0 {
+ g.Connect(dag.BasicEdge(root, v))
+ }
+ }
+
+ return nil
+}
+
// graphNodeRoot is the synthetic vertex used as the single root of the
// graph.
type graphNodeRoot struct{}

// Name returns the fixed root node name.
func (n graphNodeRoot) Name() string {
	return rootNodeName
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
new file mode 100644
index 00000000..471cd746
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
@@ -0,0 +1,65 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
// StateTransformer is a GraphTransformer that adds the elements of
// the state to the graph.
//
// This transform is used for example by the DestroyPlanGraphBuilder to ensure
// that only resources that are in the state are represented in the graph.
type StateTransformer struct {
	// Concrete, when non-nil, wraps each abstract resource node built
	// from state before it is added to the graph.
	Concrete ConcreteResourceNodeFunc

	// State is the source state; a nil or empty state makes Transform a
	// no-op.
	State *State
}
+
+func (t *StateTransformer) Transform(g *Graph) error {
+ // If the state is nil or empty (nil is empty) then do nothing
+ if t.State.Empty() {
+ return nil
+ }
+
+ // Go through all the modules in the diff.
+ log.Printf("[TRACE] StateTransformer: starting")
+ var nodes []dag.Vertex
+ for _, ms := range t.State.Modules {
+ log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path)
+
+ // Go through all the resources in this module.
+ for name, rs := range ms.Resources {
+ log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs)
+
+ // Add the resource to the graph
+ addr, err := parseResourceAddressInternal(name)
+ if err != nil {
+ panic(fmt.Sprintf(
+ "Error parsing internal name, this is a bug: %q", name))
+ }
+
+ // Very important: add the module path for this resource to
+ // the address. Remove "root" from it.
+ addr.Path = ms.Path[1:]
+
+ // Add the resource to the graph
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ nodes = append(nodes, node)
+ }
+ }
+
+ // Add all the nodes to the graph
+ for _, n := range nodes {
+ g.Add(n)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
new file mode 100644
index 00000000..225ac4b4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
@@ -0,0 +1,144 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
// GraphNodeTargetable is an interface for graph nodes to implement when they
// need to be told about incoming targets. This is useful for nodes that need
// to respect targets as they dynamically expand. Note that the list of targets
// provided will contain every target provided, and each implementing graph
// node must filter this list to targets considered relevant.
type GraphNodeTargetable interface {
	// SetTargets receives the full list of parsed targets.
	SetTargets([]ResourceAddress)
}
+
// TargetsTransformer is a GraphTransformer that, when the user specifies a
// list of resources to target, limits the graph to only those resources and
// their dependencies.
type TargetsTransformer struct {
	// List of targeted resource names specified by the user
	Targets []string

	// List of parsed targets, provided by callers like ResourceCountTransform
	// that already have the targets parsed.
	// When empty, Transform populates it from Targets on first run.
	ParsedTargets []ResourceAddress

	// Set to true when we're in a `terraform destroy` or a
	// `terraform plan -destroy`
	Destroy bool
}
+
+func (t *TargetsTransformer) Transform(g *Graph) error {
+ if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 {
+ addrs, err := t.parseTargetAddresses()
+ if err != nil {
+ return err
+ }
+
+ t.ParsedTargets = addrs
+ }
+
+ if len(t.ParsedTargets) > 0 {
+ targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets)
+ if err != nil {
+ return err
+ }
+
+ for _, v := range g.Vertices() {
+ removable := false
+ if _, ok := v.(GraphNodeResource); ok {
+ removable = true
+ }
+ if vr, ok := v.(RemovableIfNotTargeted); ok {
+ removable = vr.RemoveIfNotTargeted()
+ }
+ if removable && !targetedNodes.Include(v) {
+ log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
+ g.Remove(v)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) {
+ addrs := make([]ResourceAddress, len(t.Targets))
+ for i, target := range t.Targets {
+ ta, err := ParseResourceAddress(target)
+ if err != nil {
+ return nil, err
+ }
+ addrs[i] = *ta
+ }
+
+ return addrs, nil
+}
+
+// Returns the list of targeted nodes. A targeted node is either addressed
+// directly, or is an Ancestor of a targeted node. Destroy mode keeps
+// Descendents instead of Ancestors.
+func (t *TargetsTransformer) selectTargetedNodes(
+ g *Graph, addrs []ResourceAddress) (*dag.Set, error) {
+ targetedNodes := new(dag.Set)
+ for _, v := range g.Vertices() {
+ if t.nodeIsTarget(v, addrs) {
+ targetedNodes.Add(v)
+
+ // We inform nodes that ask about the list of targets - helps for nodes
+ // that need to dynamically expand. Note that this only occurs for nodes
+ // that are already directly targeted.
+ if tn, ok := v.(GraphNodeTargetable); ok {
+ tn.SetTargets(addrs)
+ }
+
+ var deps *dag.Set
+ var err error
+ if t.Destroy {
+ deps, err = g.Descendents(v)
+ } else {
+ deps, err = g.Ancestors(v)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ for _, d := range deps.List() {
+ targetedNodes.Add(d)
+ }
+ }
+ }
+
+ return targetedNodes, nil
+}
+
+func (t *TargetsTransformer) nodeIsTarget(
+ v dag.Vertex, addrs []ResourceAddress) bool {
+ r, ok := v.(GraphNodeResource)
+ if !ok {
+ return false
+ }
+
+ addr := r.ResourceAddr()
+ for _, targetAddr := range addrs {
+ if targetAddr.Equals(addr) {
+ return true
+ }
+ }
+
+ return false
+}
+
// RemovableIfNotTargeted is a special interface for graph nodes that
// aren't directly addressable, but need to be removed from the graph when they
// are not targeted. (Nodes that are not directly targeted end up in the set of
// targeted nodes because something that _is_ targeted depends on them.) The
// initial use case for this interface is GraphNodeConfigVariable, which was
// having trouble interpolating for module variables in targeted scenarios that
// filtered out the resource node being referenced.
type RemovableIfNotTargeted interface {
	// RemoveIfNotTargeted returns true when the node should be removed
	// if it isn't targeted or a dependency of a target.
	RemoveIfNotTargeted() bool
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
new file mode 100644
index 00000000..21842789
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
@@ -0,0 +1,20 @@
+package terraform
+
+// TransitiveReductionTransformer is a GraphTransformer that performs
+// finds the transitive reduction of the graph. For a definition of
+// transitive reduction, see Wikipedia.
+type TransitiveReductionTransformer struct{}
+
+func (t *TransitiveReductionTransformer) Transform(g *Graph) error {
+ // If the graph isn't valid, skip the transitive reduction.
+ // We don't error here because Terraform itself handles graph
+ // validation in a better way, or we assume it does.
+ if err := g.Validate(); err != nil {
+ return nil
+ }
+
+ // Do it
+ g.TransitiveReduction()
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
new file mode 100644
index 00000000..b31e2c76
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
@@ -0,0 +1,40 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+)
+
// RootVariableTransformer is a GraphTransformer that adds all the root
// variables to the graph.
//
// Root variables are currently no-ops but they must be added to the
// graph since downstream things that depend on them must be able to
// reach them.
type RootVariableTransformer struct {
	// Module is the root module tree; nil means no configuration and
	// makes Transform a no-op.
	Module *module.Tree
}
+
+func (t *RootVariableTransformer) Transform(g *Graph) error {
+ // If no config, no variables
+ if t.Module == nil {
+ return nil
+ }
+
+ // If we have no vars, we're done!
+ vars := t.Module.Config().Variables
+ if len(vars) == 0 {
+ return nil
+ }
+
+ // Add all variables here
+ for _, v := range vars {
+ node := &NodeRootVariable{
+ Config: v,
+ }
+
+ // Add it!
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
new file mode 100644
index 00000000..6b1293fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
@@ -0,0 +1,44 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// VertexTransformer is a GraphTransformer that transforms vertices
+// using the GraphVertexTransformers. The Transforms are run in sequential
+// order. If a transform replaces a vertex then the next transform will see
+// the new vertex.
+type VertexTransformer struct {
+ Transforms []GraphVertexTransformer
+}
+
+func (t *VertexTransformer) Transform(g *Graph) error {
+ for _, v := range g.Vertices() {
+ for _, vt := range t.Transforms {
+ newV, err := vt.Transform(v)
+ if err != nil {
+ return err
+ }
+
+ // If the vertex didn't change, then don't do anything more
+ if newV == v {
+ continue
+ }
+
+ // Vertex changed, replace it within the graph
+ if ok := g.Replace(v, newV); !ok {
+ // This should never happen, big problem
+ return fmt.Errorf(
+ "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v",
+ dag.VertexName(v), dag.VertexName(newV), v, newV)
+ }
+
+ // Replace v so that future transforms use the proper vertex
+ v = newV
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
new file mode 100644
index 00000000..7c874592
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
@@ -0,0 +1,26 @@
+package terraform
+
// UIInput is the interface that must be implemented to ask for input
// from this user. This should forward the request to wherever the user
// inputs things to ask for values.
type UIInput interface {
	// Input asks the question described by the given options and returns
	// the raw answer, or an error if input could not be collected.
	Input(*InputOpts) (string, error)
}
+
// InputOpts are options for asking for input.
type InputOpts struct {
	// Id is a unique ID for the question being asked that might be
	// used for logging or to look up a prior answered question.
	Id string

	// Query is a human-friendly question for inputting this value.
	Query string

	// Description is a description about what this option is. Be wary
	// that this will probably be in a terminal so split lines as you see
	// necessary.
	Description string

	// Default will be the value returned if no data is entered.
	Default string
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
new file mode 100644
index 00000000..e3a07efa
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
@@ -0,0 +1,23 @@
+package terraform
+
+// MockUIInput is an implementation of UIInput that can be used for tests.
+type MockUIInput struct {
+ InputCalled bool
+ InputOpts *InputOpts
+ InputReturnMap map[string]string
+ InputReturnString string
+ InputReturnError error
+ InputFn func(*InputOpts) (string, error)
+}
+
+func (i *MockUIInput) Input(opts *InputOpts) (string, error) {
+ i.InputCalled = true
+ i.InputOpts = opts
+ if i.InputFn != nil {
+ return i.InputFn(opts)
+ }
+ if i.InputReturnMap != nil {
+ return i.InputReturnMap[opts.Id], i.InputReturnError
+ }
+ return i.InputReturnString, i.InputReturnError
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
new file mode 100644
index 00000000..2207d1d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
@@ -0,0 +1,19 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// PrefixUIInput is an implementation of UIInput that prefixes the ID
+// with a string, allowing queries to be namespaced.
+type PrefixUIInput struct {
+ IdPrefix string
+ QueryPrefix string
+ UIInput UIInput
+}
+
+func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) {
+ opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id)
+ opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query)
+ return i.UIInput.Input(opts)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
new file mode 100644
index 00000000..84427c63
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
@@ -0,0 +1,7 @@
+package terraform
+
// UIOutput is the interface that must be implemented to output
// data to the end user.
type UIOutput interface {
	// Output emits a single message to the user.
	Output(string)
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
new file mode 100644
index 00000000..135a91c5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
@@ -0,0 +1,9 @@
+package terraform
+
// CallbackUIOutput is a UIOutput implementation that forwards every
// message to a caller-supplied function.
type CallbackUIOutput struct {
	OutputFn func(string)
}

// Output invokes OutputFn with the message. OutputFn must be non-nil.
func (o *CallbackUIOutput) Output(v string) {
	o.OutputFn(v)
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
new file mode 100644
index 00000000..7852bc42
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
@@ -0,0 +1,16 @@
+package terraform
+
+// MockUIOutput is an implementation of UIOutput that can be used for tests.
+type MockUIOutput struct {
+ // OutputCalled records whether Output was invoked at least once.
+ OutputCalled bool
+ // OutputMessage holds the most recent message passed to Output.
+ OutputMessage string
+ // OutputFn, if non-nil, is additionally invoked with each message.
+ OutputFn func(string)
+}
+
+// Output records the call and message, then forwards to OutputFn when set.
+func (o *MockUIOutput) Output(v string) {
+ o.OutputCalled = true
+ o.OutputMessage = v
+ if o.OutputFn != nil {
+ o.OutputFn(v)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
new file mode 100644
index 00000000..878a0312
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
@@ -0,0 +1,15 @@
+package terraform
+
+// ProvisionerUIOutput is an implementation of UIOutput that calls a hook
+// for the output so that the hooks can handle it.
+type ProvisionerUIOutput struct {
+ // Info identifies the instance the provisioner output belongs to.
+ Info *InstanceInfo
+ // Type is the provisioner type, passed through to each hook.
+ Type string
+ // Hooks receive every output message, in order.
+ Hooks []Hook
+}
+
+// Output fans msg out to the ProvisionOutput method of every hook.
+// Hook return values are ignored here.
+func (o *ProvisionerUIOutput) Output(msg string) {
+ for _, h := range o.Hooks {
+ h.ProvisionOutput(o.Info, o.Type, msg)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go
new file mode 100644
index 00000000..f41f0d7d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/util.go
@@ -0,0 +1,93 @@
+package terraform
+
+import (
+ "sort"
+ "strings"
+)
+
+// Semaphore is a wrapper around a channel to provide
+// utility methods to clarify that we are treating the
+// channel as a semaphore
+type Semaphore chan struct{}
+
+// NewSemaphore creates a semaphore that allows up
+// to a given limit of simultaneous acquisitions
+//
+// Panics when n is 0, since a zero-capacity channel would make
+// every Acquire block forever.
+func NewSemaphore(n int) Semaphore {
+ if n == 0 {
+ panic("semaphore with limit 0")
+ }
+ ch := make(chan struct{}, n)
+ return Semaphore(ch)
+}
+
+// Acquire is used to acquire an available slot.
+// Blocks until available.
+//
+// A slot is taken by sending into the buffered channel; the send
+// blocks once all n buffer slots are occupied.
+func (s Semaphore) Acquire() {
+ s <- struct{}{}
+}
+
+// TryAcquire is used to do a non-blocking acquire.
+// Returns a bool indicating success
+func (s Semaphore) TryAcquire() bool {
+ select {
+ case s <- struct{}{}:
+ return true
+ default:
+ // Channel buffer is full: every slot is currently held.
+ return false
+ }
+}
+
+// Release is used to return a slot. Acquire must
+// be called as a pre-condition.
+//
+// The non-blocking receive turns an unbalanced Release (no matching
+// Acquire) into a panic rather than a silent deadlock.
+func (s Semaphore) Release() {
+ select {
+ case <-s:
+ default:
+ panic("release without an acquire")
+ }
+}
+
+// resourceProvider returns the provider name for the given type.
+//
+// A non-empty alias takes precedence and is returned as-is; otherwise
+// the provider name is derived from the resource type name.
+func resourceProvider(t, alias string) string {
+ if alias != "" {
+ return alias
+ }
+
+ // By convention the provider name is the portion of the type
+ // before the first underscore, e.g. "aws" for "aws_instance".
+ idx := strings.IndexRune(t, '_')
+ if idx == -1 {
+ // If no underscores, the resource name is assumed to be
+ // also the provider name, e.g. if the provider exposes
+ // only a single resource of each type.
+ return t
+ }
+
+ return t[:idx]
+}
+
+// strSliceContains checks if a given string is contained in a slice
+// When anybody asks why Go needs generics, here you go.
+//
+// Linear scan; returns true on the first exact match.
+func strSliceContains(haystack []string, needle string) bool {
+ for _, s := range haystack {
+ if s == needle {
+ return true
+ }
+ }
+ return false
+}
+
+// deduplicate a slice of strings
+//
+// NOTE: sorts s in place via sort.Strings, so the caller's slice is
+// reordered as a side effect. The returned slice is newly allocated.
+func uniqueStrings(s []string) []string {
+ if len(s) < 2 {
+ // Zero or one element is trivially unique.
+ return s
+ }
+
+ sort.Strings(s)
+ // After sorting, duplicates are adjacent: keep the first of each run.
+ result := make([]string, 1, len(s))
+ result[0] = s[0]
+ for i := 1; i < len(s); i++ {
+ if s[i] != result[len(result)-1] {
+ result = append(result, s[i])
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go
new file mode 100644
index 00000000..300f2adb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/variables.go
@@ -0,0 +1,166 @@
+package terraform
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/helper/hilmapstructure"
+)
+
+// Variables returns the fully loaded set of variables to use with
+// ContextOpts and NewContext, loading any additional variables from
+// the environment or any other sources.
+//
+// The given module tree doesn't need to be loaded.
+func Variables(
+ m *module.Tree,
+ override map[string]interface{}) (map[string]interface{}, error) {
+ result := make(map[string]interface{})
+
+ // Variables are loaded in the following sequence. Each additional step
+ // will override conflicting variable keys from prior steps:
+ //
+ // * Take default values from config
+ // * Take values from TF_VAR_x env vars
+ // * Take values specified in the "override" param which is usually
+ // from -var, -var-file, etc.
+ //
+
+ // First load from the config
+ for _, v := range m.Config().Variables {
+ // If the var has no default, ignore
+ if v.Default == nil {
+ continue
+ }
+
+ // If the type isn't a string, we use it as-is since it is a rich type
+ if v.Type() != config.VariableTypeString {
+ result[v.Name] = v.Default
+ continue
+ }
+
+ // v.Default has already been parsed as HCL but it may be an int type
+ switch typedDefault := v.Default.(type) {
+ case string:
+ // Empty-string defaults are skipped entirely.
+ if typedDefault == "" {
+ continue
+ }
+ result[v.Name] = typedDefault
+ case int, int64:
+ // Numeric and bool defaults are normalized to their string form
+ // because the declared variable type is string.
+ result[v.Name] = fmt.Sprintf("%d", typedDefault)
+ case float32, float64:
+ result[v.Name] = fmt.Sprintf("%f", typedDefault)
+ case bool:
+ result[v.Name] = fmt.Sprintf("%t", typedDefault)
+ default:
+ panic(fmt.Sprintf(
+ "Unknown default var type: %T\n\n"+
+ "THIS IS A BUG. Please report it.",
+ v.Default))
+ }
+ }
+
+ // Load from env vars
+ for _, v := range os.Environ() {
+ if !strings.HasPrefix(v, VarEnvPrefix) {
+ continue
+ }
+
+ // Strip off the prefix and get the value after the first "="
+ // (os.Environ entries always have the form "KEY=VALUE").
+ idx := strings.Index(v, "=")
+ k := v[len(VarEnvPrefix):idx]
+ v = v[idx+1:]
+
+ // Override the configuration-default values. Note that *not* finding the variable
+ // in configuration is OK, as we don't want to preclude people from having multiple
+ // sets of TF_VAR_whatever in their environment even if it is a little weird.
+ for _, schema := range m.Config().Variables {
+ if schema.Name != k {
+ continue
+ }
+
+ // Parse the raw env string according to the declared type.
+ varType := schema.Type()
+ varVal, err := parseVariableAsHCL(k, v, varType)
+ if err != nil {
+ return nil, err
+ }
+
+ switch varType {
+ case config.VariableTypeMap:
+ // Maps are merged into any existing value, not replaced.
+ if err := varSetMap(result, k, varVal); err != nil {
+ return nil, err
+ }
+ default:
+ result[k] = varVal
+ }
+ }
+ }
+
+ // Load from overrides (-var, -var-file, etc.); highest precedence.
+ // Overrides whose key matches no declared variable are dropped.
+ for k, v := range override {
+ for _, schema := range m.Config().Variables {
+ if schema.Name != k {
+ continue
+ }
+
+ switch schema.Type() {
+ case config.VariableTypeList:
+ result[k] = v
+ case config.VariableTypeMap:
+ // Maps are merged into any existing value, not replaced.
+ if err := varSetMap(result, k, v); err != nil {
+ return nil, err
+ }
+ case config.VariableTypeString:
+ // Convert to a string and set. We don't catch any errors
+ // here because the validation step later should catch
+ // any type errors.
+ var strVal string
+ if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
+ result[k] = strVal
+ } else {
+ result[k] = v
+ }
+ default:
+ panic(fmt.Sprintf(
+ "Unhandled var type: %T\n\n"+
+ "THIS IS A BUG. Please report it.",
+ schema.Type()))
+ }
+ }
+ }
+
+ return result, nil
+}
+
+// varSetMap sets or merges the map in "v" with the key "k" in the
+// "current" set of variables. This is just a private function to remove
+// duplicate logic in Variables
+//
+// If k is not yet present, v is stored as-is; otherwise v's entries are
+// merged into the existing map, overwriting keys that collide.
+func varSetMap(current map[string]interface{}, k string, v interface{}) error {
+ existing, ok := current[k]
+ if !ok {
+ current[k] = v
+ return nil
+ }
+
+ existingMap, ok := existing.(map[string]interface{})
+ if !ok {
+ panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k))
+ }
+
+ switch typedV := v.(type) {
+ case []map[string]interface{}:
+ // NOTE(review): only typedV[0] is merged — later elements are
+ // ignored, and an empty slice panics on the index below.
+ // Presumably the HCL decoder always yields exactly one element
+ // here — TODO confirm.
+ for newKey, newVal := range typedV[0] {
+ existingMap[newKey] = newVal
+ }
+ case map[string]interface{}:
+ for newKey, newVal := range typedV {
+ existingMap[newKey] = newVal
+ }
+ default:
+ return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v))
+ }
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go
new file mode 100644
index 00000000..e184dc5a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/version.go
@@ -0,0 +1,31 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-version"
+)
+
+// The main version number that is being run at the moment.
+const Version = "0.9.4"
+
+// A pre-release marker for the version. If this is "" (empty string)
+// then it means that it is a final release. Otherwise, this is a pre-release
+// such as "dev" (in development), "beta", "rc1", etc.
+const VersionPrerelease = ""
+
+// SemVersion is an instance of version.Version. This has the secondary
+// benefit of verifying during tests and init time that our version is a
+// proper semantic version, which should always be the case.
+var SemVersion = version.Must(version.NewVersion(Version))
+
+// VersionHeader is the header name used to send the current terraform version
+// in http requests.
+const VersionHeader = "Terraform-Version"
+
+// VersionString returns the full version, joining Version and
+// VersionPrerelease with "-" when a pre-release marker is set
+// (e.g. "0.9.4-beta"), and plain Version otherwise.
+func VersionString() string {
+ if VersionPrerelease != "" {
+ return fmt.Sprintf("%s-%s", Version, VersionPrerelease)
+ }
+ return Version
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
new file mode 100644
index 00000000..3cbbf560
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
@@ -0,0 +1,69 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-version"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// checkRequiredVersion verifies that any version requirements specified by
+// the configuration are met.
+//
+// This checks the root module as well as any additional version requirements
+// from child modules.
+//
+// This is tested in context_test.go.
+func checkRequiredVersion(m *module.Tree) error {
+ // Check any children first (depth-first recursion), so a failing
+ // child requirement is reported before the current module's.
+ for _, c := range m.Children() {
+ if err := checkRequiredVersion(c); err != nil {
+ return err
+ }
+ }
+
+ var tf *config.Terraform
+ if c := m.Config(); c != nil {
+ tf = c.Terraform
+ }
+
+ // If there is no Terraform config or the required version isn't set,
+ // we move on.
+ if tf == nil || tf.RequiredVersion == "" {
+ return nil
+ }
+
+ // Path for errors. Note this local shadows the imported "module"
+ // package name for the rest of the function.
+ module := "root"
+ if path := normalizeModulePath(m.Path()); len(path) > 1 {
+ module = modulePrefixStr(path)
+ }
+
+ // Check this version requirement of this module
+ cs, err := version.NewConstraint(tf.RequiredVersion)
+ if err != nil {
+ return fmt.Errorf(
+ "%s: terraform.required_version %q syntax error: %s",
+ module,
+ tf.RequiredVersion, err)
+ }
+
+ // Compare the running version (SemVersion) against the constraint.
+ if !cs.Check(SemVersion) {
+ return fmt.Errorf(
+ "The currently running version of Terraform doesn't meet the\n"+
+ "version requirements explicitly specified by the configuration.\n"+
+ "Please use the required version or update the configuration.\n"+
+ "Note that version requirements are usually set for a reason, so\n"+
+ "we recommend verifying with whoever set the version requirements\n"+
+ "prior to making any manual changes.\n\n"+
+ " Module: %s\n"+
+ " Required version: %s\n"+
+ " Current version: %s",
+ module,
+ tf.RequiredVersion,
+ SemVersion)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
new file mode 100644
index 00000000..cbd78dd9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT.
+
+package terraform
+
+import "fmt"
+
+// _walkOperation_name concatenates all names; _walkOperation_index holds
+// the byte offsets that slice each name back out of it.
+const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport"
+
+var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96}
+
+func (i walkOperation) String() string {
+ // Out-of-range values fall back to a numeric representation.
+ if i >= walkOperation(len(_walkOperation_index)-1) {
+ return fmt.Sprintf("walkOperation(%d)", i)
+ }
+ return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]]
+}