Diffstat (limited to 'vendor/github.com/hashicorp/terraform/helper')
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/README.md  7
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/config/decode.go  28
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/config/validator.go  214
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go  154
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/experiment/id.go  34
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go  22
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go  41
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/logging/logging.go  100
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/error.go  79
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/id.go  39
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/map.go  140
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/resource.go  49
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/state.go  259
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/testing.go  790
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go  160
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go  141
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/wait.go  84
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/README.md  11
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/backend.go  94
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go  59
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/equal.go  6
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go  334
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go  333
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go  208
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go  232
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go  63
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go  8
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go  319
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go  36
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/provider.go  400
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go  180
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource.go  478
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go  502
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go  17
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go  52
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go  237
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/schema.go  1537
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/serialize.go  122
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/set.go  209
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/testing.go  30
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go  21
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go  16
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/closer.go  80
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go  128
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go  151
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go  66
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/value.go  79
47 files changed, 8349 insertions, 0 deletions
diff --git a/vendor/github.com/hashicorp/terraform/helper/README.md b/vendor/github.com/hashicorp/terraform/helper/README.md
new file mode 100644
index 00000000..d0fee068
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/README.md
@@ -0,0 +1,7 @@
+# Helper Libraries
+
+This folder contains helper libraries for Terraform plugins. A running
+joke is that this is the "Terraform standard library" for plugins. The
+goal of the packages in this directory is to provide high-level helpers
+that make it easier to implement the various aspects of writing a plugin
+for Terraform.
diff --git a/vendor/github.com/hashicorp/terraform/helper/config/decode.go b/vendor/github.com/hashicorp/terraform/helper/config/decode.go
new file mode 100644
index 00000000..f470c9b4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/config/decode.go
@@ -0,0 +1,28 @@
+package config
+
+import (
+ "github.com/mitchellh/mapstructure"
+)
+
+func Decode(target interface{}, raws ...interface{}) (*mapstructure.Metadata, error) {
+ var md mapstructure.Metadata
+ decoderConfig := &mapstructure.DecoderConfig{
+ Metadata: &md,
+ Result: target,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := mapstructure.NewDecoder(decoderConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, raw := range raws {
+ err := decoder.Decode(raw)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &md, nil
+}
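
As an illustration of how this helper might be called, here is a minimal, hypothetical sketch; the ExampleConfig struct and its fields are invented for the example, and only config.Decode itself comes from the file above.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/config"
)

// ExampleConfig is a hypothetical provider configuration struct.
type ExampleConfig struct {
	Region  string
	Retries int
}

func main() {
	var c ExampleConfig

	// Decode merges each raw map into c in order; WeaklyTypedInput means
	// the string "3" is accepted for the int field.
	md, err := config.Decode(&c,
		map[string]interface{}{"region": "us-east-1"},
		map[string]interface{}{"retries": "3"},
	)
	if err != nil {
		panic(err)
	}

	fmt.Println(c.Region, c.Retries, md.Keys)
}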
diff --git a/vendor/github.com/hashicorp/terraform/helper/config/validator.go b/vendor/github.com/hashicorp/terraform/helper/config/validator.go
new file mode 100644
index 00000000..1a6e023b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/config/validator.go
@@ -0,0 +1,214 @@
+package config
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/flatmap"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Validator is a helper that helps you validate the configuration
+// of your resource, resource provider, etc.
+//
+// At the most basic level, set the Required and Optional lists to be
+// specifiers of keys that are required or optional. If a key shows up
+// that isn't in one of these two lists, then an error is generated.
+//
+// The "specifiers" allowed in this is a fairly rich syntax to help
+// describe the format of your configuration:
+//
+// * Basic keys are just strings. For example: "foo" will match the
+// "foo" key.
+//
+// * Nested structure keys can be matched by doing
+// "listener.*.foo". This will verify that there is at least one
+// listener element that has the "foo" key set.
+//
+// * The existence of a nested structure can be checked by simply
+// doing "listener.*" which will verify that there is at least
+// one element in the "listener" structure. This is NOT
+// validating that "listener" is an array. It is validating
+// that it is a nested structure in the configuration.
+//
+type Validator struct {
+ Required []string
+ Optional []string
+}
+
+func (v *Validator) Validate(
+ c *terraform.ResourceConfig) (ws []string, es []error) {
+ // Flatten the configuration so it is easier to reason about
+ flat := flatmap.Flatten(c.Raw)
+
+ keySet := make(map[string]validatorKey)
+ for i, vs := range [][]string{v.Required, v.Optional} {
+ req := i == 0
+ for _, k := range vs {
+ vk, err := newValidatorKey(k, req)
+ if err != nil {
+ es = append(es, err)
+ continue
+ }
+
+ keySet[k] = vk
+ }
+ }
+
+ purged := make([]string, 0)
+ for _, kv := range keySet {
+ p, w, e := kv.Validate(flat)
+ if len(w) > 0 {
+ ws = append(ws, w...)
+ }
+ if len(e) > 0 {
+ es = append(es, e...)
+ }
+
+ purged = append(purged, p...)
+ }
+
+ // Delete all the keys we processed in order to find
+ // the unknown keys.
+ for _, p := range purged {
+ delete(flat, p)
+ }
+
+ // The rest are unknown
+ for k, _ := range flat {
+ es = append(es, fmt.Errorf("Unknown configuration: %s", k))
+ }
+
+ return
+}
+
+type validatorKey interface {
+ // Validate validates the given configuration and returns viewed keys,
+ // warnings, and errors.
+ Validate(map[string]string) ([]string, []string, []error)
+}
+
+func newValidatorKey(k string, req bool) (validatorKey, error) {
+ var result validatorKey
+
+ parts := strings.Split(k, ".")
+ if len(parts) > 1 && parts[1] == "*" {
+ result = &nestedValidatorKey{
+ Parts: parts,
+ Required: req,
+ }
+ } else {
+ result = &basicValidatorKey{
+ Key: k,
+ Required: req,
+ }
+ }
+
+ return result, nil
+}
+
+// basicValidatorKey validates keys that are basic such as "foo"
+type basicValidatorKey struct {
+ Key string
+ Required bool
+}
+
+func (v *basicValidatorKey) Validate(
+ m map[string]string) ([]string, []string, []error) {
+ for k, _ := range m {
+ // If we have the exact key its a match
+ if k == v.Key {
+ return []string{k}, nil, nil
+ }
+ }
+
+ if !v.Required {
+ return nil, nil, nil
+ }
+
+ return nil, nil, []error{fmt.Errorf(
+ "Key not found: %s", v.Key)}
+}
+
+type nestedValidatorKey struct {
+ Parts []string
+ Required bool
+}
+
+func (v *nestedValidatorKey) validate(
+ m map[string]string,
+ prefix string,
+ offset int) ([]string, []string, []error) {
+ if offset >= len(v.Parts) {
+ // We're at the end. Look for a specific key.
+ v2 := &basicValidatorKey{Key: prefix, Required: v.Required}
+ return v2.Validate(m)
+ }
+
+ current := v.Parts[offset]
+
+ // If we're at offset 0, special case to start at the next one.
+ if offset == 0 {
+ return v.validate(m, current, offset+1)
+ }
+
+ // Determine if we're doing a "for all" or a specific key
+ if current != "*" {
+ // We're looking at a specific key, continue on.
+ return v.validate(m, prefix+"."+current, offset+1)
+ }
+
+ // We're doing a "for all", so we loop over.
+ countStr, ok := m[prefix+".#"]
+ if !ok {
+ if !v.Required {
+ // It wasn't required, so its no problem.
+ return nil, nil, nil
+ }
+
+ return nil, nil, []error{fmt.Errorf(
+ "Key not found: %s", prefix)}
+ }
+
+ count, err := strconv.ParseInt(countStr, 0, 0)
+ if err != nil {
+ // This shouldn't happen if flatmap works properly
+ panic("invalid flatmap array")
+ }
+
+ var e []error
+ var w []string
+ u := make([]string, 1, count+1)
+ u[0] = prefix + ".#"
+ for i := 0; i < int(count); i++ {
+ prefix := fmt.Sprintf("%s.%d", prefix, i)
+
+ // Mark that we saw this specific key
+ u = append(u, prefix)
+
+ // Mark all prefixes of this
+ for k, _ := range m {
+ if !strings.HasPrefix(k, prefix+".") {
+ continue
+ }
+ u = append(u, k)
+ }
+
+ // If we have more parts, then validate deeper
+ if offset+1 < len(v.Parts) {
+ u2, w2, e2 := v.validate(m, prefix, offset+1)
+
+ u = append(u, u2...)
+ w = append(w, w2...)
+ e = append(e, e2...)
+ }
+ }
+
+ return u, w, e
+}
+
+func (v *nestedValidatorKey) Validate(
+ m map[string]string) ([]string, []string, []error) {
+ return v.validate(m, "", 0)
+}
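
A hedged sketch of the Validator in use, following the specifier syntax described in its doc comment. The keys ("name", "listener", "tags") are hypothetical, and the ResourceConfig is built directly with a Raw map purely for illustration.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/config"
	"github.com/hashicorp/terraform/terraform"
)

func main() {
	v := &config.Validator{
		Required: []string{"name", "listener.*.port"},
		Optional: []string{"tags"},
	}

	// A hypothetical raw configuration. "listener" is a nested list, so
	// flatmap represents it with a ".#" count key internally, which is
	// what the nested "listener.*.port" specifier walks.
	rc := &terraform.ResourceConfig{
		Raw: map[string]interface{}{
			"name": "example",
			"listener": []interface{}{
				map[string]interface{}{"port": "80"},
			},
		},
	}

	ws, es := v.Validate(rc)
	fmt.Println("warnings:", ws)
	for _, err := range es {
		fmt.Println("error:", err)
	}
}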
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
new file mode 100644
index 00000000..18b8837c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
@@ -0,0 +1,154 @@
+// Package experiment contains helper functions for tracking experimental
+// features throughout Terraform.
+//
+// This package should be used for creating, enabling, querying, and deleting
+// experimental features. By unifying all of that onto a single interface,
+// we can have the Go compiler help us by enforcing every place we touch
+// an experimental feature.
+//
+// To create a new experiment:
+//
+// 1. Add the experiment to the global vars list below, prefixed with X_
+//
+// 2. Add the experiment variable to the All list in the init() function
+//
+// 3. Use it!
+//
+// To remove an experiment:
+//
+// 1. Delete the experiment global var.
+//
+// 2. Try to compile and fix all the places where the var was referenced.
+//
+// To use an experiment:
+//
+// 1. Use Flag() if you want the experiment to be available from the CLI.
+//
+// 2. Use Enabled() to check whether it is enabled.
+//
+// As a general user:
+//
+// 1. The `-Xexperiment-name` flag
+// 2. The `TF_X_<experiment-name>` env var.
+// 3. The `TF_X_FORCE` env var can be set to force an experimental feature
+// without human verifications.
+//
+package experiment
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// The experiments that are available are listed below. Any package in
+// Terraform defining an experiment should define the experiments below.
+// By keeping them all within the experiment package we force a single point
+// of definition and use. This allows the compiler to enforce references
+// so it becomes easy to remove the features.
+var (
+ // Shadow graph. This is already on by default. Disabling it will be
+ // allowed for a while in order for it to not block operations.
+ X_shadow = newBasicID("shadow", "SHADOW", false)
+)
+
+// Global variables this package uses because we are a package
+// with global state.
+var (
+ // All is the list of all experiments. Do not modify this.
+ All []ID
+
+ // enabled keeps track of what flags have been enabled
+ enabled map[string]bool
+ enabledLock sync.Mutex
+
+ // Hidden "experiment" that forces all others to be on without verification
+ x_force = newBasicID("force", "FORCE", false)
+)
+
+func init() {
+ // The list of all experiments, update this when an experiment is added.
+ All = []ID{
+ X_shadow,
+ x_force,
+ }
+
+ // Load
+ reload()
+}
+
+// reload is used by tests to reload the global state. It is also
+// called by init to load the initial state.
+func reload() {
+ // Initialize
+ enabledLock.Lock()
+ enabled = make(map[string]bool)
+ enabledLock.Unlock()
+
+ // Set defaults and check env vars
+ for _, id := range All {
+ // Get the default value
+ def := id.Default()
+
+ // An env var, if set, overrides the default ("0" disables, anything else enables)
+ key := fmt.Sprintf("TF_X_%s", strings.ToUpper(id.Env()))
+ if v := os.Getenv(key); v != "" {
+ def = v != "0"
+ }
+
+ // Set the default
+ SetEnabled(id, def)
+ }
+}
+
+// Enabled returns whether an experiment has been enabled or not.
+func Enabled(id ID) bool {
+ enabledLock.Lock()
+ defer enabledLock.Unlock()
+ return enabled[id.Flag()]
+}
+
+// SetEnabled sets an experiment to enabled/disabled. Please check with
+// the experiment docs for when calling this actually affects the experiment.
+func SetEnabled(id ID, v bool) {
+ enabledLock.Lock()
+ defer enabledLock.Unlock()
+ enabled[id.Flag()] = v
+}
+
+// Force returns true if the -Xforce or TF_X_FORCE flag is present, which
+// advises users of this package to not verify with the user that they want
+// experimental behavior and to just continue with it.
+func Force() bool {
+ return Enabled(x_force)
+}
+
+// Flag configures the given FlagSet with the flags to configure
+// all active experiments.
+func Flag(fs *flag.FlagSet) {
+ for _, id := range All {
+ desc := id.Flag()
+ key := fmt.Sprintf("X%s", id.Flag())
+ fs.Var(&idValue{X: id}, key, desc)
+ }
+}
+
+// idValue implements flag.Value for setting the enabled/disabled state
+// of an experiment from the CLI.
+type idValue struct {
+ X ID
+}
+
+func (v *idValue) IsBoolFlag() bool { return true }
+func (v *idValue) String() string { return strconv.FormatBool(Enabled(v.X)) }
+func (v *idValue) Set(raw string) error {
+ b, err := strconv.ParseBool(raw)
+ if err == nil {
+ SetEnabled(v.X, b)
+ }
+
+ return err
+}
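
Based on the package doc comment above, a small hypothetical sketch of wiring the experiment flags into a FlagSet and querying an experiment; the FlagSet name "example" is invented.

package main

import (
	"flag"
	"fmt"
	"os"

	"github.com/hashicorp/terraform/helper/experiment"
)

func main() {
	// Register the -X<name> flags (e.g. -Xshadow) for every defined
	// experiment, then parse the CLI arguments as usual.
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	experiment.Flag(fs)
	fs.Parse(os.Args[1:])

	// Query an experiment before taking the experimental code path.
	if experiment.Enabled(experiment.X_shadow) {
		fmt.Println("shadow graph experiment is enabled")
	}

	// Force reports whether the user asked to skip confirmation prompts
	// for experimental behavior (-Xforce or TF_X_FORCE).
	if experiment.Force() {
		fmt.Println("experimental features forced on")
	}
}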
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/id.go b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
new file mode 100644
index 00000000..8e2f7073
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
@@ -0,0 +1,34 @@
+package experiment
+
+// ID represents an experimental feature.
+//
+// The global vars defined in this package should be used as ID values.
+// This interface is purposely not implementable outside of this package
+// so that we can rely on the Go compiler to enforce all experiment references.
+type ID interface {
+ Env() string
+ Flag() string
+ Default() bool
+
+ unexported() // So the ID can't be implemented externally.
+}
+
+// basicID implements ID.
+type basicID struct {
+ EnvValue string
+ FlagValue string
+ DefaultValue bool
+}
+
+func newBasicID(flag, env string, def bool) ID {
+ return &basicID{
+ EnvValue: env,
+ FlagValue: flag,
+ DefaultValue: def,
+ }
+}
+
+func (id *basicID) Env() string { return id.EnvValue }
+func (id *basicID) Flag() string { return id.FlagValue }
+func (id *basicID) Default() bool { return id.DefaultValue }
+func (id *basicID) unexported() {}
diff --git a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
new file mode 100644
index 00000000..64d8263e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
@@ -0,0 +1,22 @@
+package hashcode
+
+import (
+ "hash/crc32"
+)
+
+// String hashes a string to a unique hashcode.
+//
+// crc32 returns a uint32, but for our use we need
+// and non negative integer. Here we cast to an integer
+// and invert it if the result is negative.
+func String(s string) int {
+ v := int(crc32.ChecksumIEEE([]byte(s)))
+ if v >= 0 {
+ return v
+ }
+ if -v >= 0 {
+ return -v
+ }
+ // v == MinInt
+ return 0
+}
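
A brief illustrative sketch: equal inputs always hash to the same non-negative code, which is the property set hash functions in helper/schema rely on. The input string here is arbitrary.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/hashcode"
)

func main() {
	a := hashcode.String("us-east-1a")
	b := hashcode.String("us-east-1a")

	// Equal inputs hash to the same value, and the result is never negative.
	fmt.Println(a == b, a >= 0) // true true
}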
diff --git a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
new file mode 100644
index 00000000..67be1df1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
@@ -0,0 +1,41 @@
+package hilmapstructure
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+var hilMapstructureDecodeHookEmptySlice []interface{}
+var hilMapstructureDecodeHookStringSlice []string
+var hilMapstructureDecodeHookEmptyMap map[string]interface{}
+
+// WeakDecode behaves in the same way as mapstructure.WeakDecode but has a
+// DecodeHook which defeats the backward compatibility mode of mapstructure
+// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This
+// allows us to use WeakDecode (desirable), but not fail on empty lists.
+func WeakDecode(m interface{}, rawVal interface{}) error {
+ config := &mapstructure.DecoderConfig{
+ DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) {
+ sliceType := reflect.TypeOf(hilMapstructureDecodeHookEmptySlice)
+ stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice)
+ mapType := reflect.TypeOf(hilMapstructureDecodeHookEmptyMap)
+
+ if (source == sliceType || source == stringSliceType) && target == mapType {
+ return nil, fmt.Errorf("Cannot convert a []interface{} into a map[string]interface{}")
+ }
+
+ return val, nil
+ },
+ WeaklyTypedInput: true,
+ Result: rawVal,
+ }
+
+ decoder, err := mapstructure.NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(m)
+}
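
An illustrative sketch of the behavior described in the WeakDecode comment: the decode hook rejects converting an empty list into a map, while ordinary weakly typed decoding still succeeds.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/hilmapstructure"
)

func main() {
	// The decode hook rejects turning an empty list into a map, so this
	// returns a non-nil error instead of silently producing an empty map.
	var out map[string]interface{}
	err := hilmapstructure.WeakDecode([]interface{}{}, &out)
	fmt.Println(err != nil) // true

	// Ordinary weakly typed decoding still works.
	var n int
	if err := hilmapstructure.WeakDecode("42", &n); err == nil {
		fmt.Println(n) // 42
	}
}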
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
new file mode 100644
index 00000000..433cd77d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
@@ -0,0 +1,100 @@
+package logging
+
+import (
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+ "syscall"
+
+ "github.com/hashicorp/logutils"
+)
+
+// These are the environment variables that determine if we log, and if
+// we do, whether the log output should go to a file.
+const (
+ EnvLog = "TF_LOG" // Set to True
+ EnvLogFile = "TF_LOG_PATH" // Set to a file
+)
+
+var validLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}
+
+// LogOutput determines where we should send logs (if anywhere) and the log level.
+func LogOutput() (logOutput io.Writer, err error) {
+ logOutput = ioutil.Discard
+
+ logLevel := LogLevel()
+ if logLevel == "" {
+ return
+ }
+
+ logOutput = os.Stderr
+ if logPath := os.Getenv(EnvLogFile); logPath != "" {
+ var err error
+ logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // This was the default since the beginning
+ logOutput = &logutils.LevelFilter{
+ Levels: validLevels,
+ MinLevel: logutils.LogLevel(logLevel),
+ Writer: logOutput,
+ }
+
+ return
+}
+
+// SetOutput checks for a log destination with LogOutput, and calls
+// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses
+// ioutil.Discard. Any error from LogOutput is fatal.
+func SetOutput() {
+ out, err := LogOutput()
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ if out == nil {
+ out = ioutil.Discard
+ }
+
+ log.SetOutput(out)
+}
+
+// LogLevel returns the current log level string based on the environment vars
+func LogLevel() string {
+ envLevel := os.Getenv(EnvLog)
+ if envLevel == "" {
+ return ""
+ }
+
+ logLevel := "TRACE"
+ if isValidLogLevel(envLevel) {
+ // allow the following for better UX: info, Info or INFO
+ logLevel = strings.ToUpper(envLevel)
+ } else {
+ log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v",
+ envLevel, validLevels)
+ }
+
+ return logLevel
+}
+
+// IsDebugOrHigher returns whether or not the current log level is debug or trace
+func IsDebugOrHigher() bool {
+ level := string(LogLevel())
+ return level == "DEBUG" || level == "TRACE"
+}
+
+func isValidLogLevel(level string) bool {
+ for _, l := range validLevels {
+ if strings.ToUpper(level) == string(l) {
+ return true
+ }
+ }
+
+ return false
+}
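
A hypothetical sketch of using this package from a plugin's main: TF_LOG selects the level, TF_LOG_PATH optionally redirects output to a file, and SetOutput wires up the standard logger. Setting the env var in code here is only for demonstration.

package main

import (
	"log"
	"os"

	"github.com/hashicorp/terraform/helper/logging"
)

func main() {
	// Normally TF_LOG (and optionally TF_LOG_PATH) is set by the user;
	// it is set here only so the example has something to show.
	os.Setenv("TF_LOG", "debug")

	// SetOutput points the standard logger at the level-filtered writer.
	logging.SetOutput()

	log.Printf("[DEBUG] visible at DEBUG or TRACE")
	log.Printf("[TRACE] filtered out when the minimum level is DEBUG")

	if logging.IsDebugOrHigher() {
		log.Printf("[INFO] debug-or-higher logging is active")
	}
}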
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/error.go b/vendor/github.com/hashicorp/terraform/helper/resource/error.go
new file mode 100644
index 00000000..7ee21614
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/error.go
@@ -0,0 +1,79 @@
+package resource
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+type NotFoundError struct {
+ LastError error
+ LastRequest interface{}
+ LastResponse interface{}
+ Message string
+ Retries int
+}
+
+func (e *NotFoundError) Error() string {
+ if e.Message != "" {
+ return e.Message
+ }
+
+ if e.Retries > 0 {
+ return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
+ }
+
+ return "couldn't find resource"
+}
+
+// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
+type UnexpectedStateError struct {
+ LastError error
+ State string
+ ExpectedState []string
+}
+
+func (e *UnexpectedStateError) Error() string {
+ return fmt.Sprintf(
+ "unexpected state '%s', wanted target '%s'. last error: %s",
+ e.State,
+ strings.Join(e.ExpectedState, ", "),
+ e.LastError,
+ )
+}
+
+// TimeoutError is returned when WaitForState times out
+type TimeoutError struct {
+ LastError error
+ LastState string
+ Timeout time.Duration
+ ExpectedState []string
+}
+
+func (e *TimeoutError) Error() string {
+ expectedState := "resource to be gone"
+ if len(e.ExpectedState) > 0 {
+ expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
+ }
+
+ extraInfo := make([]string, 0)
+ if e.LastState != "" {
+ extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
+ }
+ if e.Timeout > 0 {
+ extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
+ }
+
+ suffix := ""
+ if len(extraInfo) > 0 {
+ suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
+ }
+
+ if e.LastError != nil {
+ return fmt.Sprintf("timeout while waiting for %s%s: %s",
+ expectedState, suffix, e.LastError)
+ }
+
+ return fmt.Sprintf("timeout while waiting for %s%s",
+ expectedState, suffix)
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
new file mode 100644
index 00000000..629582b3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
@@ -0,0 +1,39 @@
+package resource
+
+import (
+ "crypto/rand"
+ "fmt"
+ "math/big"
+ "sync"
+)
+
+const UniqueIdPrefix = `terraform-`
+
+// idCounter is a randomly seeded monotonic counter for generating ordered
+// unique ids. It uses a big.Int so we can easily increment a long numeric
+// string. The max possible hex value here with 12 random bytes is
+// "01000000000000000000000000", so there's no chance of rollover during
+// operation.
+var idMutex sync.Mutex
+var idCounter = big.NewInt(0).SetBytes(randomBytes(12))
+
+// Helper for a resource to generate a unique identifier w/ default prefix
+func UniqueId() string {
+ return PrefixedUniqueId(UniqueIdPrefix)
+}
+
+// Helper for a resource to generate a unique identifier w/ given prefix
+//
+// After the prefix, the ID consists of an incrementing 26 digit value (to match
+// previous timestamp output).
+func PrefixedUniqueId(prefix string) string {
+ idMutex.Lock()
+ defer idMutex.Unlock()
+ return fmt.Sprintf("%s%026x", prefix, idCounter.Add(idCounter, big.NewInt(1)))
+}
+
+func randomBytes(n int) []byte {
+ b := make([]byte, n)
+ rand.Read(b)
+ return b
+}
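
A short illustrative sketch; the "example-" prefix is arbitrary.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/resource"
)

func main() {
	// IDs share their prefix and are strictly increasing within a process,
	// so successive calls sort in creation order.
	fmt.Println(resource.UniqueId())                   // terraform-<26 hex digits>
	fmt.Println(resource.PrefixedUniqueId("example-")) // example-<26 hex digits>
}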
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/map.go b/vendor/github.com/hashicorp/terraform/helper/resource/map.go
new file mode 100644
index 00000000..a465136f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/map.go
@@ -0,0 +1,140 @@
+package resource
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Map is a map of resources that are supported, and provides helpers for
+// more easily implementing a ResourceProvider.
+type Map struct {
+ Mapping map[string]Resource
+}
+
+func (m *Map) Validate(
+ t string, c *terraform.ResourceConfig) ([]string, []error) {
+ r, ok := m.Mapping[t]
+ if !ok {
+ return nil, []error{fmt.Errorf("Unknown resource type: %s", t)}
+ }
+
+ // If there is no validator set, then it is valid
+ if r.ConfigValidator == nil {
+ return nil, nil
+ }
+
+ return r.ConfigValidator.Validate(c)
+}
+
+// Apply performs a create or update depending on the diff, and calls
+// the proper function on the matching Resource.
+func (m *Map) Apply(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff,
+ meta interface{}) (*terraform.InstanceState, error) {
+ r, ok := m.Mapping[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
+ }
+
+ if d.Destroy || d.RequiresNew() {
+ if s.ID != "" {
+ // Destroy the resource if it is created
+ err := r.Destroy(s, meta)
+ if err != nil {
+ return s, err
+ }
+
+ s.ID = ""
+ }
+
+ // If we're only destroying, and not creating, then return now.
+ // Otherwise, we continue so that we can create a new resource.
+ if !d.RequiresNew() {
+ return nil, nil
+ }
+ }
+
+ var result *terraform.InstanceState
+ var err error
+ if s.ID == "" {
+ result, err = r.Create(s, d, meta)
+ } else {
+ if r.Update == nil {
+ return s, fmt.Errorf(
+ "Resource type '%s' doesn't support update",
+ info.Type)
+ }
+
+ result, err = r.Update(s, d, meta)
+ }
+ if result != nil {
+ if result.Attributes == nil {
+ result.Attributes = make(map[string]string)
+ }
+
+ result.Attributes["id"] = result.ID
+ }
+
+ return result, err
+}
+
+// Diff performs a diff on the proper resource type.
+func (m *Map) Diff(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig,
+ meta interface{}) (*terraform.InstanceDiff, error) {
+ r, ok := m.Mapping[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
+ }
+
+ return r.Diff(s, c, meta)
+}
+
+// Refresh performs a Refresh on the proper resource type.
+//
+// Refresh on the Resource won't be called if the state represents a
+// non-created resource (ID is blank).
+//
+// An error is returned if the resource isn't registered.
+func (m *Map) Refresh(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ meta interface{}) (*terraform.InstanceState, error) {
+ // If the resource isn't created, don't refresh.
+ if s.ID == "" {
+ return s, nil
+ }
+
+ r, ok := m.Mapping[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
+ }
+
+ return r.Refresh(s, meta)
+}
+
+// Resources returns all the resources that are supported by this
+// resource map and can be used to satisfy the Resources method of
+// a ResourceProvider.
+func (m *Map) Resources() []terraform.ResourceType {
+ ks := make([]string, 0, len(m.Mapping))
+ for k, _ := range m.Mapping {
+ ks = append(ks, k)
+ }
+ sort.Strings(ks)
+
+ rs := make([]terraform.ResourceType, 0, len(m.Mapping))
+ for _, k := range ks {
+ rs = append(rs, terraform.ResourceType{
+ Name: k,
+ })
+ }
+
+ return rs
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go
new file mode 100644
index 00000000..0d9c831a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go
@@ -0,0 +1,49 @@
+package resource
+
+import (
+ "github.com/hashicorp/terraform/helper/config"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+type Resource struct {
+ ConfigValidator *config.Validator
+ Create CreateFunc
+ Destroy DestroyFunc
+ Diff DiffFunc
+ Refresh RefreshFunc
+ Update UpdateFunc
+}
+
+// CreateFunc is a function that creates a resource that didn't previously
+// exist.
+type CreateFunc func(
+ *terraform.InstanceState,
+ *terraform.InstanceDiff,
+ interface{}) (*terraform.InstanceState, error)
+
+// DestroyFunc is a function that destroys a resource that previously
+// existed, using the state.
+type DestroyFunc func(
+ *terraform.InstanceState,
+ interface{}) error
+
+// DiffFunc is a function that performs a diff of a resource.
+type DiffFunc func(
+ *terraform.InstanceState,
+ *terraform.ResourceConfig,
+ interface{}) (*terraform.InstanceDiff, error)
+
+// RefreshFunc is a function that performs a refresh of a specific type
+// of resource.
+type RefreshFunc func(
+ *terraform.InstanceState,
+ interface{}) (*terraform.InstanceState, error)
+
+// UpdateFunc is a function that is called to update a resource that
+// previously existed. The difference between this and CreateFunc is that
+// the diff is guaranteed to only contain attributes that don't require
+// a new resource.
+type UpdateFunc func(
+ *terraform.InstanceState,
+ *terraform.InstanceDiff,
+ interface{}) (*terraform.InstanceState, error)
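
A hedged sketch that ties Resource and the Map type from map.go together; createThing, destroyThing, and the "example_thing" type name are hypothetical stand-ins for real provider logic.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/config"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// createThing and destroyThing are hypothetical stand-ins for real API calls.
func createThing(
	s *terraform.InstanceState,
	d *terraform.InstanceDiff,
	meta interface{}) (*terraform.InstanceState, error) {
	s.ID = resource.UniqueId()
	return s, nil
}

func destroyThing(s *terraform.InstanceState, meta interface{}) error {
	return nil
}

// resourceMap wires the per-type functions into a Map, which then backs
// Validate/Apply/Diff/Refresh/Resources for a ResourceProvider.
var resourceMap = &resource.Map{
	Mapping: map[string]resource.Resource{
		"example_thing": {
			ConfigValidator: &config.Validator{Required: []string{"name"}},
			Create:          createThing,
			Destroy:         destroyThing,
		},
	},
}

func main() {
	for _, rt := range resourceMap.Resources() {
		fmt.Println(rt.Name) // example_thing
	}
}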
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
new file mode 100644
index 00000000..37c586a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
@@ -0,0 +1,259 @@
+package resource
+
+import (
+ "log"
+ "time"
+)
+
+var refreshGracePeriod = 30 * time.Second
+
+// StateRefreshFunc is a function type used for StateChangeConf that is
+// responsible for refreshing the item being watched for a state change.
+//
+// It returns three results. `result` is any object that will be returned
+// as the final object after waiting for state change. This allows you to
+// return the final updated object, for example an EC2 instance after refreshing
+// it.
+//
+// `state` is the latest state of that object. And `err` is any error that
+// may have happened while refreshing the state.
+type StateRefreshFunc func() (result interface{}, state string, err error)
+
+// StateChangeConf is the configuration struct used for `WaitForState`.
+type StateChangeConf struct {
+ Delay time.Duration // Wait this time before starting checks
+ Pending []string // States that are "allowed" and will continue trying
+ Refresh StateRefreshFunc // Refreshes the current state
+ Target []string // Target state
+ Timeout time.Duration // The amount of time to wait before timeout
+ MinTimeout time.Duration // Smallest time to wait before refreshes
+ PollInterval time.Duration // Override MinTimeout/backoff and only poll this often
+ NotFoundChecks int // Number of times to allow not found
+
+ // This is to work around inconsistent APIs
+ ContinuousTargetOccurence int // Number of times the Target state has to occur continuously
+}
+
+// WaitForState watches an object and waits for it to achieve the state
+// specified in the configuration using the specified Refresh() func,
+// waiting the number of seconds specified in the timeout configuration.
+//
+// If the Refresh function returns an error, exit immediately with that error.
+//
+// If the Refresh function returns a state other than the Target state or one
+// listed in Pending, return immediately with an error.
+//
+// If the Timeout is exceeded before reaching the Target state, return an
+// error.
+//
+// Otherwise, return the result of the first call to the Refresh function to
+// reach the target state.
+func (conf *StateChangeConf) WaitForState() (interface{}, error) {
+ log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target)
+
+ notfoundTick := 0
+ targetOccurence := 0
+
+ // Set a default for times to check for not found
+ if conf.NotFoundChecks == 0 {
+ conf.NotFoundChecks = 20
+ }
+
+ if conf.ContinuousTargetOccurence == 0 {
+ conf.ContinuousTargetOccurence = 1
+ }
+
+ type Result struct {
+ Result interface{}
+ State string
+ Error error
+ Done bool
+ }
+
+ // Read every result from the refresh loop, waiting for a positive result.Done.
+ resCh := make(chan Result, 1)
+ // cancellation channel for the refresh loop
+ cancelCh := make(chan struct{})
+
+ result := Result{}
+
+ go func() {
+ defer close(resCh)
+
+ time.Sleep(conf.Delay)
+
+ // start with 0 delay for the first loop
+ var wait time.Duration
+
+ for {
+ // store the last result
+ resCh <- result
+
+ // wait and watch for cancellation
+ select {
+ case <-cancelCh:
+ return
+ case <-time.After(wait):
+ // first round had no wait
+ if wait == 0 {
+ wait = 100 * time.Millisecond
+ }
+ }
+
+ res, currentState, err := conf.Refresh()
+ result = Result{
+ Result: res,
+ State: currentState,
+ Error: err,
+ }
+
+ if err != nil {
+ resCh <- result
+ return
+ }
+
+ // If we're waiting for the absence of a thing, then return
+ if res == nil && len(conf.Target) == 0 {
+ targetOccurence++
+ if conf.ContinuousTargetOccurence == targetOccurence {
+ result.Done = true
+ resCh <- result
+ return
+ }
+ continue
+ }
+
+ if res == nil {
+ // If we didn't find the resource, check if we have been
+ // not finding it for a while, and if so, report an error.
+ notfoundTick++
+ if notfoundTick > conf.NotFoundChecks {
+ result.Error = &NotFoundError{
+ LastError: err,
+ Retries: notfoundTick,
+ }
+ resCh <- result
+ return
+ }
+ } else {
+ // Reset the counter for when a resource isn't found
+ notfoundTick = 0
+ found := false
+
+ for _, allowed := range conf.Target {
+ if currentState == allowed {
+ found = true
+ targetOccurence++
+ if conf.ContinuousTargetOccurence == targetOccurence {
+ result.Done = true
+ resCh <- result
+ return
+ }
+ continue
+ }
+ }
+
+ for _, allowed := range conf.Pending {
+ if currentState == allowed {
+ found = true
+ targetOccurence = 0
+ break
+ }
+ }
+
+ if !found && len(conf.Pending) > 0 {
+ result.Error = &UnexpectedStateError{
+ LastError: err,
+ State: result.State,
+ ExpectedState: conf.Target,
+ }
+ resCh <- result
+ return
+ }
+ }
+
+ // Wait between refreshes using exponential backoff, except when
+ // waiting for the target state to reoccur.
+ if targetOccurence == 0 {
+ wait *= 2
+ }
+
+ // If a poll interval has been specified, choose that interval.
+ // Otherwise bound the default value.
+ if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
+ wait = conf.PollInterval
+ } else {
+ if wait < conf.MinTimeout {
+ wait = conf.MinTimeout
+ } else if wait > 10*time.Second {
+ wait = 10 * time.Second
+ }
+ }
+
+ log.Printf("[TRACE] Waiting %s before next try", wait)
+ }
+ }()
+
+ // store the last value result from the refresh loop
+ lastResult := Result{}
+
+ timeout := time.After(conf.Timeout)
+ for {
+ select {
+ case r, ok := <-resCh:
+ // channel closed, so return the last result
+ if !ok {
+ return lastResult.Result, lastResult.Error
+ }
+
+ // we reached the intended state
+ if r.Done {
+ return r.Result, r.Error
+ }
+
+ // still waiting, store the last result
+ lastResult = r
+
+ case <-timeout:
+ log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout)
+ log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)
+
+ // cancel the goroutine and start our grace period timer
+ close(cancelCh)
+ timeout := time.After(refreshGracePeriod)
+
+ // we need a for loop and a label to break on, because we may have
+ // an extra response value to read, but still want to wait for the
+ // channel to close.
+ forSelect:
+ for {
+ select {
+ case r, ok := <-resCh:
+ if r.Done {
+ // the last refresh loop reached the desired state
+ return r.Result, r.Error
+ }
+
+ if !ok {
+ // the goroutine returned
+ break forSelect
+ }
+
+ // target state not reached, save the result for the
+ // TimeoutError and wait for the channel to close
+ lastResult = r
+ case <-timeout:
+ log.Println("[ERROR] WaitForState exceeded refresh grace period")
+ break forSelect
+ }
+ }
+
+ return nil, &TimeoutError{
+ LastError: lastResult.Error,
+ LastState: lastResult.State,
+ Timeout: conf.Timeout,
+ ExpectedState: conf.Target,
+ }
+ }
+ }
+}
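
A hypothetical sketch of StateChangeConf in use: the refresh closure stands in for a real API call and simply reports "running" after a couple of seconds; the states, timeouts, and returned ID are invented for illustration.

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

func main() {
	// A stand-in StateRefreshFunc: in a real provider this would call the
	// upstream API and return the fetched object plus its current state.
	start := time.Now()
	refresh := func() (interface{}, string, error) {
		if time.Since(start) < 2*time.Second {
			return nil, "", nil // not found yet
		}
		return "instance-123", "running", nil
	}

	conf := &resource.StateChangeConf{
		Pending:    []string{"pending"},
		Target:     []string{"running"},
		Refresh:    refresh,
		Timeout:    30 * time.Second,
		Delay:      1 * time.Second,
		MinTimeout: 500 * time.Millisecond,
	}

	obj, err := conf.WaitForState()
	if err != nil {
		panic(err)
	}
	fmt.Println("reached target state:", obj)
}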
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
new file mode 100644
index 00000000..04367c53
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
@@ -0,0 +1,790 @@
+package resource
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strings"
+ "testing"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/hashicorp/go-getter"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/helper/logging"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+const TestEnvVar = "TF_ACC"
+
+// TestProvider can be implemented by any ResourceProvider to provide custom
+// reset functionality at the start of an acceptance test.
+// The helper/schema Provider implements this interface.
+type TestProvider interface {
+ TestReset() error
+}
+
+// TestCheckFunc is the callback type used with acceptance tests to check
+// the state of a resource. The state passed in is the latest state known,
+// or in the case of being after a destroy, it is the last known state when
+// it was created.
+type TestCheckFunc func(*terraform.State) error
+
+// ImportStateCheckFunc is the check function for ImportState tests
+type ImportStateCheckFunc func([]*terraform.InstanceState) error
+
+// TestCase is a single acceptance test case used to test the apply/destroy
+// lifecycle of a resource in a specific configuration.
+//
+// When the destroy plan is executed, the config from the last TestStep
+// is used to plan it.
+type TestCase struct {
+ // IsUnitTest allows a test to run regardless of the TF_ACC
+ // environment variable. This should be used with care - only for
+ // fast tests on local resources (e.g. remote state with a local
+ // backend) but can be used to increase confidence in correct
+ // operation of Terraform without waiting for a full acctest run.
+ IsUnitTest bool
+
+ // PreCheck, if non-nil, will be called before any test steps are
+ // executed. It will only be executed in the case that the steps
+ // would run, so it can be used for some validation before running
+ // acceptance tests, such as verifying that keys are setup.
+ PreCheck func()
+
+ // Providers is the ResourceProvider that will be under test.
+ //
+ // Alternately, ProviderFactories can be specified for the providers
+ // that are valid. This takes priority over Providers.
+ //
+ // The end effect of each is the same: specifying the providers that
+ // are used within the tests.
+ Providers map[string]terraform.ResourceProvider
+ ProviderFactories map[string]terraform.ResourceProviderFactory
+
+ // PreventPostDestroyRefresh can be set to true for cases where data sources
+ // are tested alongside real resources
+ PreventPostDestroyRefresh bool
+
+ // CheckDestroy is called after the resource is finally destroyed
+ // to allow the tester to test that the resource is truly gone.
+ CheckDestroy TestCheckFunc
+
+ // Steps are the apply sequences done within the context of the
+ // same state. Each step can have its own check to verify correctness.
+ Steps []TestStep
+
+ // The settings below control the "ID-only refresh test." This is
+ // an enabled-by-default test that tests that a refresh can be
+ // refreshed with only an ID to result in the same attributes.
+ // This validates completeness of Refresh.
+ //
+ // IDRefreshName is the name of the resource to check. This will
+ // default to the first non-nil primary resource in the state.
+ //
+ // IDRefreshIgnore is a list of configuration keys that will be ignored.
+ IDRefreshName string
+ IDRefreshIgnore []string
+}
+
+// TestStep is a single apply sequence of a test, done within the
+// context of a state.
+//
+// Multiple TestSteps can be sequenced in a Test to allow testing
+// potentially complex update logic. In general, simple create/destroy
+// tests will only need one step.
+type TestStep struct {
+ // ResourceName should be set to the name of the resource
+ // that is being tested. Example: "aws_instance.foo". Various test
+ // modes use this to auto-detect state information.
+ //
+ // This is only required if the test mode settings below say it is
+ // for the mode you're using.
+ ResourceName string
+
+ // PreConfig is called before the Config is applied to perform any per-step
+ // setup that needs to happen. This is called regardless of "test mode"
+ // below.
+ PreConfig func()
+
+ //---------------------------------------------------------------
+ // Test modes. One of the following groups of settings must be
+ // set to determine what the test step will do. Ideally we would've
+ // used Go interfaces here but there are now hundreds of tests we don't
+ // want to re-type so instead we just determine which step logic
+ // to run based on what settings below are set.
+ //---------------------------------------------------------------
+
+ //---------------------------------------------------------------
+ // Plan, Apply testing
+ //---------------------------------------------------------------
+
+ // Config is a string of the configuration to give to Terraform. If this
+ // is set, then the TestCase will execute this step with the same logic
+ // as a `terraform apply`.
+ Config string
+
+ // Check is called after the Config is applied. Use this step to
+ // make your own API calls to check the status of things, and to
+ // inspect the format of the ResourceState itself.
+ //
+ // If an error is returned, the test will fail. In this case, a
+ // destroy plan will still be attempted.
+ //
+ // If this is nil, no check is done on this step.
+ Check TestCheckFunc
+
+ // Destroy will create a destroy plan if set to true.
+ Destroy bool
+
+ // ExpectNonEmptyPlan can be set to true for specific types of tests that are
+ // looking to verify that a diff occurs
+ ExpectNonEmptyPlan bool
+
+ // ExpectError allows the construction of test cases that we expect to fail
+ // with an error. The specified regexp must match against the error for the
+ // test to pass.
+ ExpectError *regexp.Regexp
+
+ // PlanOnly can be set to only run `plan` with this configuration, and not
+ // actually apply it. This is useful for ensuring config changes result in
+ // no-op plans
+ PlanOnly bool
+
+ // PreventPostDestroyRefresh can be set to true for cases where data sources
+ // are tested alongside real resources
+ PreventPostDestroyRefresh bool
+
+ //---------------------------------------------------------------
+ // ImportState testing
+ //---------------------------------------------------------------
+
+ // ImportState, if true, will test the functionality of ImportState
+ // by importing the resource with ResourceName (must be set) and the
+ // ID of that resource.
+ ImportState bool
+
+ // ImportStateId is the ID to perform an ImportState operation with.
+ // This is optional. If it isn't set, then the resource ID is automatically
+ // determined by inspecting the state for ResourceName's ID.
+ ImportStateId string
+
+ // ImportStateIdPrefix is the prefix added in front of ImportStateId.
+ // This can be useful in complex import cases, where more than one
+ // attribute needs to be passed on as the Import ID. Mainly in cases
+ // where the ID is not known, and a known prefix needs to be added to
+ // the unset ImportStateId field.
+ ImportStateIdPrefix string
+
+ // ImportStateCheck checks the results of ImportState. It should be
+ // used to verify that the resulting value of ImportState has the
+ // proper resources, IDs, and attributes.
+ ImportStateCheck ImportStateCheckFunc
+
+ // ImportStateVerify, if true, will also check that the state values
+ // that are finally put into the state after import match for all the
+ // IDs returned by the Import.
+ //
+ // ImportStateVerifyIgnore are fields that should not be verified to
+ // be equal. These can be set to ephemeral fields or fields that can't
+ // be refreshed and don't matter.
+ ImportStateVerify bool
+ ImportStateVerifyIgnore []string
+}
+
+// Test performs an acceptance test on a resource.
+//
+// Tests are not run unless an environment variable "TF_ACC" is
+// set to some non-empty value. This is to avoid test cases surprising
+// a user by creating real resources.
+//
+// Tests will fail unless the verbose flag (`go test -v`, or explicitly
+// the "-test.v" flag) is set. Because some acceptance tests take quite
+// a long time, we require the verbose flag so users are able to see progress
+// output.
+func Test(t TestT, c TestCase) {
+ // We only run acceptance tests if an env var is set because they're
+ // slow and generally require some outside configuration. You can opt out
+ // of this by setting IsUnitTest on individual TestCases.
+ if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest {
+ t.Skip(fmt.Sprintf(
+ "Acceptance tests skipped unless env '%s' set",
+ TestEnvVar))
+ return
+ }
+
+ logWriter, err := logging.LogOutput()
+ if err != nil {
+ t.Error(fmt.Errorf("error setting up logging: %s", err))
+ }
+ log.SetOutput(logWriter)
+
+ // We require verbose mode so that the user knows what is going on.
+ if !testTesting && !testing.Verbose() && !c.IsUnitTest {
+ t.Fatal("Acceptance tests must be run with the -v flag on tests")
+ return
+ }
+
+ // Run the PreCheck if we have it
+ if c.PreCheck != nil {
+ c.PreCheck()
+ }
+
+ ctxProviders, err := testProviderFactories(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ opts := terraform.ContextOpts{Providers: ctxProviders}
+
+ // A single state variable to track the lifecycle, starting with no state
+ var state *terraform.State
+
+ // Go through each step and run it
+ var idRefreshCheck *terraform.ResourceState
+ idRefresh := c.IDRefreshName != ""
+ errored := false
+ for i, step := range c.Steps {
+ var err error
+ log.Printf("[WARN] Test: Executing step %d", i)
+
+ // Determine the test mode to execute
+ if step.Config != "" {
+ state, err = testStepConfig(opts, state, step)
+ } else if step.ImportState {
+ state, err = testStepImportState(opts, state, step)
+ } else {
+ err = fmt.Errorf(
+ "unknown test mode for step. Please see TestStep docs\n\n%#v",
+ step)
+ }
+
+ // If there was an error, exit
+ if err != nil {
+ // Perhaps we expected an error? Check if it matches
+ if step.ExpectError != nil {
+ if !step.ExpectError.MatchString(err.Error()) {
+ errored = true
+ t.Error(fmt.Sprintf(
+ "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n",
+ i, err, step.ExpectError))
+ break
+ }
+ } else {
+ errored = true
+ t.Error(fmt.Sprintf(
+ "Step %d error: %s", i, err))
+ break
+ }
+ }
+
+ // If we've never checked an id-only refresh and our state isn't
+ // empty, find the first resource and test it.
+ if idRefresh && idRefreshCheck == nil && !state.Empty() {
+ // Find the first non-nil resource in the state
+ for _, m := range state.Modules {
+ if len(m.Resources) > 0 {
+ if v, ok := m.Resources[c.IDRefreshName]; ok {
+ idRefreshCheck = v
+ }
+
+ break
+ }
+ }
+
+ // If we have an instance to check for refreshes, do it
+ // immediately. We do it in the middle of another test
+ // because it shouldn't affect the overall state (refresh
+ // is read-only semantically) and we want to fail early if
+ // this fails. If refresh isn't read-only, then this will have
+ // caught a different bug.
+ if idRefreshCheck != nil {
+ log.Printf(
+ "[WARN] Test: Running ID-only refresh check on %s",
+ idRefreshCheck.Primary.ID)
+ if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil {
+ log.Printf("[ERROR] Test: ID-only test failed: %s", err)
+ t.Error(fmt.Sprintf(
+ "[ERROR] Test: ID-only test failed: %s", err))
+ break
+ }
+ }
+ }
+ }
+
+ // If we never checked an id-only refresh, it is a failure.
+ if idRefresh {
+ if !errored && len(c.Steps) > 0 && idRefreshCheck == nil {
+ t.Error("ID-only refresh check never ran.")
+ }
+ }
+
+ // If we have a state, then run the destroy
+ if state != nil {
+ lastStep := c.Steps[len(c.Steps)-1]
+ destroyStep := TestStep{
+ Config: lastStep.Config,
+ Check: c.CheckDestroy,
+ Destroy: true,
+ PreventPostDestroyRefresh: c.PreventPostDestroyRefresh,
+ }
+
+ log.Printf("[WARN] Test: Executing destroy step")
+ state, err := testStep(opts, state, destroyStep)
+ if err != nil {
+ t.Error(fmt.Sprintf(
+ "Error destroying resource! WARNING: Dangling resources\n"+
+ "may exist. The full state and error is shown below.\n\n"+
+ "Error: %s\n\nState: %s",
+ err,
+ state))
+ }
+ } else {
+ log.Printf("[WARN] Skipping destroy test since there is no state.")
+ }
+}
+
+// testProviderFactories is a helper to build the ResourceProviderFactory map
+// with pre-instantiated ResourceProviders, so that we can reset them for the
+// test, while only calling the factory function once.
+// Any errors are stored so that they can be returned by the factory in
+// terraform to match non-test behavior.
+func testProviderFactories(c TestCase) (map[string]terraform.ResourceProviderFactory, error) {
+ ctxProviders := c.ProviderFactories // make(map[string]terraform.ResourceProviderFactory)
+ if ctxProviders == nil {
+ ctxProviders = make(map[string]terraform.ResourceProviderFactory)
+ }
+ // add any fixed providers
+ for k, p := range c.Providers {
+ ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p)
+ }
+
+ // reset the providers if needed
+ for k, pf := range ctxProviders {
+ // we can ignore any errors here, if we don't have a provider to reset
+ // the error will be handled later
+ p, err := pf()
+ if err != nil {
+ return nil, err
+ }
+ if p, ok := p.(TestProvider); ok {
+ err := p.TestReset()
+ if err != nil {
+ return nil, fmt.Errorf("[ERROR] failed to reset provider %q: %s", k, err)
+ }
+ }
+ }
+
+ return ctxProviders, nil
+}
+
+// UnitTest is a helper to force the acceptance testing harness to run in the
+// normal unit test suite. This should only be used for resources that don't
+// have any external dependencies.
+func UnitTest(t TestT, c TestCase) {
+ c.IsUnitTest = true
+ Test(t, c)
+}
+
+func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error {
+ // TODO: We guard by this right now so master doesn't explode. We
+ // need to remove this eventually to make this part of the normal tests.
+ if os.Getenv("TF_ACC_IDONLY") == "" {
+ return nil
+ }
+
+ name := fmt.Sprintf("%s.foo", r.Type)
+
+ // Build the state. The state is just the resource with an ID. There
+ // are no attributes. We only set what is needed to perform a refresh.
+ state := terraform.NewState()
+ state.RootModule().Resources[name] = &terraform.ResourceState{
+ Type: r.Type,
+ Primary: &terraform.InstanceState{
+ ID: r.Primary.ID,
+ },
+ }
+
+ // Create the config module. We use the full config because Refresh
+ // doesn't have access to it and we may need things like provider
+ // configurations. The initial implementation of id-only checks used
+ // an empty config module, but that caused the aforementioned problems.
+ mod, err := testModule(opts, step)
+ if err != nil {
+ return err
+ }
+
+ // Initialize the context
+ opts.Module = mod
+ opts.State = state
+ ctx, err := terraform.NewContext(&opts)
+ if err != nil {
+ return err
+ }
+ if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {
+ if len(es) > 0 {
+ estrs := make([]string, len(es))
+ for i, e := range es {
+ estrs[i] = e.Error()
+ }
+ return fmt.Errorf(
+ "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
+ ws, estrs)
+ }
+
+ log.Printf("[WARN] Config warnings: %#v", ws)
+ }
+
+ // Refresh!
+ state, err = ctx.Refresh()
+ if err != nil {
+ return fmt.Errorf("Error refreshing: %s", err)
+ }
+
+ // Verify attribute equivalence.
+ actualR := state.RootModule().Resources[name]
+ if actualR == nil {
+ return fmt.Errorf("Resource gone!")
+ }
+ if actualR.Primary == nil {
+ return fmt.Errorf("Resource has no primary instance")
+ }
+ actual := actualR.Primary.Attributes
+ expected := r.Primary.Attributes
+ // Remove fields we're ignoring
+ for _, v := range c.IDRefreshIgnore {
+ for k, _ := range actual {
+ if strings.HasPrefix(k, v) {
+ delete(actual, k)
+ }
+ }
+ for k, _ := range expected {
+ if strings.HasPrefix(k, v) {
+ delete(expected, k)
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ // Determine only the different attributes
+ for k, v := range expected {
+ if av, ok := actual[k]; ok && v == av {
+ delete(expected, k)
+ delete(actual, k)
+ }
+ }
+
+ spewConf := spew.NewDefaultConfig()
+ spewConf.SortKeys = true
+ return fmt.Errorf(
+ "Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
+ "\n\n%s\n\n%s",
+ spewConf.Sdump(actual), spewConf.Sdump(expected))
+ }
+
+ return nil
+}
+
+func testModule(
+ opts terraform.ContextOpts,
+ step TestStep) (*module.Tree, error) {
+ if step.PreConfig != nil {
+ step.PreConfig()
+ }
+
+ cfgPath, err := ioutil.TempDir("", "tf-test")
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error creating temporary directory for config: %s", err)
+ }
+ defer os.RemoveAll(cfgPath)
+
+ // Write the configuration
+ cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf"))
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error creating temporary file for config: %s", err)
+ }
+
+ _, err = io.Copy(cfgF, strings.NewReader(step.Config))
+ cfgF.Close()
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error creating temporary file for config: %s", err)
+ }
+
+ // Parse the configuration
+ mod, err := module.NewTreeModule("", cfgPath)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error loading configuration: %s", err)
+ }
+
+ // Load the modules
+ modStorage := &getter.FolderStorage{
+ StorageDir: filepath.Join(cfgPath, ".tfmodules"),
+ }
+ err = mod.Load(modStorage, module.GetModeGet)
+ if err != nil {
+ return nil, fmt.Errorf("Error downloading modules: %s", err)
+ }
+
+ return mod, nil
+}
+
+func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) {
+ if c.ResourceName == "" {
+ return nil, fmt.Errorf("ResourceName must be set in TestStep")
+ }
+
+ for _, m := range state.Modules {
+ if len(m.Resources) > 0 {
+ if v, ok := m.Resources[c.ResourceName]; ok {
+ return v, nil
+ }
+ }
+ }
+
+ return nil, fmt.Errorf(
+ "Resource specified by ResourceName couldn't be found: %s", c.ResourceName)
+}
+
+// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into
+// a single TestCheckFunc.
+//
+// As a user testing their provider, this lets you decompose your checks
+// into smaller pieces more easily.
+func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
+ return func(s *terraform.State) error {
+ for i, f := range fs {
+ if err := f(s); err != nil {
+ return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)
+ }
+ }
+
+ return nil
+ }
+}
+
+// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into
+// a single TestCheckFunc.
+//
+// As a user testing their provider, this lets you decompose your checks
+// into smaller pieces more easily.
+//
+// Unlike ComposeTestCheckFunc, ComposeAggregateTestCheckFunc runs _all_ of the
+// TestCheckFuncs and aggregates failures.
+func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
+ return func(s *terraform.State) error {
+ var result *multierror.Error
+
+ for i, f := range fs {
+ if err := f(s); err != nil {
+ result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err))
+ }
+ }
+
+ return result.ErrorOrNil()
+ }
+}
+
+// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value
+// exists in state for the given name/key combination. It is useful when
+// testing that computed values were set, when it is not possible to
+// know ahead of time what the values will be.
+func TestCheckResourceAttrSet(name, key string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ is, err := primaryInstanceState(s, name)
+ if err != nil {
+ return err
+ }
+
+ if val, ok := is.Attributes[key]; ok && val != "" {
+ return nil
+ }
+
+ return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key)
+ }
+}
+
+// TestCheckResourceAttr is a TestCheckFunc which validates
+// the value in state for the given name/key combination.
+func TestCheckResourceAttr(name, key, value string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ is, err := primaryInstanceState(s, name)
+ if err != nil {
+ return err
+ }
+
+ if v, ok := is.Attributes[key]; !ok || v != value {
+ if !ok {
+ return fmt.Errorf("%s: Attribute '%s' not found", name, key)
+ }
+
+ return fmt.Errorf(
+ "%s: Attribute '%s' expected %#v, got %#v",
+ name,
+ key,
+ value,
+ v)
+ }
+
+ return nil
+ }
+}
+
+// TestCheckNoResourceAttr is a TestCheckFunc which ensures that
+// NO value exists in state for the given name/key combination.
+func TestCheckNoResourceAttr(name, key string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ is, err := primaryInstanceState(s, name)
+ if err != nil {
+ return err
+ }
+
+ if _, ok := is.Attributes[key]; ok {
+ return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key)
+ }
+
+ return nil
+ }
+}
+
+// TestMatchResourceAttr is a TestCheckFunc which checks that the value
+// in state for the given name/key combination matches the given regex.
+func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {
+ return func(s *terraform.State) error {
+ is, err := primaryInstanceState(s, name)
+ if err != nil {
+ return err
+ }
+
+ if !r.MatchString(is.Attributes[key]) {
+ return fmt.Errorf(
+ "%s: Attribute '%s' didn't match %q, got %#v",
+ name,
+ key,
+ r.String(),
+ is.Attributes[key])
+ }
+
+ return nil
+ }
+}
+
+// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the
+// value is a pointer so that it can be updated while the test is running.
+// It will only be dereferenced at the point this step is run.
+func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ return TestCheckResourceAttr(name, key, *value)(s)
+ }
+}
+
+// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values
+// in state for a pair of name/key combinations are equal.
+func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ isFirst, err := primaryInstanceState(s, nameFirst)
+ if err != nil {
+ return err
+ }
+ vFirst, ok := isFirst.Attributes[keyFirst]
+ if !ok {
+ return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst)
+ }
+
+ isSecond, err := primaryInstanceState(s, nameSecond)
+ if err != nil {
+ return err
+ }
+ vSecond, ok := isSecond.Attributes[keySecond]
+ if !ok {
+ return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond)
+ }
+
+ if vFirst != vSecond {
+ return fmt.Errorf(
+ "%s: Attribute '%s' expected %#v, got %#v",
+ nameFirst,
+ keyFirst,
+ vSecond,
+ vFirst)
+ }
+
+ return nil
+ }
+}
+
+// TestCheckOutput checks an output in the Terraform configuration
+func TestCheckOutput(name, value string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ ms := s.RootModule()
+ rs, ok := ms.Outputs[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ if rs.Value != value {
+ return fmt.Errorf(
+ "Output '%s': expected %#v, got %#v",
+ name,
+ value,
+ rs)
+ }
+
+ return nil
+ }
+}
+
+func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc {
+ return func(s *terraform.State) error {
+ ms := s.RootModule()
+ rs, ok := ms.Outputs[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ if !r.MatchString(rs.Value.(string)) {
+ return fmt.Errorf(
+ "Output '%s': %#v didn't match %q",
+ name,
+ rs,
+ r.String())
+ }
+
+ return nil
+ }
+}
+
+// TestT is the interface used to handle the test lifecycle of a test.
+//
+// Users should just use a *testing.T object, which implements this.
+type TestT interface {
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Skip(args ...interface{})
+}
+
+// This is set to true by unit tests to alter some behavior
+var testTesting = false
+
+// primaryInstanceState returns the primary instance state for the given resource name.
+func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) {
+ ms := s.RootModule()
+ rs, ok := ms.Resources[name]
+ if !ok {
+ return nil, fmt.Errorf("Not found: %s", name)
+ }
+
+ is := rs.Primary
+ if is == nil {
+ return nil, fmt.Errorf("No primary instance: %s", name)
+ }
+
+ return is, nil
+}
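
Provider authors can also write a TestCheckFunc directly against the state, mirroring what primaryInstanceState does above. A hypothetical sketch; the resource address and the "tags.%" attribute key are placeholders.

package example

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// testCheckTagCount looks a resource up in the root module, takes its
// primary instance, and inspects the flatmapped attributes ("tags.%" is the
// count key written for a map attribute).
func testCheckTagCount(name, want string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[name]
		if !ok {
			return fmt.Errorf("not found in state: %s", name)
		}
		if rs.Primary == nil {
			return fmt.Errorf("no primary instance: %s", name)
		}
		if got := rs.Primary.Attributes["tags.%"]; got != want {
			return fmt.Errorf("%s: expected %s tags, got %q", name, want, got)
		}
		return nil
	}
}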
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
new file mode 100644
index 00000000..537a11c3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
@@ -0,0 +1,160 @@
+package resource
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// testStepConfig runs a config-mode test step
+func testStepConfig(
+ opts terraform.ContextOpts,
+ state *terraform.State,
+ step TestStep) (*terraform.State, error) {
+ return testStep(opts, state, step)
+}
+
+func testStep(
+ opts terraform.ContextOpts,
+ state *terraform.State,
+ step TestStep) (*terraform.State, error) {
+ mod, err := testModule(opts, step)
+ if err != nil {
+ return state, err
+ }
+
+ // Build the context
+ opts.Module = mod
+ opts.State = state
+ opts.Destroy = step.Destroy
+ ctx, err := terraform.NewContext(&opts)
+ if err != nil {
+ return state, fmt.Errorf("Error initializing context: %s", err)
+ }
+ if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {
+ if len(es) > 0 {
+ estrs := make([]string, len(es))
+ for i, e := range es {
+ estrs[i] = e.Error()
+ }
+ return state, fmt.Errorf(
+ "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
+ ws, estrs)
+ }
+ log.Printf("[WARN] Config warnings: %#v", ws)
+ }
+
+ // Refresh!
+ state, err = ctx.Refresh()
+ if err != nil {
+ return state, fmt.Errorf(
+ "Error refreshing: %s", err)
+ }
+
+ // If this step is a PlanOnly step, skip over this first Plan and subsequent
+ // Apply, and use the follow up Plan that checks for perpetual diffs
+ if !step.PlanOnly {
+ // Plan!
+ if p, err := ctx.Plan(); err != nil {
+ return state, fmt.Errorf(
+ "Error planning: %s", err)
+ } else {
+ log.Printf("[WARN] Test: Step plan: %s", p)
+ }
+
+ // We need to keep a copy of the state prior to destroying
+ // such that destroy steps can verify their behaviour in the check
+ // function
+ stateBeforeApplication := state.DeepCopy()
+
+ // Apply!
+ state, err = ctx.Apply()
+ if err != nil {
+ return state, fmt.Errorf("Error applying: %s", err)
+ }
+
+ // Check! Excitement!
+ if step.Check != nil {
+ if step.Destroy {
+ if err := step.Check(stateBeforeApplication); err != nil {
+ return state, fmt.Errorf("Check failed: %s", err)
+ }
+ } else {
+ if err := step.Check(state); err != nil {
+ return state, fmt.Errorf("Check failed: %s", err)
+ }
+ }
+ }
+ }
+
+ // Now, verify that the plan is empty and we don't have a perpetual diff issue.
+ // We do this with TWO plans. One without a refresh.
+ var p *terraform.Plan
+ if p, err = ctx.Plan(); err != nil {
+ return state, fmt.Errorf("Error on follow-up plan: %s", err)
+ }
+ if p.Diff != nil && !p.Diff.Empty() {
+ if step.ExpectNonEmptyPlan {
+ log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p)
+ } else {
+ return state, fmt.Errorf(
+ "After applying this step, the plan was not empty:\n\n%s", p)
+ }
+ }
+
+ // And another after a Refresh.
+ if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) {
+ state, err = ctx.Refresh()
+ if err != nil {
+ return state, fmt.Errorf(
+ "Error on follow-up refresh: %s", err)
+ }
+ }
+ if p, err = ctx.Plan(); err != nil {
+ return state, fmt.Errorf("Error on second follow-up plan: %s", err)
+ }
+ empty := p.Diff == nil || p.Diff.Empty()
+
+ // Data resources are tricky because they legitimately get instantiated
+ // during refresh so that they will be already populated during the
+ // plan walk. Because of this, if we have any data resources in the
+ // config we'll end up wanting to destroy them again here. This is
+ // acceptable and expected, and we'll treat it as "empty" for the
+ // sake of this testing.
+ if step.Destroy {
+ empty = true
+
+ for _, moduleDiff := range p.Diff.Modules {
+ for k, instanceDiff := range moduleDiff.Resources {
+ if !strings.HasPrefix(k, "data.") {
+ empty = false
+ break
+ }
+
+ if !instanceDiff.Destroy {
+ empty = false
+ }
+ }
+ }
+ }
+
+ if !empty {
+ if step.ExpectNonEmptyPlan {
+ log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p)
+ } else {
+ return state, fmt.Errorf(
+ "After applying this step and refreshing, "+
+ "the plan was not empty:\n\n%s", p)
+ }
+ }
+
+ // Made it here, but expected a non-empty plan, fail!
+ if step.ExpectNonEmptyPlan && (p.Diff == nil || p.Diff.Empty()) {
+ return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!")
+ }
+
+ // Made it here? Good job test step!
+ return state, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
new file mode 100644
index 00000000..28ad1052
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
@@ -0,0 +1,141 @@
+package resource
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strings"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// testStepImportState runs an import state test step
+func testStepImportState(
+ opts terraform.ContextOpts,
+ state *terraform.State,
+ step TestStep) (*terraform.State, error) {
+ // Determine the ID to import
+ importId := step.ImportStateId
+ if importId == "" {
+ resource, err := testResource(step, state)
+ if err != nil {
+ return state, err
+ }
+
+ importId = resource.Primary.ID
+ }
+ importPrefix := step.ImportStateIdPrefix
+ if importPrefix != "" {
+ importId = fmt.Sprintf("%s%s", importPrefix, importId)
+ }
+
+ // Set up the context. We initialize with an empty state. We use the
+ // full config for provider configurations.
+ mod, err := testModule(opts, step)
+ if err != nil {
+ return state, err
+ }
+
+ opts.Module = mod
+ opts.State = terraform.NewState()
+ ctx, err := terraform.NewContext(&opts)
+ if err != nil {
+ return state, err
+ }
+
+ // Do the import!
+ newState, err := ctx.Import(&terraform.ImportOpts{
+ // Set the module so that any provider config is loaded
+ Module: mod,
+
+ Targets: []*terraform.ImportTarget{
+ &terraform.ImportTarget{
+ Addr: step.ResourceName,
+ ID: importId,
+ },
+ },
+ })
+ if err != nil {
+ log.Printf("[ERROR] Test: ImportState failure: %s", err)
+ return state, err
+ }
+
+ // Go through the new state and verify
+ if step.ImportStateCheck != nil {
+ var states []*terraform.InstanceState
+ for _, r := range newState.RootModule().Resources {
+ if r.Primary != nil {
+ states = append(states, r.Primary)
+ }
+ }
+ if err := step.ImportStateCheck(states); err != nil {
+ return state, err
+ }
+ }
+
+ // Verify that all the states match
+ if step.ImportStateVerify {
+ new := newState.RootModule().Resources
+ old := state.RootModule().Resources
+ for _, r := range new {
+ // Find the existing resource
+ var oldR *terraform.ResourceState
+ for _, r2 := range old {
+ if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type {
+ oldR = r2
+ break
+ }
+ }
+ if oldR == nil {
+ return state, fmt.Errorf(
+ "Failed state verification, resource with ID %s not found",
+ r.Primary.ID)
+ }
+
+ // Compare their attributes
+ actual := make(map[string]string)
+ for k, v := range r.Primary.Attributes {
+ actual[k] = v
+ }
+ expected := make(map[string]string)
+ for k, v := range oldR.Primary.Attributes {
+ expected[k] = v
+ }
+
+ // Remove fields we're ignoring
+ for _, v := range step.ImportStateVerifyIgnore {
+ for k := range actual {
+ if strings.HasPrefix(k, v) {
+ delete(actual, k)
+ }
+ }
+ for k := range expected {
+ if strings.HasPrefix(k, v) {
+ delete(expected, k)
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ // Determine only the different attributes
+ for k, v := range expected {
+ if av, ok := actual[k]; ok && v == av {
+ delete(expected, k)
+ delete(actual, k)
+ }
+ }
+
+ spewConf := spew.NewDefaultConfig()
+ spewConf.SortKeys = true
+ return state, fmt.Errorf(
+ "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
+ "\n\n%s\n\n%s",
+ spewConf.Sdump(actual), spewConf.Sdump(expected))
+ }
+ }
+ }
+
+ // Return the old state (non-imported) so we don't change anything.
+ return state, nil
+}
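
For reference, a hypothetical follow-up TestStep that exercises the import path above: it re-imports the resource created by an earlier step and verifies the imported attributes against the prior state, ignoring a write-only field. The resource address and field name are placeholders, and the ImportState flag is assumed from the TestStep definition in testing.go.

package example

import "github.com/hashicorp/terraform/helper/resource"

// importStep re-imports "example_thing.foo" by its state ID and compares the
// imported attributes with the old state, skipping keys prefixed "password".
var importStep = resource.TestStep{
	ResourceName:            "example_thing.foo",
	ImportState:             true,
	ImportStateVerify:       true,
	ImportStateVerifyIgnore: []string{"password"},
}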
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
new file mode 100644
index 00000000..ca50e292
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
@@ -0,0 +1,84 @@
+package resource
+
+import (
+ "sync"
+ "time"
+)
+
+// Retry is a basic wrapper around StateChangeConf that will just retry
+// a function until it no longer returns an error.
+func Retry(timeout time.Duration, f RetryFunc) error {
+ // These are used to pull the error out of the function; need a mutex to
+ // avoid a data race.
+ var resultErr error
+ var resultErrMu sync.Mutex
+
+ c := &StateChangeConf{
+ Pending: []string{"retryableerror"},
+ Target: []string{"success"},
+ Timeout: timeout,
+ MinTimeout: 500 * time.Millisecond,
+ Refresh: func() (interface{}, string, error) {
+ rerr := f()
+
+ resultErrMu.Lock()
+ defer resultErrMu.Unlock()
+
+ if rerr == nil {
+ resultErr = nil
+ return 42, "success", nil
+ }
+
+ resultErr = rerr.Err
+
+ if rerr.Retryable {
+ return 42, "retryableerror", nil
+ }
+ return nil, "quit", rerr.Err
+ },
+ }
+
+ _, waitErr := c.WaitForState()
+
+ // Need to acquire the lock here to be able to avoid race using resultErr as
+ // the return value
+ resultErrMu.Lock()
+ defer resultErrMu.Unlock()
+
+ // resultErr may be nil because the wait timed out and resultErr was never
+ // set; this is still an error
+ if resultErr == nil {
+ return waitErr
+ }
+ // resultErr takes precedence over waitErr if both are set because it is
+ // more likely to be useful
+ return resultErr
+}
+
+// RetryFunc is the function retried until it succeeds.
+type RetryFunc func() *RetryError
+
+// RetryError is the required return type of RetryFunc. It forces client code
+// to choose whether or not a given error is retryable.
+type RetryError struct {
+ Err error
+ Retryable bool
+}
+
+// RetryableError is a helper to create a RetryError that's retryable from a
+// given error.
+func RetryableError(err error) *RetryError {
+ if err == nil {
+ return nil
+ }
+ return &RetryError{Err: err, Retryable: true}
+}
+
+// NonRetryableError is a helper to create a RetryError that's _not_ retryable
+// from a given error.
+func NonRetryableError(err error) *RetryError {
+ if err == nil {
+ return nil
+ }
+ return &RetryError{Err: err, Retryable: false}
+}
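
A minimal sketch of how a provider might use Retry; the client type and its Status method are hypothetical and only illustrate how RetryableError and NonRetryableError classify failures.

package example

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

type exampleClient struct{}

// Status is a stand-in for a real API call.
func (c *exampleClient) Status() (string, error) { return "READY", nil }

// waitForReady polls until the hypothetical API reports READY, for at most
// five minutes.
func waitForReady(c *exampleClient) error {
	return resource.Retry(5*time.Minute, func() *resource.RetryError {
		status, err := c.Status()
		if err != nil {
			// A hard API failure aborts the retry loop immediately.
			return resource.NonRetryableError(err)
		}
		if status != "READY" {
			// Not ready yet: ask Retry to call us again.
			return resource.RetryableError(fmt.Errorf("still %q, waiting", status))
		}
		return nil
	})
}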
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/README.md b/vendor/github.com/hashicorp/terraform/helper/schema/README.md
new file mode 100644
index 00000000..28c83628
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/README.md
@@ -0,0 +1,11 @@
+# Terraform Helper Lib: schema
+
+The `schema` package provides a high-level interface for writing resource
+providers for Terraform.
+
+If you're writing a resource provider, we recommend you use this package.
+
+The interface exposed by this package is much friendlier than trying to
+write to the Terraform API directly. The core Terraform API is low-level
+and built for maximum flexibility and control, whereas this library is built
+as a framework around that to more easily write common providers.
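
A minimal, hypothetical sketch of the kind of resource this package lets a provider declare; the resource name, attribute, and placeholder ID are illustrative only.

package example

import "github.com/hashicorp/terraform/helper/schema"

// exampleThing declares one string attribute and trivial CRUD functions.
// With no Update implementation, the configurable attribute is ForceNew.
func exampleThing() *schema.Resource {
	return &schema.Resource{
		Create: func(d *schema.ResourceData, meta interface{}) error {
			d.SetId("example-id") // placeholder ID
			return nil
		},
		Read: func(d *schema.ResourceData, meta interface{}) error { return nil },
		Delete: func(d *schema.ResourceData, meta interface{}) error {
			d.SetId("")
			return nil
		},
		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
		},
	}
}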
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
new file mode 100644
index 00000000..a0729c02
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
@@ -0,0 +1,94 @@
+package schema
+
+import (
+ "context"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Backend represents a partial backend.Backend implementation and simplifies
+// the creation of configuration loading and validation.
+//
+// Unlike other schema structs such as Provider, this struct is meant to be
+// embedded within your actual implementation. It provides implementations
+// only for Input, Validate, and Configure and gives you a method for accessing the
+// configuration in the form of a ResourceData that you're expected to call
+// from the other implementation funcs.
+type Backend struct {
+ // Schema is the schema for the configuration of this backend. If this
+ // Backend has no configuration this can be omitted.
+ Schema map[string]*Schema
+
+ // ConfigureFunc is called to configure the backend. Use the
+ // FromContext* methods to extract information from the context.
+ // This can be nil, in which case nothing will be called but the
+ // config will still be stored.
+ ConfigureFunc func(context.Context) error
+
+ config *ResourceData
+}
+
+var (
+ backendConfigKey = contextKey("backend config")
+)
+
+// FromContextBackendConfig extracts a ResourceData with the configuration
+// from the context. This should only be called by Backend functions.
+func FromContextBackendConfig(ctx context.Context) *ResourceData {
+ return ctx.Value(backendConfigKey).(*ResourceData)
+}
+
+func (b *Backend) Input(
+ input terraform.UIInput,
+ c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+ if b == nil {
+ return c, nil
+ }
+
+ return schemaMap(b.Schema).Input(input, c)
+}
+
+func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ if b == nil {
+ return nil, nil
+ }
+
+ return schemaMap(b.Schema).Validate(c)
+}
+
+func (b *Backend) Configure(c *terraform.ResourceConfig) error {
+ if b == nil {
+ return nil
+ }
+
+ sm := schemaMap(b.Schema)
+
+ // Get a ResourceData for this configuration. To do this, we actually
+ // generate an intermediary "diff" although that is never exposed.
+ diff, err := sm.Diff(nil, c)
+ if err != nil {
+ return err
+ }
+
+ data, err := sm.Data(nil, diff)
+ if err != nil {
+ return err
+ }
+ b.config = data
+
+ if b.ConfigureFunc != nil {
+ err = b.ConfigureFunc(context.WithValue(
+ context.Background(), backendConfigKey, data))
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Config returns the configuration. This is available after Configure is
+// called.
+func (b *Backend) Config() *ResourceData {
+ return b.config
+}
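
A hypothetical sketch of embedding Backend: the embedded struct supplies Input, Validate, and Configure, while ConfigureFunc recovers the decoded config through FromContextBackendConfig. The backend name and "bucket" attribute are placeholders.

package example

import (
	"context"

	"github.com/hashicorp/terraform/helper/schema"
)

type exampleBackend struct {
	*schema.Backend

	bucket string
}

// newExampleBackend wires the schema and ConfigureFunc into the embedded
// Backend so the surrounding struct only has to implement the remaining
// backend methods.
func newExampleBackend() *exampleBackend {
	b := &exampleBackend{}
	b.Backend = &schema.Backend{
		Schema: map[string]*schema.Schema{
			"bucket": {
				Type:     schema.TypeString,
				Required: true,
			},
		},
		ConfigureFunc: func(ctx context.Context) error {
			data := schema.FromContextBackendConfig(ctx)
			b.bucket = data.Get("bucket").(string)
			return nil
		},
	}
	return b
}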
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
new file mode 100644
index 00000000..5a03d2d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
@@ -0,0 +1,59 @@
+package schema
+
+import (
+ "fmt"
+)
+
+// DataSourceResourceShim takes a Resource instance describing a data source
+// (with a Read implementation and a Schema, at least) and returns a new
+// Resource instance with additional Create and Delete implementations that
+// allow the data source to be used as a resource.
+//
+// This is a backward-compatibility layer for data sources that were formerly
+// read-only resources before the data source concept was added. It should not
+// be used for any *new* data sources.
+//
+// The Read function for the data source *must* call d.SetId with a non-empty
+// id in order for this shim to function as expected.
+//
+// The provided Resource instance, and its schema, will be modified in-place
+// to make it suitable for use as a full resource.
+func DataSourceResourceShim(name string, dataSource *Resource) *Resource {
+ // Recursively, in-place adjust the schema so that it has ForceNew
+ // on any user-settable attribute.
+ dataSourceResourceShimAdjustSchema(dataSource.Schema)
+
+ dataSource.Create = CreateFunc(dataSource.Read)
+ dataSource.Delete = func(d *ResourceData, meta interface{}) error {
+ d.SetId("")
+ return nil
+ }
+ dataSource.Update = nil // should already be nil, but let's make sure
+
+ // FIXME: Link to some further docs either on the website or in the
+ // changelog, once such a thing exists.
+ dataSource.deprecationMessage = fmt.Sprintf(
+ "using %s as a resource is deprecated; consider using the data source instead",
+ name,
+ )
+
+ return dataSource
+}
+
+func dataSourceResourceShimAdjustSchema(schema map[string]*Schema) {
+ for _, s := range schema {
+ // If the attribute is configurable then it must be ForceNew,
+ // since we have no Update implementation.
+ if s.Required || s.Optional {
+ s.ForceNew = true
+ }
+
+ // If the attribute is a nested resource, we need to recursively
+ // apply these same adjustments to it.
+ if s.Elem != nil {
+ if r, ok := s.Elem.(*Resource); ok {
+ dataSourceResourceShimAdjustSchema(r.Schema)
+ }
+ }
+ }
+}
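
A hypothetical sketch of registering a legacy resource alias for a data source via DataSourceResourceShim; the names are placeholders, and the data source's Read sets a non-empty ID as the shim requires.

package example

import "github.com/hashicorp/terraform/helper/schema"

// dataSourceExampleThing is a stand-in data source definition.
func dataSourceExampleThing() *schema.Resource {
	return &schema.Resource{
		Read: func(d *schema.ResourceData, meta interface{}) error {
			d.SetId("example") // a non-empty ID is required by the shim
			return nil
		},
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Optional: true},
		},
	}
}

// providerResources exposes the deprecated resource alias alongside whatever
// real resources the provider defines.
func providerResources() map[string]*schema.Resource {
	return map[string]*schema.Resource{
		"example_thing": schema.DataSourceResourceShim(
			"example_thing", dataSourceExampleThing()),
	}
}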
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go
new file mode 100644
index 00000000..d5e20e03
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go
@@ -0,0 +1,6 @@
+package schema
+
+// Equal is an interface that checks for deep equality between two objects.
+type Equal interface {
+ Equal(interface{}) bool
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
new file mode 100644
index 00000000..1660a670
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
@@ -0,0 +1,334 @@
+package schema
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// FieldReaders are responsible for decoding fields out of data into
+// the proper typed representation. ResourceData uses this to query data
+// out of multiple sources: config, state, diffs, etc.
+type FieldReader interface {
+ ReadField([]string) (FieldReadResult, error)
+}
+
+// FieldReadResult encapsulates all the resulting data from reading
+// a field.
+type FieldReadResult struct {
+ // Value is the actual read value. ValueProcessed is a separately
+ // tracked, processed form of the value; it is only set by readers
+ // that have both forms available (currently the diff field reader)
+ // and can be ignored for most reads.
+ Value interface{}
+ ValueProcessed interface{}
+
+ // Exists is true if the field was found in the data. False means
+ // it wasn't found if there was no error.
+ Exists bool
+
+ // Computed is true if the field was found but the value
+ // is computed.
+ Computed bool
+}
+
+// ValueOrZero returns the value of this result or the zero value of the
+// schema type, ensuring a consistent non-nil return value.
+func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} {
+ if r.Value != nil {
+ return r.Value
+ }
+
+ return s.ZeroValue()
+}
+
+// addrToSchema finds the final element schema for the given address
+// and the given schema. It returns all the schemas that led to the final
+// schema. These are in order of the address (out to in).
+func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema {
+ current := &Schema{
+ Type: typeObject,
+ Elem: schemaMap,
+ }
+
+ // If we aren't given an address, then the user is requesting the
+ // full object, so we return the special value which is the full object.
+ if len(addr) == 0 {
+ return []*Schema{current}
+ }
+
+ result := make([]*Schema, 0, len(addr))
+ for len(addr) > 0 {
+ k := addr[0]
+ addr = addr[1:]
+
+ REPEAT:
+ // We want to trim off the first "typeObject" since it's not a
+ // real lookup that people do, i.e. []string{"foo"} in a structure
+ // isn't {typeObject, typeString}, it's just a {typeString}.
+ if len(result) > 0 || current.Type != typeObject {
+ result = append(result, current)
+ }
+
+ switch t := current.Type; t {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ if len(addr) > 0 {
+ return nil
+ }
+ case TypeList, TypeSet:
+ isIndex := len(addr) > 0 && addr[0] == "#"
+
+ switch v := current.Elem.(type) {
+ case *Resource:
+ current = &Schema{
+ Type: typeObject,
+ Elem: v.Schema,
+ }
+ case *Schema:
+ current = v
+ case ValueType:
+ current = &Schema{Type: v}
+ default:
+ // we may not know the Elem type and are just looking for the
+ // index
+ if isIndex {
+ break
+ }
+
+ if len(addr) == 0 {
+ // we've processed the address, so return what we've
+ // collected
+ return result
+ }
+
+ if len(addr) == 1 {
+ if _, err := strconv.Atoi(addr[0]); err == nil {
+ // we're indexing a value without a schema. This can
+ // happen if the list is nested in another schema type.
+ // Default to a TypeString like we do with a map
+ current = &Schema{Type: TypeString}
+ break
+ }
+ }
+
+ return nil
+ }
+
+ // If we only have one more thing and the next thing
+ // is a #, then we're accessing the index which is always
+ // an int.
+ if isIndex {
+ current = &Schema{Type: TypeInt}
+ break
+ }
+
+ case TypeMap:
+ if len(addr) > 0 {
+ switch v := current.Elem.(type) {
+ case ValueType:
+ current = &Schema{Type: v}
+ default:
+ // maps default to string values. This is all we can have
+ // if this is nested in another list or map.
+ current = &Schema{Type: TypeString}
+ }
+ }
+ case typeObject:
+ // If we're already in the object, then we want to handle Sets
+ // and Lists specially. Basically, their next key is the lookup
+ // key (the set value or the list element). For these scenarios,
+ // we just want to skip it and move to the next element if there
+ // is one.
+ if len(result) > 0 {
+ lastType := result[len(result)-2].Type
+ if lastType == TypeSet || lastType == TypeList {
+ if len(addr) == 0 {
+ break
+ }
+
+ k = addr[0]
+ addr = addr[1:]
+ }
+ }
+
+ m := current.Elem.(map[string]*Schema)
+ val, ok := m[k]
+ if !ok {
+ return nil
+ }
+
+ current = val
+ goto REPEAT
+ }
+ }
+
+ return result
+}
+
+// readListField is a generic method for reading a list field out of a
+// FieldReader. It does this based on the assumption that there is a key
+// "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc.
+// after that point.
+func readListField(
+ r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) {
+ addrPadded := make([]string, len(addr)+1)
+ copy(addrPadded, addr)
+ addrPadded[len(addrPadded)-1] = "#"
+
+ // Get the number of elements in the list
+ countResult, err := r.ReadField(addrPadded)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !countResult.Exists {
+ // No count means we have no list
+ countResult.Value = 0
+ }
+
+ // If we have an empty list, then return an empty list
+ if countResult.Computed || countResult.Value.(int) == 0 {
+ return FieldReadResult{
+ Value: []interface{}{},
+ Exists: countResult.Exists,
+ Computed: countResult.Computed,
+ }, nil
+ }
+
+ // Go through each index and get the item value out of it
+ result := make([]interface{}, countResult.Value.(int))
+ for i := range result {
+ is := strconv.FormatInt(int64(i), 10)
+ addrPadded[len(addrPadded)-1] = is
+ rawResult, err := r.ReadField(addrPadded)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !rawResult.Exists {
+ // This should never happen, because by the time the data
+ // gets to the FieldReaders, all the defaults should be set by
+ // Schema.
+ rawResult.Value = nil
+ }
+
+ result[i] = rawResult.Value
+ }
+
+ return FieldReadResult{
+ Value: result,
+ Exists: true,
+ }, nil
+}
+
+// readObjectField is a generic method for reading objects out of FieldReaders
+// based on the assumption that building an address of []string{k, FIELD}
+// will result in the proper field data.
+func readObjectField(
+ r FieldReader,
+ addr []string,
+ schema map[string]*Schema) (FieldReadResult, error) {
+ result := make(map[string]interface{})
+ exists := false
+ for field, s := range schema {
+ addrRead := make([]string, len(addr), len(addr)+1)
+ copy(addrRead, addr)
+ addrRead = append(addrRead, field)
+ rawResult, err := r.ReadField(addrRead)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if rawResult.Exists {
+ exists = true
+ }
+
+ result[field] = rawResult.ValueOrZero(s)
+ }
+
+ return FieldReadResult{
+ Value: result,
+ Exists: exists,
+ }, nil
+}
+
+// convert map values to the proper primitive type based on schema.Elem
+func mapValuesToPrimitive(m map[string]interface{}, schema *Schema) error {
+
+ elemType := TypeString
+ if et, ok := schema.Elem.(ValueType); ok {
+ elemType = et
+ }
+
+ switch elemType {
+ case TypeInt, TypeFloat, TypeBool:
+ for k, v := range m {
+ vs, ok := v.(string)
+ if !ok {
+ continue
+ }
+
+ v, err := stringToPrimitive(vs, false, &Schema{Type: elemType})
+ if err != nil {
+ return err
+ }
+
+ m[k] = v
+ }
+ }
+ return nil
+}
+
+func stringToPrimitive(
+ value string, computed bool, schema *Schema) (interface{}, error) {
+ var returnVal interface{}
+ switch schema.Type {
+ case TypeBool:
+ if value == "" {
+ returnVal = false
+ break
+ }
+ if computed {
+ break
+ }
+
+ v, err := strconv.ParseBool(value)
+ if err != nil {
+ return nil, err
+ }
+
+ returnVal = v
+ case TypeFloat:
+ if value == "" {
+ returnVal = 0.0
+ break
+ }
+ if computed {
+ break
+ }
+
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ returnVal = v
+ case TypeInt:
+ if value == "" {
+ returnVal = 0
+ break
+ }
+ if computed {
+ break
+ }
+
+ v, err := strconv.ParseInt(value, 0, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ returnVal = int(v)
+ case TypeString:
+ returnVal = value
+ default:
+ panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+ }
+
+ return returnVal, nil
+}
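
An in-package sketch (addrToSchema is unexported) of the address walk described above; the "ports" schema is a placeholder.

package schema

// exampleAddrToSchema shows how addresses resolve against a TypeList of
// ints: {"ports", "0"} yields the list schema followed by the TypeInt
// element schema, and {"ports", "#"} yields a TypeInt schema because the
// count index is always an int.
func exampleAddrToSchema() (elem, count []*Schema) {
	m := map[string]*Schema{
		"ports": {
			Type: TypeList,
			Elem: &Schema{Type: TypeInt},
		},
	}

	elem = addrToSchema([]string{"ports", "0"}, m)
	count = addrToSchema([]string{"ports", "#"}, m)
	return elem, count
}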
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
new file mode 100644
index 00000000..f958bbcb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
@@ -0,0 +1,333 @@
+package schema
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/mapstructure"
+)
+
+// ConfigFieldReader reads fields out of an untyped map[string]string to the
+// best of its ability. It also applies defaults from the Schema. (The other
+// field readers do not need default handling because they source fully
+// populated data structures.)
+type ConfigFieldReader struct {
+ Config *terraform.ResourceConfig
+ Schema map[string]*Schema
+
+ indexMaps map[string]map[string]int
+ once sync.Once
+}
+
+func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) })
+ return r.readField(address, false)
+}
+
+func (r *ConfigFieldReader) readField(
+ address []string, nested bool) (FieldReadResult, error) {
+ schemaList := addrToSchema(address, r.Schema)
+ if len(schemaList) == 0 {
+ return FieldReadResult{}, nil
+ }
+
+ if !nested {
+ // If we have a set anywhere in the address, then we need to
+ // read that set out in order and actually replace that part of
+ // the address with the real list index. i.e. set.50 might actually
+ // map to set.12 in the config, since it is in list order in the
+ // config, not indexed by set value.
+ for i, v := range schemaList {
+ // Sets are the only thing that causes this issue.
+ if v.Type != TypeSet {
+ continue
+ }
+
+ // If we're at the end of the list, then we don't have to worry
+ // about this because we're just requesting the whole set.
+ if i == len(schemaList)-1 {
+ continue
+ }
+
+ // If we're looking for the count, then ignore...
+ if address[i+1] == "#" {
+ continue
+ }
+
+ indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")]
+ if !ok {
+ // Get the set so we can get the index map that tells us the
+ // mapping of the hash code to the list index
+ _, err := r.readSet(address[:i+1], v)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ indexMap = r.indexMaps[strings.Join(address[:i+1], ".")]
+ }
+
+ index, ok := indexMap[address[i+1]]
+ if !ok {
+ return FieldReadResult{}, nil
+ }
+
+ address[i+1] = strconv.FormatInt(int64(index), 10)
+ }
+ }
+
+ k := strings.Join(address, ".")
+ schema := schemaList[len(schemaList)-1]
+
+ // If we're getting the single element of a promoted list, then
+ // check to see if we have a single element we need to promote.
+ if address[len(address)-1] == "0" && len(schemaList) > 1 {
+ lastSchema := schemaList[len(schemaList)-2]
+ if lastSchema.Type == TypeList && lastSchema.PromoteSingle {
+ k := strings.Join(address[:len(address)-1], ".")
+ result, err := r.readPrimitive(k, schema)
+ if err == nil {
+ return result, nil
+ }
+ }
+ }
+
+ switch schema.Type {
+ case TypeBool, TypeFloat, TypeInt, TypeString:
+ return r.readPrimitive(k, schema)
+ case TypeList:
+ // If we support promotion then we first check if we have a lone
+ // value that we must promote.
+ if schema.PromoteSingle {
+ result, err := r.readPrimitive(k, schema.Elem.(*Schema))
+ if err == nil && result.Exists {
+ result.Value = []interface{}{result.Value}
+ return result, nil
+ }
+ }
+
+ return readListField(&nestedConfigFieldReader{r}, address, schema)
+ case TypeMap:
+ return r.readMap(k, schema)
+ case TypeSet:
+ return r.readSet(address, schema)
+ case typeObject:
+ return readObjectField(
+ &nestedConfigFieldReader{r},
+ address, schema.Elem.(map[string]*Schema))
+ default:
+ panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+ }
+}
+
+func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
+ // We want both the raw value and the interpolated. We use the interpolated
+ // to store actual values and we use the raw one to check for
+ // computed keys. Actual values are obtained in the switch, depending on
+ // the type of the raw value.
+ mraw, ok := r.Config.GetRaw(k)
+ if !ok {
+ // check if this is from an interpolated field by seeing if it exists
+ // in the config
+ _, ok := r.Config.Get(k)
+ if !ok {
+ // this really doesn't exist
+ return FieldReadResult{}, nil
+ }
+
+ // We couldn't fetch the value from a nested data structure, so treat the
+ // raw value as an interpolation string. The mraw value is only used
+ // for the type switch below.
+ mraw = "${INTERPOLATED}"
+ }
+
+ result := make(map[string]interface{})
+ computed := false
+ switch m := mraw.(type) {
+ case string:
+ // This is a map which has come out of an interpolated variable, so we
+ // can just get the value directly from config. Values cannot be computed
+ // currently.
+ v, _ := r.Config.Get(k)
+
+ // If this isn't a map[string]interface, it must be computed.
+ mapV, ok := v.(map[string]interface{})
+ if !ok {
+ return FieldReadResult{
+ Exists: true,
+ Computed: true,
+ }, nil
+ }
+
+ // Otherwise we can proceed as usual.
+ for i, iv := range mapV {
+ result[i] = iv
+ }
+ case []interface{}:
+ for i, innerRaw := range m {
+ for ik := range innerRaw.(map[string]interface{}) {
+ key := fmt.Sprintf("%s.%d.%s", k, i, ik)
+ if r.Config.IsComputed(key) {
+ computed = true
+ break
+ }
+
+ v, _ := r.Config.Get(key)
+ result[ik] = v
+ }
+ }
+ case []map[string]interface{}:
+ for i, innerRaw := range m {
+ for ik := range innerRaw {
+ key := fmt.Sprintf("%s.%d.%s", k, i, ik)
+ if r.Config.IsComputed(key) {
+ computed = true
+ break
+ }
+
+ v, _ := r.Config.Get(key)
+ result[ik] = v
+ }
+ }
+ case map[string]interface{}:
+ for ik := range m {
+ key := fmt.Sprintf("%s.%s", k, ik)
+ if r.Config.IsComputed(key) {
+ computed = true
+ break
+ }
+
+ v, _ := r.Config.Get(key)
+ result[ik] = v
+ }
+ default:
+ panic(fmt.Sprintf("unknown type: %#v", mraw))
+ }
+
+ err := mapValuesToPrimitive(result, schema)
+ if err != nil {
+ return FieldReadResult{}, nil
+ }
+
+ var value interface{}
+ if !computed {
+ value = result
+ }
+
+ return FieldReadResult{
+ Value: value,
+ Exists: true,
+ Computed: computed,
+ }, nil
+}
+
+func (r *ConfigFieldReader) readPrimitive(
+ k string, schema *Schema) (FieldReadResult, error) {
+ raw, ok := r.Config.Get(k)
+ if !ok {
+ // Nothing in config, but we might still have a default from the schema
+ var err error
+ raw, err = schema.DefaultValue()
+ if err != nil {
+ return FieldReadResult{}, fmt.Errorf("%s, error loading default: %s", k, err)
+ }
+
+ if raw == nil {
+ return FieldReadResult{}, nil
+ }
+ }
+
+ var result string
+ if err := mapstructure.WeakDecode(raw, &result); err != nil {
+ return FieldReadResult{}, err
+ }
+
+ computed := r.Config.IsComputed(k)
+ returnVal, err := stringToPrimitive(result, computed, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ return FieldReadResult{
+ Value: returnVal,
+ Exists: true,
+ Computed: computed,
+ }, nil
+}
+
+func (r *ConfigFieldReader) readSet(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ indexMap := make(map[string]int)
+ // Create the set that will be our result
+ set := schema.ZeroValue().(*Set)
+
+ raw, err := readListField(&nestedConfigFieldReader{r}, address, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !raw.Exists {
+ return FieldReadResult{Value: set}, nil
+ }
+
+ // If the list is computed, the set is necessarily computed
+ if raw.Computed {
+ return FieldReadResult{
+ Value: set,
+ Exists: true,
+ Computed: raw.Computed,
+ }, nil
+ }
+
+ // Build up the set from the list elements
+ for i, v := range raw.Value.([]interface{}) {
+ // Check if any of the keys in this item are computed
+ computed := r.hasComputedSubKeys(
+ fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema)
+
+ code := set.add(v, computed)
+ indexMap[code] = i
+ }
+
+ r.indexMaps[strings.Join(address, ".")] = indexMap
+
+ return FieldReadResult{
+ Value: set,
+ Exists: true,
+ }, nil
+}
+
+// hasComputedSubKeys walks through a schema and returns whether or not the
+// given key contains any subkeys that are computed.
+func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool {
+ prefix := key + "."
+
+ switch t := schema.Elem.(type) {
+ case *Resource:
+ for k, schema := range t.Schema {
+ if r.Config.IsComputed(prefix + k) {
+ return true
+ }
+
+ if r.hasComputedSubKeys(prefix+k, schema) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// nestedConfigFieldReader is a funny little thing that just wraps a
+// ConfigFieldReader to call readField when ReadField is called so that
+// we don't recalculate the set rewrites in the address, which leads to
+// an infinite loop.
+type nestedConfigFieldReader struct {
+ Reader *ConfigFieldReader
+}
+
+func (r *nestedConfigFieldReader) ReadField(
+ address []string) (FieldReadResult, error) {
+ return r.Reader.readField(address, true)
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
new file mode 100644
index 00000000..16bbae29
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
@@ -0,0 +1,208 @@
+package schema
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/mapstructure"
+)
+
+// DiffFieldReader reads fields out of a diff structures.
+//
+// It also requires access to a Reader that reads fields from the structure
+// that the diff was derived from. This is usually the state. This is required
+// because a diff on its own doesn't have complete data about full objects
+// such as maps.
+//
+// The Source MUST be the data that the diff was derived from. If it isn't,
+// the behavior of this struct is undefined.
+//
+// Reading fields from a DiffFieldReader is identical to reading from
+// Source except the diff will be applied to the end result.
+//
+// The "Exists" field on the result will be set to true if the complete
+// field exists whether its from the source, diff, or a combination of both.
+// It cannot be determined whether a retrieved value is composed of
+// diff elements.
+type DiffFieldReader struct {
+ Diff *terraform.InstanceDiff
+ Source FieldReader
+ Schema map[string]*Schema
+}
+
+func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ schemaList := addrToSchema(address, r.Schema)
+ if len(schemaList) == 0 {
+ return FieldReadResult{}, nil
+ }
+
+ schema := schemaList[len(schemaList)-1]
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ return r.readPrimitive(address, schema)
+ case TypeList:
+ return readListField(r, address, schema)
+ case TypeMap:
+ return r.readMap(address, schema)
+ case TypeSet:
+ return r.readSet(address, schema)
+ case typeObject:
+ return readObjectField(r, address, schema.Elem.(map[string]*Schema))
+ default:
+ panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
+ }
+}
+
+func (r *DiffFieldReader) readMap(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ result := make(map[string]interface{})
+ resultSet := false
+
+ // First read the map from the underlying source
+ source, err := r.Source.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if source.Exists {
+ result = source.Value.(map[string]interface{})
+ resultSet = true
+ }
+
+ // Next, read all the elements we have in our diff, and apply
+ // the diff to our result.
+ prefix := strings.Join(address, ".") + "."
+ for k, v := range r.Diff.Attributes {
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+ if strings.HasPrefix(k, prefix+"%") {
+ // Ignore the count field
+ continue
+ }
+
+ resultSet = true
+
+ k = k[len(prefix):]
+ if v.NewRemoved {
+ delete(result, k)
+ continue
+ }
+
+ result[k] = v.New
+ }
+
+ err = mapValuesToPrimitive(result, schema)
+ if err != nil {
+ return FieldReadResult{}, nil
+ }
+
+ var resultVal interface{}
+ if resultSet {
+ resultVal = result
+ }
+
+ return FieldReadResult{
+ Value: resultVal,
+ Exists: resultSet,
+ }, nil
+}
+
+func (r *DiffFieldReader) readPrimitive(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ result, err := r.Source.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ attrD, ok := r.Diff.Attributes[strings.Join(address, ".")]
+ if !ok {
+ return result, nil
+ }
+
+ var resultVal string
+ if !attrD.NewComputed {
+ resultVal = attrD.New
+ if attrD.NewExtra != nil {
+ result.ValueProcessed = resultVal
+ if err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil {
+ return FieldReadResult{}, err
+ }
+ }
+ }
+
+ result.Computed = attrD.NewComputed
+ result.Exists = true
+ result.Value, err = stringToPrimitive(resultVal, false, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ return result, nil
+}
+
+func (r *DiffFieldReader) readSet(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ prefix := strings.Join(address, ".") + "."
+
+ // Create the set that will be our result
+ set := schema.ZeroValue().(*Set)
+
+ // Go through the map and find all the set items
+ for k, d := range r.Diff.Attributes {
+ if d.NewRemoved {
+ // If the field is removed, we always ignore it
+ continue
+ }
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+ if strings.HasSuffix(k, "#") {
+ // Ignore any count field
+ continue
+ }
+
+ // Split the key, since it might be a sub-object like "idx.field"
+ parts := strings.Split(k[len(prefix):], ".")
+ idx := parts[0]
+
+ raw, err := r.ReadField(append(address, idx))
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !raw.Exists {
+ // This shouldn't happen because we just verified it does exist
+ panic("missing field in set: " + k + "." + idx)
+ }
+
+ set.Add(raw.Value)
+ }
+
+ // Determine if the set "exists". It exists if there are items or if
+ // the diff explicitly wanted it empty.
+ exists := set.Len() > 0
+ if !exists {
+ // We could check if the diff value is "0" here but I think the
+ // existence of "#" on its own is enough to show it existed. This
+ // protects us in the future from the zero value changing from
+ // "0" to "" breaking us (if that were to happen).
+ if _, ok := r.Diff.Attributes[prefix+"#"]; ok {
+ exists = true
+ }
+ }
+
+ if !exists {
+ result, err := r.Source.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if result.Exists {
+ return result, nil
+ }
+ }
+
+ return FieldReadResult{
+ Value: set,
+ Exists: exists,
+ }, nil
+}
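
A hypothetical sketch of a DiffFieldReader layered over state: the MapFieldReader source holds the old value and the diff supplies the new one, so the read returns the post-diff value. The attribute name and values are placeholders.

package example

import (
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

// readAfterDiff reads "color" through a diff that changes it from "red" to
// "blue"; the result is the diffed value.
func readAfterDiff() (interface{}, error) {
	s := map[string]*schema.Schema{
		"color": {Type: schema.TypeString, Optional: true},
	}

	r := &schema.DiffFieldReader{
		Schema: s,
		Source: &schema.MapFieldReader{
			Schema: s,
			Map:    schema.BasicMapReader(map[string]string{"color": "red"}),
		},
		Diff: &terraform.InstanceDiff{
			Attributes: map[string]*terraform.ResourceAttrDiff{
				"color": {Old: "red", New: "blue"},
			},
		},
	}

	result, err := r.ReadField([]string{"color"})
	if err != nil {
		return nil, err
	}
	return result.Value, nil // "blue"
}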
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
new file mode 100644
index 00000000..95339810
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
@@ -0,0 +1,232 @@
+package schema
+
+import (
+ "fmt"
+ "strings"
+)
+
+// MapFieldReader reads fields out of an untyped map[string]string to
+// the best of its ability.
+type MapFieldReader struct {
+ Map MapReader
+ Schema map[string]*Schema
+}
+
+func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ k := strings.Join(address, ".")
+ schemaList := addrToSchema(address, r.Schema)
+ if len(schemaList) == 0 {
+ return FieldReadResult{}, nil
+ }
+
+ schema := schemaList[len(schemaList)-1]
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ return r.readPrimitive(address, schema)
+ case TypeList:
+ return readListField(r, address, schema)
+ case TypeMap:
+ return r.readMap(k, schema)
+ case TypeSet:
+ return r.readSet(address, schema)
+ case typeObject:
+ return readObjectField(r, address, schema.Elem.(map[string]*Schema))
+ default:
+ panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+ }
+}
+
+func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
+ result := make(map[string]interface{})
+ resultSet := false
+
+ // If the name of the map field is directly in the map with an
+ // empty string, it means that the map is being deleted, so mark
+ // that it is set.
+ if v, ok := r.Map.Access(k); ok && v == "" {
+ resultSet = true
+ }
+
+ prefix := k + "."
+ r.Map.Range(func(k, v string) bool {
+ if strings.HasPrefix(k, prefix) {
+ resultSet = true
+
+ key := k[len(prefix):]
+ if key != "%" && key != "#" {
+ result[key] = v
+ }
+ }
+
+ return true
+ })
+
+ err := mapValuesToPrimitive(result, schema)
+ if err != nil {
+ return FieldReadResult{}, nil
+ }
+
+ var resultVal interface{}
+ if resultSet {
+ resultVal = result
+ }
+
+ return FieldReadResult{
+ Value: resultVal,
+ Exists: resultSet,
+ }, nil
+}
+
+func (r *MapFieldReader) readPrimitive(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ k := strings.Join(address, ".")
+ result, ok := r.Map.Access(k)
+ if !ok {
+ return FieldReadResult{}, nil
+ }
+
+ returnVal, err := stringToPrimitive(result, false, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ return FieldReadResult{
+ Value: returnVal,
+ Exists: true,
+ }, nil
+}
+
+func (r *MapFieldReader) readSet(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ // Get the number of elements in the list
+ countRaw, err := r.readPrimitive(
+ append(address, "#"), &Schema{Type: TypeInt})
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !countRaw.Exists {
+ // No count means we have no list
+ countRaw.Value = 0
+ }
+
+ // Create the set that will be our result
+ set := schema.ZeroValue().(*Set)
+
+ // If we have an empty list, then return an empty list
+ if countRaw.Computed || countRaw.Value.(int) == 0 {
+ return FieldReadResult{
+ Value: set,
+ Exists: countRaw.Exists,
+ Computed: countRaw.Computed,
+ }, nil
+ }
+
+ // Go through the map and find all the set items
+ prefix := strings.Join(address, ".") + "."
+ countExpected := countRaw.Value.(int)
+ countActual := make(map[string]struct{})
+ completed := r.Map.Range(func(k, _ string) bool {
+ if !strings.HasPrefix(k, prefix) {
+ return true
+ }
+ if strings.HasPrefix(k, prefix+"#") {
+ // Ignore the count field
+ return true
+ }
+
+ // Split the key, since it might be a sub-object like "idx.field"
+ parts := strings.Split(k[len(prefix):], ".")
+ idx := parts[0]
+
+ var raw FieldReadResult
+ raw, err = r.ReadField(append(address, idx))
+ if err != nil {
+ return false
+ }
+ if !raw.Exists {
+ // This shouldn't happen because we just verified it does exist
+ panic("missing field in set: " + k + "." + idx)
+ }
+
+ set.Add(raw.Value)
+
+ // Due to the way multimap readers work, if we've seen the number
+ // of fields we expect, then exit so that we don't read later values.
+ // For example: the "set" map might have "ports.#", "ports.0", and
+ // "ports.1", but the "state" map might have those plus "ports.2".
+ // We don't want "ports.2"
+ countActual[idx] = struct{}{}
+ if len(countActual) >= countExpected {
+ return false
+ }
+
+ return true
+ })
+ if !completed && err != nil {
+ return FieldReadResult{}, err
+ }
+
+ return FieldReadResult{
+ Value: set,
+ Exists: true,
+ }, nil
+}
+
+// MapReader is an interface that is given to MapFieldReader for accessing
+// a "map". This can be used to have alternate implementations. For a basic
+// map[string]string, use BasicMapReader.
+type MapReader interface {
+ Access(string) (string, bool)
+ Range(func(string, string) bool) bool
+}
+
+// BasicMapReader implements MapReader for a single map.
+type BasicMapReader map[string]string
+
+func (r BasicMapReader) Access(k string) (string, bool) {
+ v, ok := r[k]
+ return v, ok
+}
+
+func (r BasicMapReader) Range(f func(string, string) bool) bool {
+ for k, v := range r {
+ if cont := f(k, v); !cont {
+ return false
+ }
+ }
+
+ return true
+}
+
+// MultiMapReader reads over multiple maps, preferring keys that are
+// found earlier (lower number index) vs. later (higher number index)
+type MultiMapReader []map[string]string
+
+func (r MultiMapReader) Access(k string) (string, bool) {
+ for _, m := range r {
+ if v, ok := m[k]; ok {
+ return v, ok
+ }
+ }
+
+ return "", false
+}
+
+func (r MultiMapReader) Range(f func(string, string) bool) bool {
+ done := make(map[string]struct{})
+ for _, m := range r {
+ for k, v := range m {
+ if _, ok := done[k]; ok {
+ continue
+ }
+
+ if cont := f(k, v); !cont {
+ return false
+ }
+
+ done[k] = struct{}{}
+ }
+ }
+
+ return true
+}
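
A hypothetical sketch of a MapFieldReader over a flatmapped snippet: "ports.#" carries the element count and "ports.N" the elements, so reading the list address returns the typed slice. The keys and values are placeholders.

package example

import "github.com/hashicorp/terraform/helper/schema"

// readPorts decodes a flatmapped list of ints back into []interface{}{80, 443}.
func readPorts() ([]interface{}, error) {
	r := &schema.MapFieldReader{
		Schema: map[string]*schema.Schema{
			"ports": {
				Type: schema.TypeList,
				Elem: &schema.Schema{Type: schema.TypeInt},
			},
		},
		Map: schema.BasicMapReader(map[string]string{
			"ports.#": "2",
			"ports.0": "80",
			"ports.1": "443",
		}),
	}

	result, err := r.ReadField([]string{"ports"})
	if err != nil {
		return nil, err
	}
	return result.Value.([]interface{}), nil
}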
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
new file mode 100644
index 00000000..89ad3a86
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
@@ -0,0 +1,63 @@
+package schema
+
+import (
+ "fmt"
+)
+
+// MultiLevelFieldReader reads from other field readers,
+// merging their results along the way in a specific order. You can specify
+// "levels" and name them in order to read only an exact level or up to
+// a specific level.
+//
+// This is useful for saying things such as "read the field from the state
+// and config and merge them" or "read the latest value of the field".
+type MultiLevelFieldReader struct {
+ Readers map[string]FieldReader
+ Levels []string
+}
+
+func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1])
+}
+
+func (r *MultiLevelFieldReader) ReadFieldExact(
+ address []string, level string) (FieldReadResult, error) {
+ reader, ok := r.Readers[level]
+ if !ok {
+ return FieldReadResult{}, fmt.Errorf(
+ "Unknown reader level: %s", level)
+ }
+
+ result, err := reader.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, fmt.Errorf(
+ "Error reading level %s: %s", level, err)
+ }
+
+ return result, nil
+}
+
+func (r *MultiLevelFieldReader) ReadFieldMerge(
+ address []string, level string) (FieldReadResult, error) {
+ var result FieldReadResult
+ for _, l := range r.Levels {
+ if r, ok := r.Readers[l]; ok {
+ out, err := r.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, fmt.Errorf(
+ "Error reading level %s: %s", l, err)
+ }
+
+ // TODO: computed
+ if out.Exists {
+ result = out
+ }
+ }
+
+ if l == level {
+ break
+ }
+ }
+
+ return result, nil
+}
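
A hypothetical sketch of merging two named levels: the later level wins for any field it defines, which is how older data can be overlaid with more recent data. The level names and values are placeholders.

package example

import "github.com/hashicorp/terraform/helper/schema"

// readMerged reads "color" up to the "override" level, so the override
// value shadows the one in "state".
func readMerged() (interface{}, error) {
	s := map[string]*schema.Schema{
		"color": {Type: schema.TypeString, Optional: true},
	}

	r := &schema.MultiLevelFieldReader{
		Levels: []string{"state", "override"},
		Readers: map[string]schema.FieldReader{
			"state": &schema.MapFieldReader{
				Schema: s,
				Map:    schema.BasicMapReader(map[string]string{"color": "red"}),
			},
			"override": &schema.MapFieldReader{
				Schema: s,
				Map:    schema.BasicMapReader(map[string]string{"color": "blue"}),
			},
		},
	}

	result, err := r.ReadFieldMerge([]string{"color"}, "override")
	if err != nil {
		return nil, err
	}
	return result.Value, nil // "blue"
}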
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
new file mode 100644
index 00000000..9abc41b5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
@@ -0,0 +1,8 @@
+package schema
+
+// FieldWriters are responsible for writing fields by address into
+// a proper typed representation. ResourceData uses this to write new data
+// into existing sources.
+type FieldWriter interface {
+ WriteField([]string, interface{}) error
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
new file mode 100644
index 00000000..689ed8d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
@@ -0,0 +1,319 @@
+package schema
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+// MapFieldWriter writes data into a single map[string]string structure.
+type MapFieldWriter struct {
+ Schema map[string]*Schema
+
+ lock sync.Mutex
+ result map[string]string
+}
+
+// Map returns the underlying map that is being written to.
+func (w *MapFieldWriter) Map() map[string]string {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ if w.result == nil {
+ w.result = make(map[string]string)
+ }
+
+ return w.result
+}
+
+func (w *MapFieldWriter) unsafeWriteField(addr string, value string) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ if w.result == nil {
+ w.result = make(map[string]string)
+ }
+
+ w.result[addr] = value
+}
+
+func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ if w.result == nil {
+ w.result = make(map[string]string)
+ }
+
+ schemaList := addrToSchema(addr, w.Schema)
+ if len(schemaList) == 0 {
+ return fmt.Errorf("Invalid address to set: %#v", addr)
+ }
+
+ // If we're setting anything other than a list root or set root,
+ // then disallow it.
+ for _, schema := range schemaList[:len(schemaList)-1] {
+ if schema.Type == TypeList {
+ return fmt.Errorf(
+ "%s: can only set full list",
+ strings.Join(addr, "."))
+ }
+
+ if schema.Type == TypeMap {
+ return fmt.Errorf(
+ "%s: can only set full map",
+ strings.Join(addr, "."))
+ }
+
+ if schema.Type == TypeSet {
+ return fmt.Errorf(
+ "%s: can only set full set",
+ strings.Join(addr, "."))
+ }
+ }
+
+ return w.set(addr, value)
+}
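
A hypothetical sketch of the flattening WriteField performs: setting a whole list produces the count key plus one key per element in the resulting map[string]string. The schema and values are placeholders.

package example

import "github.com/hashicorp/terraform/helper/schema"

// writePorts writes the full "ports" list; the returned map contains
// "ports.#": "2", "ports.0": "80", and "ports.1": "443".
func writePorts() (map[string]string, error) {
	w := &schema.MapFieldWriter{
		Schema: map[string]*schema.Schema{
			"ports": {
				Type: schema.TypeList,
				Elem: &schema.Schema{Type: schema.TypeInt},
			},
		},
	}

	if err := w.WriteField([]string{"ports"}, []interface{}{80, 443}); err != nil {
		return nil, err
	}
	return w.Map(), nil
}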
+
+func (w *MapFieldWriter) set(addr []string, value interface{}) error {
+ schemaList := addrToSchema(addr, w.Schema)
+ if len(schemaList) == 0 {
+ return fmt.Errorf("Invalid address to set: %#v", addr)
+ }
+
+ schema := schemaList[len(schemaList)-1]
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ return w.setPrimitive(addr, value, schema)
+ case TypeList:
+ return w.setList(addr, value, schema)
+ case TypeMap:
+ return w.setMap(addr, value, schema)
+ case TypeSet:
+ return w.setSet(addr, value, schema)
+ case typeObject:
+ return w.setObject(addr, value, schema)
+ default:
+ panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
+ }
+}
+
+func (w *MapFieldWriter) setList(
+ addr []string,
+ v interface{},
+ schema *Schema) error {
+ k := strings.Join(addr, ".")
+ setElement := func(idx string, value interface{}) error {
+ addrCopy := make([]string, len(addr), len(addr)+1)
+ copy(addrCopy, addr)
+ return w.set(append(addrCopy, idx), value)
+ }
+
+ var vs []interface{}
+ if err := mapstructure.Decode(v, &vs); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+
+ // Set the entire list.
+ var err error
+ for i, elem := range vs {
+ is := strconv.FormatInt(int64(i), 10)
+ err = setElement(is, elem)
+ if err != nil {
+ break
+ }
+ }
+ if err != nil {
+ for i := range vs {
+ is := strconv.FormatInt(int64(i), 10)
+ setElement(is, nil)
+ }
+
+ return err
+ }
+
+ w.result[k+".#"] = strconv.FormatInt(int64(len(vs)), 10)
+ return nil
+}
+
+func (w *MapFieldWriter) setMap(
+ addr []string,
+ value interface{},
+ schema *Schema) error {
+ k := strings.Join(addr, ".")
+ v := reflect.ValueOf(value)
+ vs := make(map[string]interface{})
+
+ if value == nil {
+ // The empty string here means the map is removed.
+ w.result[k] = ""
+ return nil
+ }
+
+ if v.Kind() != reflect.Map {
+ return fmt.Errorf("%s: must be a map", k)
+ }
+ if v.Type().Key().Kind() != reflect.String {
+ return fmt.Errorf("%s: keys must strings", k)
+ }
+ for _, mk := range v.MapKeys() {
+ mv := v.MapIndex(mk)
+ vs[mk.String()] = mv.Interface()
+ }
+
+ // Remove the pure key since we're setting the full map value
+ delete(w.result, k)
+
+ // Set each subkey
+ addrCopy := make([]string, len(addr), len(addr)+1)
+ copy(addrCopy, addr)
+ for subKey, v := range vs {
+ if err := w.set(append(addrCopy, subKey), v); err != nil {
+ return err
+ }
+ }
+
+ // Set the count
+ w.result[k+".%"] = strconv.Itoa(len(vs))
+
+ return nil
+}
+
+func (w *MapFieldWriter) setObject(
+ addr []string,
+ value interface{},
+ schema *Schema) error {
+ // Set the entire object. First decode into a proper structure
+ var v map[string]interface{}
+ if err := mapstructure.Decode(value, &v); err != nil {
+ return fmt.Errorf("%s: %s", strings.Join(addr, "."), err)
+ }
+
+ // Make space for additional elements in the address
+ addrCopy := make([]string, len(addr), len(addr)+1)
+ copy(addrCopy, addr)
+
+ // Set each element in turn
+ var err error
+ for k1, v1 := range v {
+ if err = w.set(append(addrCopy, k1), v1); err != nil {
+ break
+ }
+ }
+ if err != nil {
+ for k1 := range v {
+ w.set(append(addrCopy, k1), nil)
+ }
+ }
+
+ return err
+}
+
+func (w *MapFieldWriter) setPrimitive(
+ addr []string,
+ v interface{},
+ schema *Schema) error {
+ k := strings.Join(addr, ".")
+
+ if v == nil {
+ // The empty string here means the value is removed.
+ w.result[k] = ""
+ return nil
+ }
+
+ var set string
+ switch schema.Type {
+ case TypeBool:
+ var b bool
+ if err := mapstructure.Decode(v, &b); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+
+ set = strconv.FormatBool(b)
+ case TypeString:
+ if err := mapstructure.Decode(v, &set); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ case TypeInt:
+ var n int
+ if err := mapstructure.Decode(v, &n); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ set = strconv.FormatInt(int64(n), 10)
+ case TypeFloat:
+ var n float64
+ if err := mapstructure.Decode(v, &n); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ set = strconv.FormatFloat(float64(n), 'G', -1, 64)
+ default:
+ return fmt.Errorf("Unknown type: %#v", schema.Type)
+ }
+
+ w.result[k] = set
+ return nil
+}
+
+func (w *MapFieldWriter) setSet(
+ addr []string,
+ value interface{},
+ schema *Schema) error {
+ addrCopy := make([]string, len(addr), len(addr)+1)
+ copy(addrCopy, addr)
+ k := strings.Join(addr, ".")
+
+ if value == nil {
+ w.result[k+".#"] = "0"
+ return nil
+ }
+
+ // If it is a slice, then we have to turn it into a *Set so that
+ // we get the proper order back based on the hash code.
+ if v := reflect.ValueOf(value); v.Kind() == reflect.Slice {
+ // Build a temp schema and writer to use for the conversion
+ tempSchema := *schema
+ tempSchema.Type = TypeList
+ tempSchemaMap := map[string]*Schema{addr[0]: &tempSchema}
+ tempW := &MapFieldWriter{Schema: tempSchemaMap}
+
+ // Set the entire list, this lets us get sane values out of it
+ if err := tempW.WriteField(addr, value); err != nil {
+ return err
+ }
+
+ // Build the set by going over the list items in order and
+ // hashing them into the set. The reason we go over the list and
+ // not the `value` directly is because this forces all types
+ // to become []interface{} (generic) instead of []string, which
+ // most hash functions are expecting.
+ s := schema.ZeroValue().(*Set)
+ tempR := &MapFieldReader{
+ Map: BasicMapReader(tempW.Map()),
+ Schema: tempSchemaMap,
+ }
+ for i := 0; i < v.Len(); i++ {
+ is := strconv.FormatInt(int64(i), 10)
+ result, err := tempR.ReadField(append(addrCopy, is))
+ if err != nil {
+ return err
+ }
+ if !result.Exists {
+ panic("set item just set doesn't exist")
+ }
+
+ s.Add(result.Value)
+ }
+
+ value = s
+ }
+
+ for code, elem := range value.(*Set).m {
+ if err := w.set(append(addrCopy, code), elem); err != nil {
+ return err
+ }
+ }
+
+ w.result[k+".#"] = strconv.Itoa(value.(*Set).Len())
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
new file mode 100644
index 00000000..3a976293
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
@@ -0,0 +1,36 @@
+// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT.
+
+package schema
+
+import "fmt"
+
+const (
+ _getSource_name_0 = "getSourceStategetSourceConfig"
+ _getSource_name_1 = "getSourceDiff"
+ _getSource_name_2 = "getSourceSet"
+ _getSource_name_3 = "getSourceLevelMaskgetSourceExact"
+)
+
+var (
+ _getSource_index_0 = [...]uint8{0, 14, 29}
+ _getSource_index_1 = [...]uint8{0, 13}
+ _getSource_index_2 = [...]uint8{0, 12}
+ _getSource_index_3 = [...]uint8{0, 18, 32}
+)
+
+func (i getSource) String() string {
+ switch {
+ case 1 <= i && i <= 2:
+ i -= 1
+ return _getSource_name_0[_getSource_index_0[i]:_getSource_index_0[i+1]]
+ case i == 4:
+ return _getSource_name_1
+ case i == 8:
+ return _getSource_name_2
+ case 15 <= i && i <= 16:
+ i -= 15
+ return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]]
+ default:
+ return fmt.Sprintf("getSource(%d)", i)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
new file mode 100644
index 00000000..d52d2f5f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
@@ -0,0 +1,400 @@
+package schema
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Provider represents a resource provider in Terraform, and properly
+// implements all of the ResourceProvider API.
+//
+// By defining a schema for the configuration of the provider, the
+// map of supporting resources, and a configuration function, the schema
+// framework takes over and handles all the provider operations for you.
+//
+// After defining the provider structure, it is unlikely that you'll require any
+// of the methods on Provider itself.
+type Provider struct {
+ // Schema is the schema for the configuration of this provider. If this
+ // provider has no configuration, this can be omitted.
+ //
+ // The keys of this map are the configuration keys, and the value is
+ // the schema describing the value of the configuration.
+ Schema map[string]*Schema
+
+ // ResourcesMap is the list of available resources that this provider
+ // can manage, along with their Resource structure defining their
+ // own schemas and CRUD operations.
+ //
+ // Provider automatically handles routing operations such as Apply,
+ // Diff, etc. to the proper resource.
+ ResourcesMap map[string]*Resource
+
+ // DataSourcesMap is the collection of available data sources that
+ // this provider implements, with a Resource instance defining
+ // the schema and Read operation of each.
+ //
+ // Resource instances for data sources must have a Read function
+ // and must *not* implement Create, Update or Delete.
+ DataSourcesMap map[string]*Resource
+
+ // ConfigureFunc is a function for configuring the provider. If the
+ // provider doesn't need to be configured, this can be omitted.
+ //
+ // See the ConfigureFunc documentation for more information.
+ ConfigureFunc ConfigureFunc
+
+ // MetaReset is called by TestReset to reset any state stored in the meta
+ // interface. This is especially important if the StopContext is stored by
+ // the provider.
+ MetaReset func() error
+
+ meta interface{}
+
+ // a mutex is required because TestReset can directly replace the stopCtx
+ stopMu sync.Mutex
+ stopCtx context.Context
+ stopCtxCancel context.CancelFunc
+ stopOnce sync.Once
+}
+
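+// A minimal sketch of how a provider package might assemble a Provider,
+// assuming this package is imported as "schema"; the resource, data source
+// and helper names below are hypothetical:
+//
+//	func ExampleProvider() *schema.Provider {
+//		return &schema.Provider{
+//			Schema: map[string]*schema.Schema{
+//				"api_key": {Type: schema.TypeString, Required: true},
+//			},
+//			ResourcesMap: map[string]*schema.Resource{
+//				"example_server": resourceExampleServer(),
+//			},
+//			DataSourcesMap: map[string]*schema.Resource{
+//				"example_image": dataSourceExampleImage(),
+//			},
+//			ConfigureFunc: providerConfigure,
+//		}
+//	}
+//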
+// ConfigureFunc is the function used to configure a Provider.
+//
+// The interface{} value returned by this function is stored and passed into
+// the subsequent resources as the meta parameter. This return value is
+// usually used to pass along a configured API client, a configuration
+// structure, etc.
+type ConfigureFunc func(*ResourceData) (interface{}, error)
+
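+// A minimal sketch of a ConfigureFunc, assuming a hypothetical newClient
+// constructor; whatever it returns is later handed to each resource's CRUD
+// functions as the meta parameter:
+//
+//	func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+//		client, err := newClient(d.Get("api_key").(string))
+//		if err != nil {
+//			return nil, err
+//		}
+//		return client, nil
+//	}
+//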
+// InternalValidate should be called to validate the structure
+// of the provider.
+//
+// This should be called in a unit test for any provider to verify
+// before release that a provider is properly configured for use with
+// this library.
+func (p *Provider) InternalValidate() error {
+ if p == nil {
+ return errors.New("provider is nil")
+ }
+
+ var validationErrors error
+ sm := schemaMap(p.Schema)
+ if err := sm.InternalValidate(sm); err != nil {
+ validationErrors = multierror.Append(validationErrors, err)
+ }
+
+ for k, r := range p.ResourcesMap {
+ if err := r.InternalValidate(nil, true); err != nil {
+ validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err))
+ }
+ }
+
+ for k, r := range p.DataSourcesMap {
+ if err := r.InternalValidate(nil, false); err != nil {
+ validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err))
+ }
+ }
+
+ return validationErrors
+}
+
+// Meta returns the metadata associated with this provider that was
+// returned by the Configure call. It will be nil until Configure is called.
+func (p *Provider) Meta() interface{} {
+ return p.meta
+}
+
+// SetMeta can be used to forcefully set the Meta object of the provider.
+// Note that if Configure is called the return value will override anything
+// set here.
+func (p *Provider) SetMeta(v interface{}) {
+ p.meta = v
+}
+
+// Stopped reports whether the provider has been stopped or not.
+func (p *Provider) Stopped() bool {
+ ctx := p.StopContext()
+ select {
+ case <-ctx.Done():
+ return true
+ default:
+ return false
+ }
+}
+
+// StopContext returns a context that is canceled once the provider is stopped.
+func (p *Provider) StopContext() context.Context {
+ p.stopOnce.Do(p.stopInit)
+
+ p.stopMu.Lock()
+ defer p.stopMu.Unlock()
+
+ return p.stopCtx
+}
+
+func (p *Provider) stopInit() {
+ p.stopMu.Lock()
+ defer p.stopMu.Unlock()
+
+ p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
+}
+
+// Stop implementation of terraform.ResourceProvider interface.
+func (p *Provider) Stop() error {
+ p.stopOnce.Do(p.stopInit)
+
+ p.stopMu.Lock()
+ defer p.stopMu.Unlock()
+
+ p.stopCtxCancel()
+ return nil
+}
+
+// TestReset resets any state stored in the Provider, and will call TestReset
+// on Meta if it implements the TestProvider interface.
+// This may be used to reset the schema.Provider at the start of a test, and is
+// automatically called by resource.Test.
+func (p *Provider) TestReset() error {
+ p.stopInit()
+ if p.MetaReset != nil {
+ return p.MetaReset()
+ }
+ return nil
+}
+
+// Input implementation of terraform.ResourceProvider interface.
+func (p *Provider) Input(
+ input terraform.UIInput,
+ c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+ return schemaMap(p.Schema).Input(input, c)
+}
+
+// Validate implementation of terraform.ResourceProvider interface.
+func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ if err := p.InternalValidate(); err != nil {
+ return nil, []error{fmt.Errorf(
+ "Internal validation of the provider failed! This is always a bug\n"+
+ "with the provider itself, and not a user issue. Please report\n"+
+ "this bug:\n\n%s", err)}
+ }
+
+ return schemaMap(p.Schema).Validate(c)
+}
+
+// ValidateResource implementation of terraform.ResourceProvider interface.
+func (p *Provider) ValidateResource(
+ t string, c *terraform.ResourceConfig) ([]string, []error) {
+ r, ok := p.ResourcesMap[t]
+ if !ok {
+ return nil, []error{fmt.Errorf(
+ "Provider doesn't support resource: %s", t)}
+ }
+
+ return r.Validate(c)
+}
+
+// Configure implementation of terraform.ResourceProvider interface.
+func (p *Provider) Configure(c *terraform.ResourceConfig) error {
+ // No configuration
+ if p.ConfigureFunc == nil {
+ return nil
+ }
+
+ sm := schemaMap(p.Schema)
+
+ // Get a ResourceData for this configuration. To do this, we actually
+ // generate an intermediary "diff" although that is never exposed.
+ diff, err := sm.Diff(nil, c)
+ if err != nil {
+ return err
+ }
+
+ data, err := sm.Data(nil, diff)
+ if err != nil {
+ return err
+ }
+
+ meta, err := p.ConfigureFunc(data)
+ if err != nil {
+ return err
+ }
+
+ p.meta = meta
+ return nil
+}
+
+// Apply implementation of terraform.ResourceProvider interface.
+func (p *Provider) Apply(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+ r, ok := p.ResourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+ }
+
+ return r.Apply(s, d, p.meta)
+}
+
+// Diff implementation of terraform.ResourceProvider interface.
+func (p *Provider) Diff(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+ r, ok := p.ResourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+ }
+
+ return r.Diff(s, c)
+}
+
+// Refresh implementation of terraform.ResourceProvider interface.
+func (p *Provider) Refresh(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState) (*terraform.InstanceState, error) {
+ r, ok := p.ResourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+ }
+
+ return r.Refresh(s, p.meta)
+}
+
+// Resources implementation of terraform.ResourceProvider interface.
+func (p *Provider) Resources() []terraform.ResourceType {
+ keys := make([]string, 0, len(p.ResourcesMap))
+ for k := range p.ResourcesMap {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ result := make([]terraform.ResourceType, 0, len(keys))
+ for _, k := range keys {
+ resource := p.ResourcesMap[k]
+
+ // This isn't really possible (it'd fail InternalValidate), but
+ // we do it anyway to avoid a panic.
+ if resource == nil {
+ resource = &Resource{}
+ }
+
+ result = append(result, terraform.ResourceType{
+ Name: k,
+ Importable: resource.Importer != nil,
+ })
+ }
+
+ return result
+}
+
+func (p *Provider) ImportState(
+ info *terraform.InstanceInfo,
+ id string) ([]*terraform.InstanceState, error) {
+ // Find the resource
+ r, ok := p.ResourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+ }
+
+ // If it doesn't support import, error
+ if r.Importer == nil {
+ return nil, fmt.Errorf("resource %s doesn't support import", info.Type)
+ }
+
+ // Create the data
+ data := r.Data(nil)
+ data.SetId(id)
+ data.SetType(info.Type)
+
+ // Call the import function
+ results := []*ResourceData{data}
+ if r.Importer.State != nil {
+ var err error
+ results, err = r.Importer.State(data, p.meta)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Convert the results to InstanceState values and return it
+ states := make([]*terraform.InstanceState, len(results))
+ for i, r := range results {
+ states[i] = r.State()
+ }
+
+ // Verify that all are non-nil. If there are any nil the error
+ // isn't obvious so we circumvent that with a friendlier error.
+ for _, s := range states {
+ if s == nil {
+ return nil, fmt.Errorf(
+ "nil entry in ImportState results. This is always a bug with\n" +
+ "the resource that is being imported. Please report this as\n" +
+ "a bug to Terraform.")
+ }
+ }
+
+ return states, nil
+}
+
+// ValidateDataSource implementation of terraform.ResourceProvider interface.
+func (p *Provider) ValidateDataSource(
+ t string, c *terraform.ResourceConfig) ([]string, []error) {
+ r, ok := p.DataSourcesMap[t]
+ if !ok {
+ return nil, []error{fmt.Errorf(
+ "Provider doesn't support data source: %s", t)}
+ }
+
+ return r.Validate(c)
+}
+
+// ReadDataDiff implementation of terraform.ResourceProvider interface.
+func (p *Provider) ReadDataDiff(
+ info *terraform.InstanceInfo,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+
+ r, ok := p.DataSourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown data source: %s", info.Type)
+ }
+
+ return r.Diff(nil, c)
+}
+
+// ReadDataApply implementation of terraform.ResourceProvider interface.
+func (p *Provider) ReadDataApply(
+ info *terraform.InstanceInfo,
+ d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+
+ r, ok := p.DataSourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown data source: %s", info.Type)
+ }
+
+ return r.ReadDataApply(d, p.meta)
+}
+
+// DataSources implementation of terraform.ResourceProvider interface.
+func (p *Provider) DataSources() []terraform.DataSource {
+ keys := make([]string, 0, len(p.DataSourcesMap))
+ for k := range p.DataSourcesMap {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ result := make([]terraform.DataSource, 0, len(keys))
+ for _, k := range keys {
+ result = append(result, terraform.DataSource{
+ Name: k,
+ })
+ }
+
+ return result
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
new file mode 100644
index 00000000..c1564a21
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
@@ -0,0 +1,180 @@
+package schema
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Provisioner represents a resource provisioner in Terraform and properly
+// implements all of the ResourceProvisioner API.
+//
+// This higher level structure makes it much easier to implement a new or
+// custom provisioner for Terraform.
+//
+// The function callbacks for this structure are all passed a context object.
+// This context object has a number of pre-defined values that can be accessed
+// via the global functions defined in context.go.
+type Provisioner struct {
+ // ConnSchema is the schema for the connection settings for this
+ // provisioner.
+ //
+ // The keys of this map are the configuration keys, and the value is
+ // the schema describing the value of the configuration.
+ //
+ // NOTE: The value of connection keys can only be strings for now.
+ ConnSchema map[string]*Schema
+
+ // Schema is the schema for the usage of this provisioner.
+ //
+ // The keys of this map are the configuration keys, and the value is
+ // the schema describing the value of the configuration.
+ Schema map[string]*Schema
+
+ // ApplyFunc is the function for executing the provisioner. This is required.
+ // It is given a context. See the Provisioner struct docs for more
+ // information.
+ ApplyFunc func(ctx context.Context) error
+
+ stopCtx context.Context
+ stopCtxCancel context.CancelFunc
+ stopOnce sync.Once
+}
+
+// Keys that can be used to access data in the context parameters for
+// Provisioners.
+var (
+ connDataInvalid = contextKey("data invalid")
+
+ // This returns a *ResourceData for the connection information.
+ // Guaranteed to never be nil.
+ ProvConnDataKey = contextKey("provider conn data")
+
+ // This returns a *ResourceData for the config information.
+ // Guaranteed to never be nil.
+ ProvConfigDataKey = contextKey("provider config data")
+
+ // This returns a terraform.UIOutput. Guaranteed to never be nil.
+ ProvOutputKey = contextKey("provider output")
+
+ // This returns the raw InstanceState passed to Apply. Guaranteed to
+ // be set, but may be nil.
+ ProvRawStateKey = contextKey("provider raw state")
+)
+
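+// A short sketch of an ApplyFunc that pulls its inputs back out of the
+// context using the keys above; the attribute names and the runCommand
+// helper are hypothetical:
+//
+//	func applyFn(ctx context.Context) error {
+//		connData := ctx.Value(schema.ProvConnDataKey).(*schema.ResourceData)
+//		configData := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData)
+//		output := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput)
+//
+//		output.Output(fmt.Sprintf("connecting to %s", connData.Get("host").(string)))
+//		return runCommand(ctx, configData.Get("command").(string))
+//	}
+//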
+// InternalValidate should be called to validate the structure
+// of the provisioner.
+//
+// This should be called in a unit test to verify before release that this
+// structure is properly configured for use.
+func (p *Provisioner) InternalValidate() error {
+ if p == nil {
+ return errors.New("provisioner is nil")
+ }
+
+ var validationErrors error
+ {
+ sm := schemaMap(p.ConnSchema)
+ if err := sm.InternalValidate(sm); err != nil {
+ validationErrors = multierror.Append(validationErrors, err)
+ }
+ }
+
+ {
+ sm := schemaMap(p.Schema)
+ if err := sm.InternalValidate(sm); err != nil {
+ validationErrors = multierror.Append(validationErrors, err)
+ }
+ }
+
+ if p.ApplyFunc == nil {
+ validationErrors = multierror.Append(validationErrors, fmt.Errorf(
+ "ApplyFunc must not be nil"))
+ }
+
+ return validationErrors
+}
+
+// StopContext returns a context that checks whether a provisioner is stopped.
+func (p *Provisioner) StopContext() context.Context {
+ p.stopOnce.Do(p.stopInit)
+ return p.stopCtx
+}
+
+func (p *Provisioner) stopInit() {
+ p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
+}
+
+// Stop implementation of terraform.ResourceProvisioner interface.
+func (p *Provisioner) Stop() error {
+ p.stopOnce.Do(p.stopInit)
+ p.stopCtxCancel()
+ return nil
+}
+
+func (p *Provisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ return schemaMap(p.Schema).Validate(c)
+}
+
+// Apply implementation of terraform.ResourceProvisioner interface.
+func (p *Provisioner) Apply(
+ o terraform.UIOutput,
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) error {
+ var connData, configData *ResourceData
+
+ {
+ // We first need to turn the connection information into a
+ // terraform.ResourceConfig so that we can use that type to more
+ // easily build a ResourceData structure. We do this by simply treating
+ // the conn info as configuration input.
+ raw := make(map[string]interface{})
+ if s != nil {
+ for k, v := range s.Ephemeral.ConnInfo {
+ raw[k] = v
+ }
+ }
+
+ c, err := config.NewRawConfig(raw)
+ if err != nil {
+ return err
+ }
+
+ sm := schemaMap(p.ConnSchema)
+ diff, err := sm.Diff(nil, terraform.NewResourceConfig(c))
+ if err != nil {
+ return err
+ }
+ connData, err = sm.Data(nil, diff)
+ if err != nil {
+ return err
+ }
+ }
+
+ {
+ // Build the configuration data. Doing this requires making a "diff"
+ // even though that diff is never exposed; we use it just to get the
+ // correct types.
+ configMap := schemaMap(p.Schema)
+ diff, err := configMap.Diff(nil, c)
+ if err != nil {
+ return err
+ }
+ configData, err = configMap.Data(nil, diff)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Build the context and call the function
+ ctx := p.StopContext()
+ ctx = context.WithValue(ctx, ProvConnDataKey, connData)
+ ctx = context.WithValue(ctx, ProvConfigDataKey, configData)
+ ctx = context.WithValue(ctx, ProvOutputKey, o)
+ ctx = context.WithValue(ctx, ProvRawStateKey, s)
+ return p.ApplyFunc(ctx)
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
new file mode 100644
index 00000000..c8105588
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
@@ -0,0 +1,478 @@
+package schema
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "strconv"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Resource represents a thing in Terraform that has a set of configurable
+// attributes and a lifecycle (create, read, update, delete).
+//
+// The Resource schema is an abstraction that allows provider writers to
+// worry only about CRUD operations while off-loading validation, diff
+// generation, etc. to this higher level library.
+//
+// In spite of the name, this struct is not used only for terraform resources,
+// but also for data sources. In the case of data sources, the Create,
+// Update and Delete functions must not be provided.
+type Resource struct {
+ // Schema is the schema for the configuration of this resource.
+ //
+ // The keys of this map are the configuration keys, and the values
+ // describe the schema of the configuration value.
+ //
+ // The schema is used to represent both configurable data as well
+ // as data that might be computed in the process of creating this
+ // resource.
+ Schema map[string]*Schema
+
+ // SchemaVersion is the version number for this resource's Schema
+ // definition. The current SchemaVersion is stored in the state for each
+ // resource. Provider authors can increment this version number
+ // when Schema semantics change. If the State's SchemaVersion is less than
+ // the current SchemaVersion, the InstanceState is yielded to the
+ // MigrateState callback, where the provider can make whatever changes it
+ // needs to update the state to be compatible to the latest version of the
+ // Schema.
+ //
+ // When unset, SchemaVersion defaults to 0, so provider authors can start
+ // their versioning at any integer >= 1.
+ SchemaVersion int
+
+ // MigrateState is responsible for updating an InstanceState with an old
+ // version to the format expected by the current version of the Schema.
+ //
+ // It is called during Refresh if the State's stored SchemaVersion is less
+ // than the current SchemaVersion of the Resource.
+ //
+ // The function is yielded the state's stored SchemaVersion and a pointer to
+ // the InstanceState that needs updating, as well as the configured
+ // provider's configured meta interface{}, in case the migration process
+ // needs to make any remote API calls.
+ MigrateState StateMigrateFunc
+
+ // The functions below are the CRUD operations for this resource.
+ //
+ // The only optional operation is Update. If Update is not implemented,
+ // then updates will not be supported for this resource.
+ //
+ // The ResourceData parameter in the functions below are used to
+ // query configuration and changes for the resource as well as to set
+ // the ID, computed data, etc.
+ //
+ // The interface{} parameter is the result of the ConfigureFunc in
+ // the provider for this resource. If the provider does not define
+ // a ConfigureFunc, this will be nil. This parameter should be used
+ // to store API clients, configuration structures, etc.
+ //
+ // If any errors occur during any of the operations, an error should be
+ // returned. If a resource was partially updated, be careful to enable
+ // partial state mode for ResourceData and use it accordingly.
+ //
+ // Exists is a function that is called to check if a resource still
+ // exists. If this returns false, then this will affect the diff
+ // accordingly. If this function isn't set, it will not be called. It
+ // is highly recommended to set it. The *ResourceData passed to Exists
+ // should _not_ be modified.
+ Create CreateFunc
+ Read ReadFunc
+ Update UpdateFunc
+ Delete DeleteFunc
+ Exists ExistsFunc
+
+ // Importer is the ResourceImporter implementation for this resource.
+ // If this is nil, then this resource does not support importing. If
+ // this is non-nil, then it supports importing and ResourceImporter
+ // must be validated. The validity of ResourceImporter is verified
+ // by InternalValidate on Resource.
+ Importer *ResourceImporter
+
+ // If non-empty, this string is emitted as a warning during Validate.
+ // This is a private interface for now, for use by DataSourceResourceShim,
+ // and not for general use. (But maybe later...)
+ deprecationMessage string
+
+ // Timeouts allow users to specify time durations after which an
+ // operation should time out, so they can extend an action to suit their
+ // usage. For example, a user may specify a large Create timeout for their
+ // AWS RDS Instance due to its size, or when restoring from a snapshot.
+ // Resource implementors must enable Timeout support by adding the allowed
+ // actions (Create, Read, Update, Delete, Default) to the Resource struct, and
+ // accessing them in the matching methods.
+ Timeouts *ResourceTimeout
+}
+
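+// A minimal sketch of a Resource definition as a provider package might
+// write one; the attribute name and CRUD helper functions are hypothetical:
+//
+//	func resourceExampleServer() *schema.Resource {
+//		return &schema.Resource{
+//			Schema: map[string]*schema.Schema{
+//				"name": {Type: schema.TypeString, Required: true, ForceNew: true},
+//			},
+//			Create: resourceExampleServerCreate,
+//			Read:   resourceExampleServerRead,
+//			Delete: resourceExampleServerDelete,
+//			Importer: &schema.ResourceImporter{
+//				State: schema.ImportStatePassthrough,
+//			},
+//		}
+//	}
+//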
+// See Resource documentation.
+type CreateFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type ReadFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type UpdateFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type DeleteFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type ExistsFunc func(*ResourceData, interface{}) (bool, error)
+
+// See Resource documentation.
+type StateMigrateFunc func(
+ int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error)
+
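+// A short sketch of a StateMigrateFunc that upgrades older state in place;
+// the attribute names are illustrative only:
+//
+//	func migrateExampleState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
+//		switch v {
+//		case 0:
+//			// v0 stored the address under "ip"; v1 renames it to "ip_address".
+//			is.Attributes["ip_address"] = is.Attributes["ip"]
+//			delete(is.Attributes, "ip")
+//			return is, nil
+//		default:
+//			return is, fmt.Errorf("unexpected schema version: %d", v)
+//		}
+//	}
+//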
+// Apply creates, updates, and/or deletes a resource.
+func (r *Resource) Apply(
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff,
+ meta interface{}) (*terraform.InstanceState, error) {
+ data, err := schemaMap(r.Schema).Data(s, d)
+ if err != nil {
+ return s, err
+ }
+
+ // Instance Diff should have the timeout info; we need to copy it over to the
+ // ResourceData meta
+ rt := ResourceTimeout{}
+ if _, ok := d.Meta[TimeoutKey]; ok {
+ if err := rt.DiffDecode(d); err != nil {
+ log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
+ }
+ } else {
+ log.Printf("[DEBUG] No meta timeoutkey found in Apply()")
+ }
+ data.timeouts = &rt
+
+ if s == nil {
+ // The Terraform API dictates that this should never happen, but
+ // it doesn't hurt to be safe in this case.
+ s = new(terraform.InstanceState)
+ }
+
+ if d.Destroy || d.RequiresNew() {
+ if s.ID != "" {
+ // Destroy the resource since it is created
+ if err := r.Delete(data, meta); err != nil {
+ return r.recordCurrentSchemaVersion(data.State()), err
+ }
+
+ // Make sure the ID is gone.
+ data.SetId("")
+ }
+
+ // If we're only destroying, and not creating, then return
+ // now since we're done!
+ if !d.RequiresNew() {
+ return nil, nil
+ }
+
+ // Reset the data to be stateless since we just destroyed
+ data, err = schemaMap(r.Schema).Data(nil, d)
+ if err != nil {
+ return nil, err
+ }
+ // data was reset, need to re-apply the parsed timeouts
+ data.timeouts = &rt
+ }
+
+ err = nil
+ if data.Id() == "" {
+ // We're creating, it is a new resource.
+ data.MarkNewResource()
+ err = r.Create(data, meta)
+ } else {
+ if r.Update == nil {
+ return s, fmt.Errorf("doesn't support update")
+ }
+
+ err = r.Update(data, meta)
+ }
+
+ return r.recordCurrentSchemaVersion(data.State()), err
+}
+
+// Diff returns a diff of this resource and is API compatible with the
+// ResourceProvider interface.
+func (r *Resource) Diff(
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+
+ t := &ResourceTimeout{}
+ err := t.ConfigDecode(r, c)
+
+ if err != nil {
+ return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
+ }
+
+ instanceDiff, err := schemaMap(r.Schema).Diff(s, c)
+ if err != nil {
+ return instanceDiff, err
+ }
+
+ if instanceDiff != nil {
+ if err := t.DiffEncode(instanceDiff); err != nil {
+ log.Printf("[ERR] Error encoding timeout to instance diff: %s", err)
+ }
+ } else {
+ log.Printf("[DEBUG] Instance Diff is nil in Diff()")
+ }
+
+ return instanceDiff, err
+}
+
+// Validate validates the resource configuration against the schema.
+func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ warns, errs := schemaMap(r.Schema).Validate(c)
+
+ if r.deprecationMessage != "" {
+ warns = append(warns, r.deprecationMessage)
+ }
+
+ return warns, errs
+}
+
+// ReadDataApply loads the data for a data source, given a diff that
+// describes the configuration arguments and desired computed attributes.
+func (r *Resource) ReadDataApply(
+ d *terraform.InstanceDiff,
+ meta interface{},
+) (*terraform.InstanceState, error) {
+
+ // Data sources are always built completely from scratch
+ // on each read, so the source state is always nil.
+ data, err := schemaMap(r.Schema).Data(nil, d)
+ if err != nil {
+ return nil, err
+ }
+
+ err = r.Read(data, meta)
+ state := data.State()
+ if state != nil && state.ID == "" {
+ // Data sources can set an ID if they want, but they aren't
+ // required to; we'll provide a placeholder if they don't,
+ // to preserve the invariant that all resources have non-empty
+ // ids.
+ state.ID = "-"
+ }
+
+ return r.recordCurrentSchemaVersion(state), err
+}
+
+// Refresh refreshes the state of the resource.
+func (r *Resource) Refresh(
+ s *terraform.InstanceState,
+ meta interface{}) (*terraform.InstanceState, error) {
+ // If the ID is already somehow blank, it doesn't exist
+ if s.ID == "" {
+ return nil, nil
+ }
+
+ rt := ResourceTimeout{}
+ if _, ok := s.Meta[TimeoutKey]; ok {
+ if err := rt.StateDecode(s); err != nil {
+ log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
+ }
+ }
+
+ if r.Exists != nil {
+ // Make a copy of data so that if it is modified it doesn't
+ // affect our Read later.
+ data, err := schemaMap(r.Schema).Data(s, nil)
+ if err != nil {
+ return s, err
+ }
+ data.timeouts = &rt
+
+
+ exists, err := r.Exists(data, meta)
+ if err != nil {
+ return s, err
+ }
+ if !exists {
+ return nil, nil
+ }
+ }
+
+ needsMigration, stateSchemaVersion := r.checkSchemaVersion(s)
+ if needsMigration && r.MigrateState != nil {
+ // Assign to the outer s so the migrated state is used below.
+ var err error
+ s, err = r.MigrateState(stateSchemaVersion, s, meta)
+ if err != nil {
+ return s, err
+ }
+ }
+
+ data, err := schemaMap(r.Schema).Data(s, nil)
+ if err != nil {
+ return s, err
+ }
+ data.timeouts = &rt
+
+ err = r.Read(data, meta)
+ state := data.State()
+ if state != nil && state.ID == "" {
+ state = nil
+ }
+
+ return r.recordCurrentSchemaVersion(state), err
+}
+
+// InternalValidate should be called to validate the structure
+// of the resource.
+//
+// This should be called in a unit test for any resource to verify
+// before release that a resource is properly configured for use with
+// this library.
+//
+// Provider.InternalValidate() will automatically call this for all of
+// the resources it manages, so you don't need to call this manually if it
+// is part of a Provider.
+func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error {
+ if r == nil {
+ return errors.New("resource is nil")
+ }
+
+ if !writable {
+ if r.Create != nil || r.Update != nil || r.Delete != nil {
+ return fmt.Errorf("must not implement Create, Update or Delete")
+ }
+ }
+
+ tsm := topSchemaMap
+
+ if r.isTopLevel() && writable {
+ // All non-Computed attributes must be ForceNew if Update is not defined
+ if r.Update == nil {
+ nonForceNewAttrs := make([]string, 0)
+ for k, v := range r.Schema {
+ if !v.ForceNew && !v.Computed {
+ nonForceNewAttrs = append(nonForceNewAttrs, k)
+ }
+ }
+ if len(nonForceNewAttrs) > 0 {
+ return fmt.Errorf(
+ "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs)
+ }
+ } else {
+ nonUpdateableAttrs := make([]string, 0)
+ for k, v := range r.Schema {
+ if v.ForceNew || v.Computed && !v.Optional {
+ nonUpdateableAttrs = append(nonUpdateableAttrs, k)
+ }
+ }
+ updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs)
+ if updateableAttrs == 0 {
+ return fmt.Errorf(
+ "All fields are ForceNew or Computed w/out Optional, Update is superfluous")
+ }
+ }
+
+ tsm = schemaMap(r.Schema)
+
+ // Destroy, and Read are required
+ if r.Read == nil {
+ return fmt.Errorf("Read must be implemented")
+ }
+ if r.Delete == nil {
+ return fmt.Errorf("Delete must be implemented")
+ }
+
+ // If we have an importer, we need to verify the importer.
+ if r.Importer != nil {
+ if err := r.Importer.InternalValidate(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return schemaMap(r.Schema).InternalValidate(tsm)
+}
+
+// Data returns a ResourceData struct for this Resource. Each return value
+// is a separate copy and can be safely modified differently.
+//
+// The data returned from this function has no actual effect on the Resource
+// itself (including the state given to this function).
+//
+// This function is useful for unit tests and ResourceImporter functions.
+func (r *Resource) Data(s *terraform.InstanceState) *ResourceData {
+ result, err := schemaMap(r.Schema).Data(s, nil)
+ if err != nil {
+ // At the time of writing, this isn't possible (Data never returns
+ // non-nil errors). We panic so that, if this ever changes, it surfaces
+ // immediately instead of being silently ignored.
+ panic(err)
+ }
+
+ // Set the schema version to latest by default
+ result.meta = map[string]interface{}{
+ "schema_version": strconv.Itoa(r.SchemaVersion),
+ }
+
+ return result
+}
+
+// TestResourceData yields a ResourceData filled with this resource's schema for use in unit testing.
+//
+// TODO: May be able to be removed with the above ResourceData function.
+func (r *Resource) TestResourceData() *ResourceData {
+ return &ResourceData{
+ schema: r.Schema,
+ }
+}
+
+// Returns true if the resource is "top level" i.e. not a sub-resource.
+func (r *Resource) isTopLevel() bool {
+ // TODO: This is a heuristic; replace with a definitive attribute?
+ return r.Create != nil
+}
+
+// Determines if a given InstanceState needs to be migrated by checking the
+// stored version number against the current SchemaVersion.
+func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) {
+ // Get the raw interface{} value for the schema version. If it doesn't
+ // exist or is nil then set it to zero.
+ raw := is.Meta["schema_version"]
+ if raw == nil {
+ raw = "0"
+ }
+
+ // Try to convert it to a string. If it isn't a string then we pretend
+ // that it isn't set at all. It should never not be a string unless it
+ // was manually tampered with.
+ rawString, ok := raw.(string)
+ if !ok {
+ rawString = "0"
+ }
+
+ stateSchemaVersion, _ := strconv.Atoi(rawString)
+ return stateSchemaVersion < r.SchemaVersion, stateSchemaVersion
+}
+
+func (r *Resource) recordCurrentSchemaVersion(
+ state *terraform.InstanceState) *terraform.InstanceState {
+ if state != nil && r.SchemaVersion > 0 {
+ if state.Meta == nil {
+ state.Meta = make(map[string]interface{})
+ }
+ state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion)
+ }
+ return state
+}
+
+// Noop is a convenience implementation of resource function which takes
+// no action and returns no error.
+func Noop(*ResourceData, interface{}) error {
+ return nil
+}
+
+// RemoveFromState is a convenience implementation of a resource function
+// which sets the resource ID to empty string (to remove it from state)
+// and returns no error.
+func RemoveFromState(d *ResourceData, _ interface{}) error {
+ d.SetId("")
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
new file mode 100644
index 00000000..b2bc8f6c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
@@ -0,0 +1,502 @@
+package schema
+
+import (
+ "log"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// ResourceData is used to query and set the attributes of a resource.
+//
+// ResourceData is the primary argument received for CRUD operations on
+// a resource as well as configuration of a provider. It is a powerful
+// structure that can be used to not only query data, but check for changes,
+// define partial state updates, etc.
+//
+// The most relevant methods to take a look at are Get, Set, and Partial.
+type ResourceData struct {
+ // Settable (internally)
+ schema map[string]*Schema
+ config *terraform.ResourceConfig
+ state *terraform.InstanceState
+ diff *terraform.InstanceDiff
+ meta map[string]interface{}
+ timeouts *ResourceTimeout
+
+ // Don't set
+ multiReader *MultiLevelFieldReader
+ setWriter *MapFieldWriter
+ newState *terraform.InstanceState
+ partial bool
+ partialMap map[string]struct{}
+ once sync.Once
+ isNew bool
+}
+
+// getResult is the internal structure that is generated when a Get
+// is called that contains some extra data that might be used.
+type getResult struct {
+ Value interface{}
+ ValueProcessed interface{}
+ Computed bool
+ Exists bool
+ Schema *Schema
+}
+
+// UnsafeSetFieldRaw allows setting arbitrary keys in state to arbitrary
+// values, bypassing the schema. This MUST NOT be used in normal circumstances -
+// it exists only to support the remote_state data source.
+func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) {
+ d.once.Do(d.init)
+
+ d.setWriter.unsafeWriteField(key, value)
+}
+
+// Get returns the data for the given key, or nil if the key doesn't exist
+// in the schema.
+//
+// If the key does exist in the schema but doesn't exist in the configuration,
+// then the default value for that type will be returned. For strings, this is
+// "", for numbers it is 0, etc.
+//
+// If you want to test if something is set at all in the configuration,
+// use GetOk.
+func (d *ResourceData) Get(key string) interface{} {
+ v, _ := d.GetOk(key)
+ return v
+}
+
+// GetChange returns the old and new value for a given key.
+//
+// HasChange should be used to check if a change exists. It is possible
+// that both the old and new value are the same if the old value was not
+// set and the new value is. This is common, for example, for boolean
+// fields which have a zero value of false.
+func (d *ResourceData) GetChange(key string) (interface{}, interface{}) {
+ o, n := d.getChange(key, getSourceState, getSourceDiff)
+ return o.Value, n.Value
+}
+
+// GetOk returns the data for the given key and whether or not the key
+// has been set to a non-zero value at some point.
+//
+// The first result will not necessarily be nil if the value doesn't exist.
+// The second result should be checked to determine this information.
+func (d *ResourceData) GetOk(key string) (interface{}, bool) {
+ r := d.getRaw(key, getSourceSet)
+ exists := r.Exists && !r.Computed
+ if exists {
+ // If it exists, we also want to verify it is not the zero-value.
+ value := r.Value
+ zero := r.Schema.Type.Zero()
+
+ if eq, ok := value.(Equal); ok {
+ exists = !eq.Equal(zero)
+ } else {
+ exists = !reflect.DeepEqual(value, zero)
+ }
+ }
+
+ return r.Value, exists
+}
+
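+// A brief usage sketch inside a hypothetical Read function, where
+// "description" is an optional attribute of that resource's schema and
+// setDescription is a hypothetical helper:
+//
+//	name := d.Get("name").(string) // "" if unset
+//	if v, ok := d.GetOk("description"); ok {
+//		setDescription(v.(string)) // only reached when the value is non-zero
+//	}
+//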
+func (d *ResourceData) getRaw(key string, level getSource) getResult {
+ var parts []string
+ if key != "" {
+ parts = strings.Split(key, ".")
+ }
+
+ return d.get(parts, level)
+}
+
+// HasChange returns whether or not the given key has been changed.
+func (d *ResourceData) HasChange(key string) bool {
+ o, n := d.GetChange(key)
+
+ // If the type implements the Equal interface, then call that
+ // instead of just doing a reflect.DeepEqual. An example where this is
+ // needed is *Set
+ if eq, ok := o.(Equal); ok {
+ return !eq.Equal(n)
+ }
+
+ return !reflect.DeepEqual(o, n)
+}
+
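+// A brief sketch of the usual Update-function pattern built on HasChange and
+// GetChange; the "tags" attribute and syncTags helper are hypothetical:
+//
+//	if d.HasChange("tags") {
+//		oldRaw, newRaw := d.GetChange("tags")
+//		if err := syncTags(oldRaw.(map[string]interface{}), newRaw.(map[string]interface{})); err != nil {
+//			return err
+//		}
+//	}
+//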
+// Partial turns partial state mode on/off.
+//
+// When partial state mode is enabled, then only key prefixes specified
+// by SetPartial will be in the final state. This allows providers to return
+// partial states for partially applied resources (when errors occur).
+func (d *ResourceData) Partial(on bool) {
+ d.partial = on
+ if on {
+ if d.partialMap == nil {
+ d.partialMap = make(map[string]struct{})
+ }
+ } else {
+ d.partialMap = nil
+ }
+}
+
+// Set sets the value for the given key.
+//
+// If the key is invalid or the value is not a correct type, an error
+// will be returned.
+func (d *ResourceData) Set(key string, value interface{}) error {
+ d.once.Do(d.init)
+
+ // If the value is a pointer to a non-struct, get its value and
+ // use that. This allows Set to take a pointer to primitives to
+ // simplify the interface.
+ reflectVal := reflect.ValueOf(value)
+ if reflectVal.Kind() == reflect.Ptr {
+ if reflectVal.IsNil() {
+ // If the pointer is nil, then the value is just nil
+ value = nil
+ } else {
+ // Otherwise, we dereference the pointer as long as it's not
+ // a pointer to a struct, since struct pointers are allowed.
+ reflectVal = reflect.Indirect(reflectVal)
+ if reflectVal.Kind() != reflect.Struct {
+ value = reflectVal.Interface()
+ }
+ }
+ }
+
+ return d.setWriter.WriteField(strings.Split(key, "."), value)
+}
+
+// SetPartial adds the key to the final state output while
+// in partial state mode. The key must be a root key in the schema (i.e.
+// it cannot be "list.0").
+//
+// If partial state mode is disabled, then this has no effect. Additionally,
+// whenever partial state mode is toggled, the partial data is cleared.
+func (d *ResourceData) SetPartial(k string) {
+ if d.partial {
+ d.partialMap[k] = struct{}{}
+ }
+}
+
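+// A brief sketch of partial state mode in a Create or Update function,
+// assuming two hypothetical API calls where the second may fail:
+//
+//	d.Partial(true)
+//	if err := attachVolume(d); err != nil {
+//		return err
+//	}
+//	d.SetPartial("volume_id") // persisted even if a later step fails
+//	if err := assignAddress(d); err != nil {
+//		return err
+//	}
+//	d.Partial(false) // success: persist everything
+//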
+func (d *ResourceData) MarkNewResource() {
+ d.isNew = true
+}
+
+func (d *ResourceData) IsNewResource() bool {
+ return d.isNew
+}
+
+// Id returns the ID of the resource.
+func (d *ResourceData) Id() string {
+ var result string
+
+ if d.state != nil {
+ result = d.state.ID
+ }
+
+ if d.newState != nil {
+ result = d.newState.ID
+ }
+
+ return result
+}
+
+// ConnInfo returns the connection info for this resource.
+func (d *ResourceData) ConnInfo() map[string]string {
+ if d.newState != nil {
+ return d.newState.Ephemeral.ConnInfo
+ }
+
+ if d.state != nil {
+ return d.state.Ephemeral.ConnInfo
+ }
+
+ return nil
+}
+
+// SetId sets the ID of the resource. If the value is blank, then the
+// resource is destroyed.
+func (d *ResourceData) SetId(v string) {
+ d.once.Do(d.init)
+ d.newState.ID = v
+}
+
+// SetConnInfo sets the connection info for a resource.
+func (d *ResourceData) SetConnInfo(v map[string]string) {
+ d.once.Do(d.init)
+ d.newState.Ephemeral.ConnInfo = v
+}
+
+// SetType sets the ephemeral type for the data. This is only required
+// for importing.
+func (d *ResourceData) SetType(t string) {
+ d.once.Do(d.init)
+ d.newState.Ephemeral.Type = t
+}
+
+// State returns the new InstanceState after the diff and any Set
+// calls.
+func (d *ResourceData) State() *terraform.InstanceState {
+ var result terraform.InstanceState
+ result.ID = d.Id()
+ result.Meta = d.meta
+
+ // If we have no ID, then this resource doesn't exist and we just
+ // return nil.
+ if result.ID == "" {
+ return nil
+ }
+
+ if d.timeouts != nil {
+ if err := d.timeouts.StateEncode(&result); err != nil {
+ log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err)
+ }
+ }
+
+ // Look for a magic key in the schema that determines whether we skip the
+ // integrity check of fields existing in the schema, allowing dynamic
+ // keys to be created.
+ hasDynamicAttributes := false
+ for k := range d.schema {
+ if k == "__has_dynamic_attributes" {
+ hasDynamicAttributes = true
+ log.Printf("[INFO] Resource %s has dynamic attributes", result.ID)
+ }
+ }
+
+ // In order to build the final state attributes, we read the full
+ // attribute set as a map[string]interface{}, write it to a MapFieldWriter,
+ // and then use that map.
+ rawMap := make(map[string]interface{})
+ for k := range d.schema {
+ source := getSourceSet
+ if d.partial {
+ source = getSourceState
+ if _, ok := d.partialMap[k]; ok {
+ source = getSourceSet
+ }
+ }
+
+ raw := d.get([]string{k}, source)
+ if raw.Exists && !raw.Computed {
+ rawMap[k] = raw.Value
+ if raw.ValueProcessed != nil {
+ rawMap[k] = raw.ValueProcessed
+ }
+ }
+ }
+
+ mapW := &MapFieldWriter{Schema: d.schema}
+ if err := mapW.WriteField(nil, rawMap); err != nil {
+ return nil
+ }
+
+ result.Attributes = mapW.Map()
+
+ if hasDynamicAttributes {
+ // If we have dynamic attributes, just copy the attributes map
+ // one for one into the result attributes.
+ for k, v := range d.setWriter.Map() {
+ // Don't clobber schema values. This limits usage of dynamic
+ // attributes to names which _do not_ conflict with schema
+ // keys!
+ if _, ok := result.Attributes[k]; !ok {
+ result.Attributes[k] = v
+ }
+ }
+ }
+
+ if d.newState != nil {
+ result.Ephemeral = d.newState.Ephemeral
+ }
+
+ // TODO: This is hacky and we can remove this when we have a proper
+ // state writer. We should instead have a proper StateFieldWriter
+ // and use that.
+ for k, schema := range d.schema {
+ if schema.Type != TypeMap {
+ continue
+ }
+
+ if result.Attributes[k] == "" {
+ delete(result.Attributes, k)
+ }
+ }
+
+ if v := d.Id(); v != "" {
+ result.Attributes["id"] = d.Id()
+ }
+
+ if d.state != nil {
+ result.Tainted = d.state.Tainted
+ }
+
+ return &result
+}
+
+// Timeout returns the duration configured for the given timeout key.
+// It returns a default of 20 minutes if the key is not set and no Default is configured.
+func (d *ResourceData) Timeout(key string) time.Duration {
+ key = strings.ToLower(key)
+
+ var timeout *time.Duration
+ switch key {
+ case TimeoutCreate:
+ timeout = d.timeouts.Create
+ case TimeoutRead:
+ timeout = d.timeouts.Read
+ case TimeoutUpdate:
+ timeout = d.timeouts.Update
+ case TimeoutDelete:
+ timeout = d.timeouts.Delete
+ }
+
+ if timeout != nil {
+ return *timeout
+ }
+
+ if d.timeouts.Default != nil {
+ return *d.timeouts.Default
+ }
+
+ // Return system default of 20 minutes
+ return 20 * time.Minute
+}
+
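+// A brief usage sketch inside a create function, assuming a hypothetical
+// waitForReady helper that honors the configured (or default) create timeout:
+//
+//	if err := waitForReady(client, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil {
+//		return err
+//	}
+//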
+func (d *ResourceData) init() {
+ // Initialize the field that will store our new state
+ var copyState terraform.InstanceState
+ if d.state != nil {
+ copyState = *d.state.DeepCopy()
+ }
+ d.newState = &copyState
+
+ // Initialize the map for storing set data
+ d.setWriter = &MapFieldWriter{Schema: d.schema}
+
+ // Initialize the reader for getting data from the
+ // underlying sources (config, diff, etc.)
+ readers := make(map[string]FieldReader)
+ var stateAttributes map[string]string
+ if d.state != nil {
+ stateAttributes = d.state.Attributes
+ readers["state"] = &MapFieldReader{
+ Schema: d.schema,
+ Map: BasicMapReader(stateAttributes),
+ }
+ }
+ if d.config != nil {
+ readers["config"] = &ConfigFieldReader{
+ Schema: d.schema,
+ Config: d.config,
+ }
+ }
+ if d.diff != nil {
+ readers["diff"] = &DiffFieldReader{
+ Schema: d.schema,
+ Diff: d.diff,
+ Source: &MultiLevelFieldReader{
+ Levels: []string{"state", "config"},
+ Readers: readers,
+ },
+ }
+ }
+ readers["set"] = &MapFieldReader{
+ Schema: d.schema,
+ Map: BasicMapReader(d.setWriter.Map()),
+ }
+ d.multiReader = &MultiLevelFieldReader{
+ Levels: []string{
+ "state",
+ "config",
+ "diff",
+ "set",
+ },
+
+ Readers: readers,
+ }
+}
+
+func (d *ResourceData) diffChange(
+ k string) (interface{}, interface{}, bool, bool) {
+ // Get the change between the state and the config.
+ o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact)
+ if !o.Exists {
+ o.Value = nil
+ }
+ if !n.Exists {
+ n.Value = nil
+ }
+
+ // Return the old, new, and whether there is a change
+ return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed
+}
+
+func (d *ResourceData) getChange(
+ k string,
+ oldLevel getSource,
+ newLevel getSource) (getResult, getResult) {
+ var parts, parts2 []string
+ if k != "" {
+ parts = strings.Split(k, ".")
+ parts2 = strings.Split(k, ".")
+ }
+
+ o := d.get(parts, oldLevel)
+ n := d.get(parts2, newLevel)
+ return o, n
+}
+
+func (d *ResourceData) get(addr []string, source getSource) getResult {
+ d.once.Do(d.init)
+
+ level := "set"
+ flags := source & ^getSourceLevelMask
+ exact := flags&getSourceExact != 0
+ source = source & getSourceLevelMask
+ if source >= getSourceSet {
+ level = "set"
+ } else if source >= getSourceDiff {
+ level = "diff"
+ } else if source >= getSourceConfig {
+ level = "config"
+ } else {
+ level = "state"
+ }
+
+ var result FieldReadResult
+ var err error
+ if exact {
+ result, err = d.multiReader.ReadFieldExact(addr, level)
+ } else {
+ result, err = d.multiReader.ReadFieldMerge(addr, level)
+ }
+ if err != nil {
+ panic(err)
+ }
+
+ // If the result doesn't exist, then we set the value to the zero value
+ var schema *Schema
+ if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 {
+ schema = schemaL[len(schemaL)-1]
+ }
+
+ if result.Value == nil && schema != nil {
+ result.Value = result.ValueOrZero(schema)
+ }
+
+ // Transform the FieldReadResult into a getResult. It might be worth
+ // merging these two structures one day.
+ return getResult{
+ Value: result.Value,
+ ValueProcessed: result.ValueProcessed,
+ Computed: result.Computed,
+ Exists: result.Exists,
+ Schema: schema,
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go
new file mode 100644
index 00000000..7dd655de
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go
@@ -0,0 +1,17 @@
+package schema
+
+//go:generate stringer -type=getSource resource_data_get_source.go
+
+// getSource represents the level we want to get for a value (internally).
+// Any source less than or equal to the level will be loaded (whichever
+// has a value first).
+type getSource byte
+
+const (
+ getSourceState getSource = 1 << iota
+ getSourceConfig
+ getSourceDiff
+ getSourceSet
+ getSourceExact // Only get from the _exact_ level
+ getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet
+)
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go
new file mode 100644
index 00000000..5dada3ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go
@@ -0,0 +1,52 @@
+package schema
+
+// ResourceImporter defines how a resource is imported in Terraform. This
+// can be set onto a Resource struct to make it Importable. Not all resources
+// have to be importable; if a Resource doesn't have a ResourceImporter then
+// it won't be importable.
+//
+// "Importing" in Terraform is the process of taking an already-created
+// resource and bringing it under Terraform management. This can include
+// updating Terraform state, generating Terraform configuration, etc.
+type ResourceImporter struct {
+ // The functions below must all be implemented for importing to work.
+
+ // State is called to convert an ID to one or more InstanceState to
+ // insert into the Terraform state. If this isn't specified, then
+ // the ID is passed straight through.
+ State StateFunc
+}
+
+// StateFunc is the function called to import a resource into the
+// Terraform state. It is given a ResourceData with only ID set. This
+// ID is going to be an arbitrary value given by the user and may not map
+// directly to the ID format that the resource expects, so that should
+// be validated.
+//
+// This should return a slice of ResourceData that turn into the state
+// that was imported. This might be as simple as returning only the argument
+// that was given to the function. In other cases (such as AWS security groups),
+// an import may fan out to multiple resources and this will have to return
+// multiple.
+//
+// To create the ResourceData structures for other resource types (if
+// you have to), instantiate your resource and call the Data function.
+type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error)
+
+// InternalValidate should be called to validate the structure of this
+// importer. This should be called in a unit test.
+//
+// Resource.InternalValidate() will automatically call this, so this doesn't
+// need to be called manually. Further, Resource.InternalValidate() is
+// automatically called by Provider.InternalValidate(), so you only need
+// to internal validate the provider.
+func (r *ResourceImporter) InternalValidate() error {
+ return nil
+}
+
+// ImportStatePassthrough is an implementation of StateFunc that can be
+// used to simply pass the ID directly through. This should be used only
+// in the case that an ID-only refresh is possible.
+func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) {
+ return []*ResourceData{d}, nil
+}
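+
+// A short sketch of a custom StateFunc that validates the user-supplied ID
+// before import; the ID format check is illustrative only:
+//
+//	func importExampleServer(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+//		if !strings.HasPrefix(d.Id(), "srv-") {
+//			return nil, fmt.Errorf("unexpected ID format %q, expected srv-<id>", d.Id())
+//		}
+//		return []*schema.ResourceData{d}, nil
+//	}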
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
new file mode 100644
index 00000000..445819f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
@@ -0,0 +1,237 @@
+package schema
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/copystructure"
+)
+
+const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0"
+const TimeoutsConfigKey = "timeouts"
+
+const (
+ TimeoutCreate = "create"
+ TimeoutRead = "read"
+ TimeoutUpdate = "update"
+ TimeoutDelete = "delete"
+ TimeoutDefault = "default"
+)
+
+func timeoutKeys() []string {
+ return []string{
+ TimeoutCreate,
+ TimeoutRead,
+ TimeoutUpdate,
+ TimeoutDelete,
+ TimeoutDefault,
+ }
+}
+
+// DefaultTimeout returns a pointer to the given value as a time.Duration.
+// The argument may be a time.Duration, an int64 or a float64 (the latter two
+// are interpreted as nanoseconds).
+func DefaultTimeout(tx interface{}) *time.Duration {
+ var td time.Duration
+ switch raw := tx.(type) {
+ case time.Duration:
+ return &raw
+ case int64:
+ td = time.Duration(raw)
+ case float64:
+ td = time.Duration(int64(raw))
+ default:
+ log.Printf("[WARN] Unknown type in DefaultTimeout: %#v", tx)
+ }
+ return &td
+}
+
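+// A brief sketch of declaring timeouts on a resource with DefaultTimeout;
+// the durations are illustrative only:
+//
+//	Timeouts: &schema.ResourceTimeout{
+//		Create: schema.DefaultTimeout(10 * time.Minute),
+//		Delete: schema.DefaultTimeout(30 * time.Minute),
+//	},
+//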
+type ResourceTimeout struct {
+ Create, Read, Update, Delete, Default *time.Duration
+}
+
+// ConfigDecode takes a resource and its configuration, validates the timeout
+// values given there, and parses them into t.
+func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) error {
+ if s.Timeouts != nil {
+ raw, err := copystructure.Copy(s.Timeouts)
+ if err != nil {
+ log.Printf("[DEBUG] Error with deep copy: %s", err)
+ }
+ *t = *raw.(*ResourceTimeout)
+ }
+
+ if raw, ok := c.Config[TimeoutsConfigKey]; ok {
+ if configTimeouts, ok := raw.([]map[string]interface{}); ok {
+ for _, timeoutValues := range configTimeouts {
+ // loop through each Timeout given in the configuration and validate it
+ // against the Timeouts defined in the resource
+ for timeKey, timeValue := range timeoutValues {
+ // validate that we're dealing with the normal CRUD actions
+ var found bool
+ for _, key := range timeoutKeys() {
+ if timeKey == key {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
+ }
+
+ // Get timeout
+ rt, err := time.ParseDuration(timeValue.(string))
+ if err != nil {
+ return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err)
+ }
+
+ var timeout *time.Duration
+ switch timeKey {
+ case TimeoutCreate:
+ timeout = t.Create
+ case TimeoutUpdate:
+ timeout = t.Update
+ case TimeoutRead:
+ timeout = t.Read
+ case TimeoutDelete:
+ timeout = t.Delete
+ case TimeoutDefault:
+ timeout = t.Default
+ }
+
+ // If the resource has not declared this timeout in its definition, then
+ // error with an unsupported message
+ if timeout == nil {
+ return unsupportedTimeoutKeyError(timeKey)
+ }
+
+ *timeout = rt
+ }
+ }
+ } else {
+ log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts")
+ }
+ }
+
+ return nil
+}
+
+func unsupportedTimeoutKeyError(key string) error {
+ return fmt.Errorf("Timeout Key (%s) is not supported", key)
+}
+
+// DiffEncode, StateEncode, and their decode counterparts are analogous to the
+// Go standard library's json Encoder/Decoder: they encode/decode a timeouts
+// struct to and from an instance diff, which is where the timeout data is
+// stored after a diff so it can be passed into Apply.
+//
+// StateEncode encodes the timeouts into an InstanceState for saving to state.
+//
+func (t *ResourceTimeout) DiffEncode(id *terraform.InstanceDiff) error {
+ return t.metaEncode(id)
+}
+
+func (t *ResourceTimeout) StateEncode(is *terraform.InstanceState) error {
+ return t.metaEncode(is)
+}
+
+// metaEncode encodes the ResourceTimeout into a map[string]interface{} format
+// and stores it in the Meta field of the interface it's given.
+// Assumes the interface is either *terraform.InstanceState or
+// *terraform.InstanceDiff, returns an error otherwise
+func (t *ResourceTimeout) metaEncode(ids interface{}) error {
+ m := make(map[string]interface{})
+
+ if t.Create != nil {
+ m[TimeoutCreate] = t.Create.Nanoseconds()
+ }
+ if t.Read != nil {
+ m[TimeoutRead] = t.Read.Nanoseconds()
+ }
+ if t.Update != nil {
+ m[TimeoutUpdate] = t.Update.Nanoseconds()
+ }
+ if t.Delete != nil {
+ m[TimeoutDelete] = t.Delete.Nanoseconds()
+ }
+ if t.Default != nil {
+ m[TimeoutDefault] = t.Default.Nanoseconds()
+ // for any key above that is nil, if default is specified, we need to
+ // populate it with the default
+ for _, k := range timeoutKeys() {
+ if _, ok := m[k]; !ok {
+ m[k] = t.Default.Nanoseconds()
+ }
+ }
+ }
+
+ // only add the Timeout to the Meta if we have values
+ if len(m) > 0 {
+ switch instance := ids.(type) {
+ case *terraform.InstanceDiff:
+ if instance.Meta == nil {
+ instance.Meta = make(map[string]interface{})
+ }
+ instance.Meta[TimeoutKey] = m
+ case *terraform.InstanceState:
+ if instance.Meta == nil {
+ instance.Meta = make(map[string]interface{})
+ }
+ instance.Meta[TimeoutKey] = m
+ default:
+ return fmt.Errorf("Error matching type for Diff Encode")
+ }
+ }
+
+ return nil
+}
+
+func (t *ResourceTimeout) StateDecode(id *terraform.InstanceState) error {
+ return t.metaDecode(id)
+}
+func (t *ResourceTimeout) DiffDecode(is *terraform.InstanceDiff) error {
+ return t.metaDecode(is)
+}
+
+func (t *ResourceTimeout) metaDecode(ids interface{}) error {
+ var rawMeta interface{}
+ var ok bool
+ switch rawInstance := ids.(type) {
+ case *terraform.InstanceDiff:
+ rawMeta, ok = rawInstance.Meta[TimeoutKey]
+ if !ok {
+ return nil
+ }
+ case *terraform.InstanceState:
+ rawMeta, ok = rawInstance.Meta[TimeoutKey]
+ if !ok {
+ return nil
+ }
+ default:
+ return fmt.Errorf("Unknown or unsupported type in metaDecode: %#v", ids)
+ }
+
+ times := rawMeta.(map[string]interface{})
+ if len(times) == 0 {
+ return nil
+ }
+
+ if v, ok := times[TimeoutCreate]; ok {
+ t.Create = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutRead]; ok {
+ t.Read = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutUpdate]; ok {
+ t.Update = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutDelete]; ok {
+ t.Delete = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutDefault]; ok {
+ t.Default = DefaultTimeout(v)
+ }
+
+ return nil
+}
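A minimal sketch of the encode/decode round trip these methods implement (the function name is illustrative; error handling is left to the caller):

func exampleTimeoutRoundTrip() (*ResourceTimeout, error) {
	rt := &ResourceTimeout{Create: DefaultTimeout(5 * time.Minute)}

	// Encode into an instance diff, as the framework does after Diff.
	d := &terraform.InstanceDiff{}
	if err := rt.DiffEncode(d); err != nil {
		return nil, err
	}
	// d.Meta[TimeoutKey] now holds map[string]interface{}{"create": int64(5 * time.Minute)}

	// Decode it back out, as happens before Apply.
	var decoded ResourceTimeout
	if err := decoded.DiffDecode(d); err != nil {
		return nil, err
	}
	return &decoded, nil // decoded.Create points at a 5 minute duration
}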
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
new file mode 100644
index 00000000..32d17213
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
@@ -0,0 +1,1537 @@
+// schema is a high-level framework for easily writing new providers
+// for Terraform. Usage of schema is recommended over attempting to write
+// to the low-level plugin interfaces manually.
+//
+// schema breaks down provider creation into simple CRUD operations for
+// resources. The logic of diffing, destroying before creating, updating
+// or creating, etc. is all handled by the framework. The plugin author
+// only needs to implement a configuration schema and the CRUD operations and
+// everything else is meant to just work.
+//
+// A good starting point is to view the Provider structure.
+package schema
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/mapstructure"
+)
+
+// type used for schema package context keys
+type contextKey string
+
+// Schema is used to describe the structure of a value.
+//
+// Read the documentation of the struct elements for important details.
+type Schema struct {
+ // Type is the type of the value and must be one of the ValueType values.
+ //
+ // This type not only determines what type is expected/valid in configuring
+ // this value, but also what type is returned when ResourceData.Get is
+ // called. The types returned by Get are:
+ //
+ // TypeBool - bool
+ // TypeInt - int
+ // TypeFloat - float64
+ // TypeString - string
+ // TypeList - []interface{}
+ // TypeMap - map[string]interface{}
+ // TypeSet - *schema.Set
+ //
+ Type ValueType
+
+ // If one of these is set, then this item can come from the configuration.
+ // Both cannot be set. If Optional is set, the value is optional. If
+ // Required is set, the value is required.
+ //
+ // One of these must be set if the value is not computed. That is:
+ // value either comes from the config, is computed, or is both.
+ Optional bool
+ Required bool
+
+ // If this is non-nil, the provided function will be used during diff
+ // of this field. If this is nil, a default diff for the type of the
+ // schema will be used.
+ //
+ // This allows comparison based on something other than primitive, list
+ // or map equality - for example SSH public keys may be considered
+ // equivalent regardless of trailing whitespace.
+ DiffSuppressFunc SchemaDiffSuppressFunc
+
+ // If this is non-nil, then this will be a default value that is used
+ // when this item is not set in the configuration.
+ //
+ // DefaultFunc can be specified to compute a dynamic default.
+ // Only one of Default or DefaultFunc can be set. If DefaultFunc is
+ // used then its return value should be stable to avoid generating
+ // confusing/perpetual diffs.
+ //
+ // Changing either Default or the return value of DefaultFunc can be
+ // a breaking change, especially if the attribute in question has
+ // ForceNew set. If a default needs to change to align with changing
+ // assumptions in an upstream API then it may be necessary to also use
+ // the MigrateState function on the resource to change the state to match,
+ // or have the Read function adjust the state value to align with the
+ // new default.
+ //
+ // If Required is true above, then Default cannot be set. DefaultFunc
+ // can be set with Required. If the DefaultFunc returns nil, then there
+ // will be no default and the user will be asked to fill it in.
+ //
+ // If either of these is set, then the user won't be asked for input
+ // for this key if the default is not nil.
+ Default interface{}
+ DefaultFunc SchemaDefaultFunc
+
+ // Description is used as the description for docs or asking for user
+ // input. It should be relatively short (a few sentences max) and should
+ // be formatted to fit a CLI.
+ Description string
+
+ // InputDefault is the default value to use when input is requested.
+ // This differs from Default in that if Default is set, no input is
+ // asked for. If input is asked for, this will be the default value offered.
+ InputDefault string
+
+ // The fields below relate to diffs.
+ //
+ // If Computed is true, then the result of this value is computed
+ // (unless specified by config) on creation.
+ //
+ // If ForceNew is true, then a change in this resource necessitates
+ // the creation of a new resource.
+ //
+ // StateFunc is a function called to change the value of this before
+ // storing it in the state (and likewise before comparing for diffs).
+ // This is useful, for example, with large strings: you may want to
+ // simply store the hash of the string.
+ Computed bool
+ ForceNew bool
+ StateFunc SchemaStateFunc
+
+ // The following fields are only set for a TypeList or TypeSet Type.
+ //
+ // Elem must be either a *Schema or a *Resource only if the Type is
+ // TypeList or TypeSet, and represents what the element type is. If it is *Schema,
+ // the element type is just a simple value. If it is *Resource, the
+ // element type is a complex structure, potentially with its own lifecycle.
+ //
+ // MaxItems defines the maximum number of items that can exist within a
+ // TypeSet or TypeList. Specific use cases would be if a TypeSet is being
+ // used to wrap a complex structure, however more than one instance would
+ // cause instability.
+ //
+ // MinItems defines the minimum number of items that can exist within a
+ // TypeSet or TypeList. Specific use cases would be if a TypeSet is being
+ // used to wrap a complex structure, however less than one instance would
+ // cause instability.
+ //
+ // PromoteSingle, if true, will allow single elements to be standalone
+ // and promote them to a list. For example "foo" would be promoted to
+ // ["foo"] automatically. This is primarily for legacy reasons and the
+ // ambiguity is not recommended for new usage. Promotion is only allowed
+ // for primitive element types.
+ Elem interface{}
+ MaxItems int
+ MinItems int
+ PromoteSingle bool
+
+ // The following fields are only valid for a TypeSet type.
+ //
+ // Set defines a function to determine the unique ID of an item so that
+ // a proper set can be built.
+ Set SchemaSetFunc
+
+ // ComputedWhen is a set of queries on the configuration. Whenever any
+ // of these things is changed, it will require a recompute (this requires
+ // that Computed is set to true).
+ //
+ // NOTE: This currently does not work.
+ ComputedWhen []string
+
+ // ConflictsWith is a set of schema keys that conflict with this schema.
+ // This will only check that they're set in the _config_. This will not
+ // raise an error for a malfunctioning resource that sets a conflicting
+ // key.
+ ConflictsWith []string
+
+ // When Deprecated is set, this attribute is deprecated.
+ //
+ // A deprecated field still works, but will probably stop working in the
+ // near future. This string is the message shown to the user with instructions on
+ // how to address the deprecation.
+ Deprecated string
+
+ // When Removed is set, this attribute has been removed from the schema
+ //
+ // Removed attributes can be left in the Schema to generate informative error
+ // messages for the user when they show up in resource configurations.
+ // This string is the message shown to the user with instructions on
+ // what to do about the removed attribute.
+ Removed string
+
+ // ValidateFunc allows individual fields to define arbitrary validation
+ // logic. It is yielded the provided config value as an interface{} that is
+ // guaranteed to be of the proper Schema type, and it can yield warnings or
+ // errors based on inspection of that value.
+ //
+ // ValidateFunc currently only works for primitive types.
+ ValidateFunc SchemaValidateFunc
+
+ // Sensitive ensures that the attribute's value does not get displayed in
+ // logs or regular output. It should be used for passwords or other
+ // secret fields. Future versions of Terraform may encrypt these
+ // values.
+ Sensitive bool
+}
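To make the field semantics concrete, here is a sketch of a schema map for a hypothetical resource, combining required, optional-with-default, computed, and set-typed attributes (all attribute names are illustrative):

var exampleSchema = map[string]*Schema{
	"name": {
		Type:     TypeString,
		Required: true,
		ForceNew: true,
	},
	"size": {
		Type:     TypeInt,
		Optional: true,
		Default:  1,
	},
	"fingerprint": {
		Type:     TypeString,
		Computed: true,
	},
	"tags": {
		Type:     TypeSet,
		Optional: true,
		Elem:     &Schema{Type: TypeString},
		// Set is optional; HashSchema(Elem) is used when it is omitted.
		Set: HashString,
	},
}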
+
+// SchemaDiffSuppressFunc is a function which can be used to determine
+// whether a detected diff on a schema element is "valid" or not, and
+// suppress it from the plan if necessary.
+//
+// Return true if the diff should be suppressed, false to retain it.
+type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool
+
+// SchemaDefaultFunc is a function called to return a default value for
+// a field.
+type SchemaDefaultFunc func() (interface{}, error)
+
+// EnvDefaultFunc is a helper function that returns the value of the
+// given environment variable, if one exists, or the default value
+// otherwise.
+func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc {
+ return func() (interface{}, error) {
+ if v := os.Getenv(k); v != "" {
+ return v, nil
+ }
+
+ return dv, nil
+ }
+}
+
+// MultiEnvDefaultFunc is a helper function that returns the value of the first
+// environment variable in the given list that returns a non-empty value. If
+// none of the environment variables return a value, the default value is
+// returned.
+func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc {
+ return func() (interface{}, error) {
+ for _, k := range ks {
+ if v := os.Getenv(k); v != "" {
+ return v, nil
+ }
+ }
+ return dv, nil
+ }
+}
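A sketch of how these helpers are typically wired into a provider schema; the environment variable names and the default region are hypothetical:

var exampleProviderSchema = map[string]*Schema{
	"api_token": {
		Type:        TypeString,
		Optional:    true,
		DefaultFunc: EnvDefaultFunc("EXAMPLE_API_TOKEN", nil),
	},
	"region": {
		Type:        TypeString,
		Optional:    true,
		DefaultFunc: MultiEnvDefaultFunc([]string{"EXAMPLE_REGION", "EXAMPLE_DEFAULT_REGION"}, "us-east-1"),
	},
}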
+
+// SchemaSetFunc is a function that must return a unique ID for the given
+// element. This unique ID is used to store the element in a hash.
+type SchemaSetFunc func(interface{}) int
+
+// SchemaStateFunc is a function used to convert some type to a string
+// to be stored in the state.
+type SchemaStateFunc func(interface{}) string
+
+// SchemaValidateFunc is a function used to validate a single field in the
+// schema.
+type SchemaValidateFunc func(interface{}, string) ([]string, []error)
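A sketch of a ValidateFunc for a TypeInt field (the function name and bounds are illustrative); it receives the already-decoded value plus the key, and returns warnings and errors:

func validatePortNumber(v interface{}, k string) ([]string, []error) {
	port := v.(int) // guaranteed to be the schema's Go type
	if port < 1 || port > 65535 {
		return nil, []error{fmt.Errorf("%q must be between 1 and 65535, got %d", k, port)}
	}
	return nil, nil
}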
+
+func (s *Schema) GoString() string {
+ return fmt.Sprintf("*%#v", *s)
+}
+
+// DefaultValue returns a default value for this schema by either reading
+// Default or evaluating DefaultFunc. If neither of these is defined, it returns nil.
+func (s *Schema) DefaultValue() (interface{}, error) {
+ if s.Default != nil {
+ return s.Default, nil
+ }
+
+ if s.DefaultFunc != nil {
+ defaultValue, err := s.DefaultFunc()
+ if err != nil {
+ return nil, fmt.Errorf("error loading default: %s", err)
+ }
+ return defaultValue, nil
+ }
+
+ return nil, nil
+}
+
+// ZeroValue returns a zero value for the schema.
+func (s *Schema) ZeroValue() interface{} {
+ // If it's a set then we'll do a bit of extra work to provide the
+ // right hashing function in our empty value.
+ if s.Type == TypeSet {
+ setFunc := s.Set
+ if setFunc == nil {
+ // Default set function uses the schema to hash the whole value
+ elem := s.Elem
+ switch t := elem.(type) {
+ case *Schema:
+ setFunc = HashSchema(t)
+ case *Resource:
+ setFunc = HashResource(t)
+ default:
+ panic("invalid set element type")
+ }
+ }
+ return &Set{F: setFunc}
+ } else {
+ return s.Type.Zero()
+ }
+}
+
+func (s *Schema) finalizeDiff(
+ d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff {
+ if d == nil {
+ return d
+ }
+
+ if s.Type == TypeBool {
+ normalizeBoolString := func(s string) string {
+ switch s {
+ case "0":
+ return "false"
+ case "1":
+ return "true"
+ }
+ return s
+ }
+ d.Old = normalizeBoolString(d.Old)
+ d.New = normalizeBoolString(d.New)
+ }
+
+ if s.Computed && !d.NewRemoved && d.New == "" {
+ // Computed attribute without a new value set
+ d.NewComputed = true
+ }
+
+ if s.ForceNew {
+ // If ForceNew is set, mark that this field requires a new resource
+ // under the following conditions, explained below:
+ //
+ // * Old != New - There is a change in value. This field
+ // is therefore causing a new resource.
+ //
+ // * NewComputed - This field is being computed, hence a
+ // potential change in value, mark as causing a new resource.
+ d.RequiresNew = d.Old != d.New || d.NewComputed
+ }
+
+ if d.NewRemoved {
+ return d
+ }
+
+ if s.Computed {
+ if d.Old != "" && d.New == "" {
+ // This is a computed value with an old value set already,
+ // just let it go.
+ return nil
+ }
+
+ if d.New == "" {
+ // Computed attribute without a new value set
+ d.NewComputed = true
+ }
+ }
+
+ if s.Sensitive {
+ // Set the Sensitive flag so output is hidden in the UI
+ d.Sensitive = true
+ }
+
+ return d
+}
+
+// schemaMap is a wrapper that adds nice functions on top of schemas.
+type schemaMap map[string]*Schema
+
+// Data returns a ResourceData for the given schema, state, and diff.
+//
+// The diff is optional.
+func (m schemaMap) Data(
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff) (*ResourceData, error) {
+ return &ResourceData{
+ schema: m,
+ state: s,
+ diff: d,
+ }, nil
+}
+
+// Diff returns the diff for a resource given the schema map,
+// state, and configuration.
+func (m schemaMap) Diff(
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+ result := new(terraform.InstanceDiff)
+ result.Attributes = make(map[string]*terraform.ResourceAttrDiff)
+
+ // Make sure to mark if the resource is tainted
+ if s != nil {
+ result.DestroyTainted = s.Tainted
+ }
+
+ d := &ResourceData{
+ schema: m,
+ state: s,
+ config: c,
+ }
+
+ for k, schema := range m {
+ err := m.diff(k, schema, result, d, false)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // If the diff requires a new resource, then we recompute the diff
+ // so we have the complete new resource diff, and preserve the
+ // RequiresNew fields where necessary so the user knows exactly what
+ // caused that.
+ if result.RequiresNew() {
+ // Create the new diff
+ result2 := new(terraform.InstanceDiff)
+ result2.Attributes = make(map[string]*terraform.ResourceAttrDiff)
+
+ // Preserve the DestroyTainted flag
+ result2.DestroyTainted = result.DestroyTainted
+
+ // Reset the data to not contain state. We have to call init()
+ // again in order to reset the FieldReaders.
+ d.state = nil
+ d.init()
+
+ // Perform the diff again
+ for k, schema := range m {
+ err := m.diff(k, schema, result2, d, false)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Force all the fields to not force a new since we know what we
+ // want to force new.
+ for k, attr := range result2.Attributes {
+ if attr == nil {
+ continue
+ }
+
+ if attr.RequiresNew {
+ attr.RequiresNew = false
+ }
+
+ if s != nil {
+ attr.Old = s.Attributes[k]
+ }
+ }
+
+ // Now copy in all the requires new diffs...
+ for k, attr := range result.Attributes {
+ if attr == nil {
+ continue
+ }
+
+ newAttr, ok := result2.Attributes[k]
+ if !ok {
+ newAttr = attr
+ }
+
+ if attr.RequiresNew {
+ newAttr.RequiresNew = true
+ }
+
+ result2.Attributes[k] = newAttr
+ }
+
+ // And set the diff!
+ result = result2
+ }
+
+ // Remove any nil diffs just to keep things clean
+ for k, v := range result.Attributes {
+ if v == nil {
+ delete(result.Attributes, k)
+ }
+ }
+
+ // Go through and detect all of the ComputedWhens now that we've
+ // finished the diff.
+ // TODO
+
+ if result.Empty() {
+ // If we don't have any diff elements, just return nil
+ return nil, nil
+ }
+
+ return result, nil
+}
+
+// Input implements the terraform.ResourceProvider method by asking
+// for input for required configuration keys that don't have a value.
+func (m schemaMap) Input(
+ input terraform.UIInput,
+ c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := m[k]
+
+ // Skip things that don't require config, if that is even valid
+ // for a provider schema.
+ // Required XOR Optional must always be true to validate, so we only
+ // need to check one.
+ if v.Optional {
+ continue
+ }
+
+ // Deprecated fields should never prompt
+ if v.Deprecated != "" {
+ continue
+ }
+
+ // Skip things that have a value of some sort already
+ if _, ok := c.Raw[k]; ok {
+ continue
+ }
+
+ // Skip if it has a default value
+ defaultValue, err := v.DefaultValue()
+ if err != nil {
+ return nil, fmt.Errorf("%s: error loading default: %s", k, err)
+ }
+ if defaultValue != nil {
+ continue
+ }
+
+ var value interface{}
+ switch v.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList:
+ continue
+ case TypeString:
+ value, err = m.inputString(input, k, v)
+ default:
+ panic(fmt.Sprintf("Unknown type for input: %#v", v.Type))
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf(
+ "%s: %s", k, err)
+ }
+
+ c.Config[k] = value
+ }
+
+ return c, nil
+}
+
+// Validate validates the configuration against this schema mapping.
+func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ return m.validateObject("", m, c)
+}
+
+// InternalValidate validates the format of this schema. This should be called
+// from a unit test (and not in user-path code) to verify that a schema
+// is properly built.
+func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
+ if topSchemaMap == nil {
+ topSchemaMap = m
+ }
+ for k, v := range m {
+ if v.Type == TypeInvalid {
+ return fmt.Errorf("%s: Type must be specified", k)
+ }
+
+ if v.Optional && v.Required {
+ return fmt.Errorf("%s: Optional or Required must be set, not both", k)
+ }
+
+ if v.Required && v.Computed {
+ return fmt.Errorf("%s: Cannot be both Required and Computed", k)
+ }
+
+ if !v.Required && !v.Optional && !v.Computed {
+ return fmt.Errorf("%s: One of optional, required, or computed must be set", k)
+ }
+
+ if v.Computed && v.Default != nil {
+ return fmt.Errorf("%s: Default must be nil if computed", k)
+ }
+
+ if v.Required && v.Default != nil {
+ return fmt.Errorf("%s: Default cannot be set with Required", k)
+ }
+
+ if len(v.ComputedWhen) > 0 && !v.Computed {
+ return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k)
+ }
+
+ if len(v.ConflictsWith) > 0 && v.Required {
+ return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k)
+ }
+
+ if len(v.ConflictsWith) > 0 {
+ for _, key := range v.ConflictsWith {
+ parts := strings.Split(key, ".")
+ sm := topSchemaMap
+ var target *Schema
+ for _, part := range parts {
+ // Skip index fields
+ if _, err := strconv.Atoi(part); err == nil {
+ continue
+ }
+
+ var ok bool
+ if target, ok = sm[part]; !ok {
+ return fmt.Errorf("%s: ConflictsWith references unknown attribute (%s)", k, key)
+ }
+
+ if subResource, ok := target.Elem.(*Resource); ok {
+ sm = schemaMap(subResource.Schema)
+ }
+ }
+ if target == nil {
+ return fmt.Errorf("%s: ConflictsWith cannot find target attribute (%s), sm: %#v", k, key, sm)
+ }
+ if target.Required {
+ return fmt.Errorf("%s: ConflictsWith cannot contain Required attribute (%s)", k, key)
+ }
+
+ if len(target.ComputedWhen) > 0 {
+ return fmt.Errorf("%s: ConflictsWith cannot contain Computed(When) attribute (%s)", k, key)
+ }
+ }
+ }
+
+ if v.Type == TypeList || v.Type == TypeSet {
+ if v.Elem == nil {
+ return fmt.Errorf("%s: Elem must be set for lists", k)
+ }
+
+ if v.Default != nil {
+ return fmt.Errorf("%s: Default is not valid for lists or sets", k)
+ }
+
+ if v.Type != TypeSet && v.Set != nil {
+ return fmt.Errorf("%s: Set can only be set for TypeSet", k)
+ }
+
+ switch t := v.Elem.(type) {
+ case *Resource:
+ if err := t.InternalValidate(topSchemaMap, true); err != nil {
+ return err
+ }
+ case *Schema:
+ bad := t.Computed || t.Optional || t.Required
+ if bad {
+ return fmt.Errorf(
+ "%s: Elem must have only Type set", k)
+ }
+ }
+ } else {
+ if v.MaxItems > 0 || v.MinItems > 0 {
+ return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k)
+ }
+ }
+
+ // Computed-only field
+ if v.Computed && !v.Optional {
+ if v.ValidateFunc != nil {
+ return fmt.Errorf("%s: ValidateFunc is for validating user input, "+
+ "there's nothing to validate on computed-only field", k)
+ }
+ if v.DiffSuppressFunc != nil {
+ return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+
+ " between config and state representation. "+
+ "There is no config for computed-only field, nothing to compare.", k)
+ }
+ }
+
+ if v.ValidateFunc != nil {
+ switch v.Type {
+ case TypeList, TypeSet:
+ return fmt.Errorf("ValidateFunc is not yet supported on lists or sets.")
+ }
+ }
+ }
+
+ return nil
+}
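As the comments above suggest, providers usually exercise this from a unit test via Provider.InternalValidate; a sketch, assuming a hypothetical Provider() constructor and the standard testing package:

func TestProvider_internalValidate(t *testing.T) {
	// Provider() is a hypothetical constructor returning *schema.Provider.
	if err := Provider().InternalValidate(); err != nil {
		t.Fatalf("internal validation failed: %s", err)
	}
}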
+
+func (m schemaMap) diff(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+
+ unsuppressedDiff := new(terraform.InstanceDiff)
+ unsuppressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff)
+
+ var err error
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ err = m.diffString(k, schema, unsuppressedDiff, d, all)
+ case TypeList:
+ err = m.diffList(k, schema, unsuppressedDiff, d, all)
+ case TypeMap:
+ err = m.diffMap(k, schema, unsuppressedDiff, d, all)
+ case TypeSet:
+ err = m.diffSet(k, schema, unsuppressedDiff, d, all)
+ default:
+ err = fmt.Errorf("%s: unknown type %#v", k, schema.Type)
+ }
+
+ for attrK, attrV := range unsuppressedDiff.Attributes {
+ if schema.DiffSuppressFunc != nil &&
+ attrV != nil &&
+ schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, d) {
+ continue
+ }
+
+ diff.Attributes[attrK] = attrV
+ }
+
+ return err
+}
+
+func (m schemaMap) diffList(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+ o, n, _, computedList := d.diffChange(k)
+ if computedList {
+ n = nil
+ }
+ nSet := n != nil
+
+ // If we have an old value and no new value is set or will be
+ // computed once all variables can be interpolated and we're
+ // computed, then nothing has changed.
+ if o != nil && n == nil && !computedList && schema.Computed {
+ return nil
+ }
+
+ if o == nil {
+ o = []interface{}{}
+ }
+ if n == nil {
+ n = []interface{}{}
+ }
+ if s, ok := o.(*Set); ok {
+ o = s.List()
+ }
+ if s, ok := n.(*Set); ok {
+ n = s.List()
+ }
+ os := o.([]interface{})
+ vs := n.([]interface{})
+
+ // If the new value was set, and the two are equal, then we're done.
+ // We have to do this check here because sets might NOT be
+ // reflect.DeepEqual, so we need to wait until we have the []interface{}
+ if !all && nSet && reflect.DeepEqual(os, vs) {
+ return nil
+ }
+
+ // Get the counts
+ oldLen := len(os)
+ newLen := len(vs)
+ oldStr := strconv.FormatInt(int64(oldLen), 10)
+
+ // If the whole list is computed, then say that the # is computed
+ if computedList {
+ diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{
+ Old: oldStr,
+ NewComputed: true,
+ RequiresNew: schema.ForceNew,
+ }
+ return nil
+ }
+
+ // If the counts are not the same, then record that diff
+ changed := oldLen != newLen
+ computed := oldLen == 0 && newLen == 0 && schema.Computed
+ if changed || computed || all {
+ countSchema := &Schema{
+ Type: TypeInt,
+ Computed: schema.Computed,
+ ForceNew: schema.ForceNew,
+ }
+
+ newStr := ""
+ if !computed {
+ newStr = strconv.FormatInt(int64(newLen), 10)
+ } else {
+ oldStr = ""
+ }
+
+ diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: oldStr,
+ New: newStr,
+ })
+ }
+
+ // Figure out the maximum
+ maxLen := oldLen
+ if newLen > maxLen {
+ maxLen = newLen
+ }
+
+ switch t := schema.Elem.(type) {
+ case *Resource:
+ // This is a complex resource
+ for i := 0; i < maxLen; i++ {
+ for k2, schema := range t.Schema {
+ subK := fmt.Sprintf("%s.%d.%s", k, i, k2)
+ err := m.diff(subK, schema, diff, d, all)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ case *Schema:
+ // Copy the schema so that we can set Computed/ForceNew from
+ // the parent schema (the TypeList).
+ t2 := *t
+ t2.ForceNew = schema.ForceNew
+
+ // This is just a primitive element, so go through each and
+ // just diff each.
+ for i := 0; i < maxLen; i++ {
+ subK := fmt.Sprintf("%s.%d", k, i)
+ err := m.diff(subK, &t2, diff, d, all)
+ if err != nil {
+ return err
+ }
+ }
+ default:
+ return fmt.Errorf("%s: unknown element type (internal)", k)
+ }
+
+ return nil
+}
+
+func (m schemaMap) diffMap(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+ prefix := k + "."
+
+ // First get all the values from the state
+ var stateMap, configMap map[string]string
+ o, n, _, nComputed := d.diffChange(k)
+ if err := mapstructure.WeakDecode(o, &stateMap); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ if err := mapstructure.WeakDecode(n, &configMap); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+
+ // Keep track of whether the state _exists_ at all prior to clearing it
+ stateExists := o != nil
+
+ // Delete any count values, since we don't use those
+ delete(configMap, "%")
+ delete(stateMap, "%")
+
+ // Check if the number of elements has changed.
+ oldLen, newLen := len(stateMap), len(configMap)
+ changed := oldLen != newLen
+ if oldLen != 0 && newLen == 0 && schema.Computed {
+ changed = false
+ }
+
+ // It is computed if we have no old value, no new value, the schema
+ // says it is computed, and it didn't exist in the state before. The
+ // last point means: if it existed in the state, even empty, then it
+ // has already been computed.
+ computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists
+
+ // If the count has changed or we're computed, then add a diff for the
+ // count. "nComputed" means that the new value _contains_ a value that
+ // is computed. We don't do granular diffs for this yet, so we mark the
+ // whole map as computed.
+ if changed || computed || nComputed {
+ countSchema := &Schema{
+ Type: TypeInt,
+ Computed: schema.Computed || nComputed,
+ ForceNew: schema.ForceNew,
+ }
+
+ oldStr := strconv.FormatInt(int64(oldLen), 10)
+ newStr := ""
+ if !computed && !nComputed {
+ newStr = strconv.FormatInt(int64(newLen), 10)
+ } else {
+ oldStr = ""
+ }
+
+ diff.Attributes[k+".%"] = countSchema.finalizeDiff(
+ &terraform.ResourceAttrDiff{
+ Old: oldStr,
+ New: newStr,
+ },
+ )
+ }
+
+ // If the new map is nil and we're computed, then ignore it.
+ if n == nil && schema.Computed {
+ return nil
+ }
+
+ // Now we compare, preferring values from the config map
+ for k, v := range configMap {
+ old, ok := stateMap[k]
+ delete(stateMap, k)
+
+ if old == v && ok && !all {
+ continue
+ }
+
+ diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: old,
+ New: v,
+ })
+ }
+ for k, v := range stateMap {
+ diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: v,
+ NewRemoved: true,
+ })
+ }
+
+ return nil
+}
+
+func (m schemaMap) diffSet(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+
+ o, n, _, computedSet := d.diffChange(k)
+ if computedSet {
+ n = nil
+ }
+ nSet := n != nil
+
+ // If we have an old value and no new value is set or will be
+ // computed once all variables can be interpolated and we're
+ // computed, then nothing has changed.
+ if o != nil && n == nil && !computedSet && schema.Computed {
+ return nil
+ }
+
+ if o == nil {
+ o = schema.ZeroValue().(*Set)
+ }
+ if n == nil {
+ n = schema.ZeroValue().(*Set)
+ }
+ os := o.(*Set)
+ ns := n.(*Set)
+
+ // If the new value was set, compare the listCodes to determine if
+ // the two are equal. Comparing listCodes instead of the actual values
+ // is needed because there could be computed values in the set which
+ // would result in false positives while comparing.
+ if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) {
+ return nil
+ }
+
+ // Get the counts
+ oldLen := os.Len()
+ newLen := ns.Len()
+ oldStr := strconv.Itoa(oldLen)
+ newStr := strconv.Itoa(newLen)
+
+ // Build a schema for our count
+ countSchema := &Schema{
+ Type: TypeInt,
+ Computed: schema.Computed,
+ ForceNew: schema.ForceNew,
+ }
+
+ // If the set is computed then say that the # is computed
+ if computedSet || schema.Computed && !nSet {
+ // If # already exists, equals 0 and no new set is supplied, there
+ // is nothing to record in the diff
+ count, ok := d.GetOk(k + ".#")
+ if ok && count.(int) == 0 && !nSet && !computedSet {
+ return nil
+ }
+
+ // Set the count but make sure that if # does not exist, we don't
+ // use the zeroed value
+ countStr := strconv.Itoa(count.(int))
+ if !ok {
+ countStr = ""
+ }
+
+ diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: countStr,
+ NewComputed: true,
+ })
+ return nil
+ }
+
+ // If the counts are not the same, then record that diff
+ changed := oldLen != newLen
+ if changed || all {
+ diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: oldStr,
+ New: newStr,
+ })
+ }
+
+ // Build the list of codes that will make up our set. This is the
+ // removed codes as well as all the codes in the new codes.
+ codes := make([][]string, 2)
+ codes[0] = os.Difference(ns).listCode()
+ codes[1] = ns.listCode()
+ for _, list := range codes {
+ for _, code := range list {
+ switch t := schema.Elem.(type) {
+ case *Resource:
+ // This is a complex resource
+ for k2, schema := range t.Schema {
+ subK := fmt.Sprintf("%s.%s.%s", k, code, k2)
+ err := m.diff(subK, schema, diff, d, true)
+ if err != nil {
+ return err
+ }
+ }
+ case *Schema:
+ // Copy the schema so that we can set Computed/ForceNew from
+ // the parent schema (the TypeSet).
+ t2 := *t
+ t2.ForceNew = schema.ForceNew
+
+ // This is just a primitive element, so go through each and
+ // just diff each.
+ subK := fmt.Sprintf("%s.%s", k, code)
+ err := m.diff(subK, &t2, diff, d, true)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("%s: unknown element type (internal)", k)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (m schemaMap) diffString(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+ var originalN interface{}
+ var os, ns string
+ o, n, _, computed := d.diffChange(k)
+ if schema.StateFunc != nil && n != nil {
+ originalN = n
+ n = schema.StateFunc(n)
+ }
+ nraw := n
+ if nraw == nil && o != nil {
+ nraw = schema.Type.Zero()
+ }
+ if err := mapstructure.WeakDecode(o, &os); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ if err := mapstructure.WeakDecode(nraw, &ns); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+
+ if os == ns && !all {
+ // They're the same value. If the old value is not blank or we
+ // have an ID, then return right away since we're already set up.
+ if os != "" || d.Id() != "" {
+ return nil
+ }
+
+ // Otherwise, only continue if we're computed
+ if !schema.Computed && !computed {
+ return nil
+ }
+ }
+
+ removed := false
+ if o != nil && n == nil {
+ removed = true
+ }
+ if removed && schema.Computed {
+ return nil
+ }
+
+ diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: os,
+ New: ns,
+ NewExtra: originalN,
+ NewRemoved: removed,
+ NewComputed: computed,
+ })
+
+ return nil
+}
+
+func (m schemaMap) inputString(
+ input terraform.UIInput,
+ k string,
+ schema *Schema) (interface{}, error) {
+ result, err := input.Input(&terraform.InputOpts{
+ Id: k,
+ Query: k,
+ Description: schema.Description,
+ Default: schema.InputDefault,
+ })
+
+ return result, err
+}
+
+func (m schemaMap) validate(
+ k string,
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ raw, ok := c.Get(k)
+ if !ok && schema.DefaultFunc != nil {
+ // We have a dynamic default. Check if we have a value.
+ var err error
+ raw, err = schema.DefaultFunc()
+ if err != nil {
+ return nil, []error{fmt.Errorf(
+ "%q, error loading default: %s", k, err)}
+ }
+
+ // We're okay as long as we had a value set
+ ok = raw != nil
+ }
+ if !ok {
+ if schema.Required {
+ return nil, []error{fmt.Errorf(
+ "%q: required field is not set", k)}
+ }
+
+ return nil, nil
+ }
+
+ if !schema.Required && !schema.Optional {
+ // This is a computed-only field
+ return nil, []error{fmt.Errorf(
+ "%q: this field cannot be set", k)}
+ }
+
+ err := m.validateConflictingAttributes(k, schema, c)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ return m.validateType(k, raw, schema, c)
+}
+
+func (m schemaMap) validateConflictingAttributes(
+ k string,
+ schema *Schema,
+ c *terraform.ResourceConfig) error {
+
+ if len(schema.ConflictsWith) == 0 {
+ return nil
+ }
+
+ for _, conflicting_key := range schema.ConflictsWith {
+ if value, ok := c.Get(conflicting_key); ok {
+ return fmt.Errorf(
+ "%q: conflicts with %s (%#v)", k, conflicting_key, value)
+ }
+ }
+
+ return nil
+}
+
+func (m schemaMap) validateList(
+ k string,
+ raw interface{},
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ // We use reflection to verify the slice because you can't
+ // cast to []interface{} unless the slice is exactly that type.
+ rawV := reflect.ValueOf(raw)
+
+ // If we support promotion and the raw value isn't a slice, wrap
+ // it in []interface{} and check again.
+ if schema.PromoteSingle && rawV.Kind() != reflect.Slice {
+ raw = []interface{}{raw}
+ rawV = reflect.ValueOf(raw)
+ }
+
+ if rawV.Kind() != reflect.Slice {
+ return nil, []error{fmt.Errorf(
+ "%s: should be a list", k)}
+ }
+
+ // Validate length
+ if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems {
+ return nil, []error{fmt.Errorf(
+ "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())}
+ }
+
+ if schema.MinItems > 0 && rawV.Len() < schema.MinItems {
+ return nil, []error{fmt.Errorf(
+ "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())}
+ }
+
+ // Now build the []interface{}
+ raws := make([]interface{}, rawV.Len())
+ for i := range raws {
+ raws[i] = rawV.Index(i).Interface()
+ }
+
+ var ws []string
+ var es []error
+ for i, raw := range raws {
+ key := fmt.Sprintf("%s.%d", k, i)
+
+ // Reify the key value from the ResourceConfig.
+ // If the list was computed we have all raw values, but some of these
+ // may be known in the config, and aren't individually marked as Computed.
+ if r, ok := c.Get(key); ok {
+ raw = r
+ }
+
+ var ws2 []string
+ var es2 []error
+ switch t := schema.Elem.(type) {
+ case *Resource:
+ // This is a sub-resource
+ ws2, es2 = m.validateObject(key, t.Schema, c)
+ case *Schema:
+ ws2, es2 = m.validateType(key, raw, t, c)
+ }
+
+ if len(ws2) > 0 {
+ ws = append(ws, ws2...)
+ }
+ if len(es2) > 0 {
+ es = append(es, es2...)
+ }
+ }
+
+ return ws, es
+}
+
+func (m schemaMap) validateMap(
+ k string,
+ raw interface{},
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ // We use reflection to verify the value because you can't
+ // cast to []interface{} unless the slice is exactly that type.
+ rawV := reflect.ValueOf(raw)
+ switch rawV.Kind() {
+ case reflect.String:
+ // If raw and reified are equal, this is a string and should
+ // be rejected.
+ reified, reifiedOk := c.Get(k)
+ if reifiedOk && raw == reified && !c.IsComputed(k) {
+ return nil, []error{fmt.Errorf("%s: should be a map", k)}
+ }
+ // Otherwise it's likely raw is an interpolation.
+ return nil, nil
+ case reflect.Map:
+ case reflect.Slice:
+ default:
+ return nil, []error{fmt.Errorf("%s: should be a map", k)}
+ }
+
+ // If it is not a slice, validate directly
+ if rawV.Kind() != reflect.Slice {
+ mapIface := rawV.Interface()
+ if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
+ return nil, errs
+ }
+ if schema.ValidateFunc != nil {
+ return schema.ValidateFunc(mapIface, k)
+ }
+ return nil, nil
+ }
+
+ // It is a slice, verify that all the elements are maps
+ raws := make([]interface{}, rawV.Len())
+ for i := range raws {
+ raws[i] = rawV.Index(i).Interface()
+ }
+
+ for _, raw := range raws {
+ v := reflect.ValueOf(raw)
+ if v.Kind() != reflect.Map {
+ return nil, []error{fmt.Errorf(
+ "%s: should be a map", k)}
+ }
+ mapIface := v.Interface()
+ if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
+ return nil, errs
+ }
+ }
+
+ if schema.ValidateFunc != nil {
+ validatableMap := make(map[string]interface{})
+ for _, raw := range raws {
+ for k, v := range raw.(map[string]interface{}) {
+ validatableMap[k] = v
+ }
+ }
+
+ return schema.ValidateFunc(validatableMap, k)
+ }
+
+ return nil, nil
+}
+
+func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) {
+ for key, raw := range m {
+ valueType, err := getValueType(k, schema)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ switch valueType {
+ case TypeBool:
+ var n bool
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+ }
+ case TypeInt:
+ var n int
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+ }
+ case TypeFloat:
+ var n float64
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+ }
+ case TypeString:
+ var n string
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+ }
+ default:
+ panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type))
+ }
+ }
+ return nil, nil
+}
+
+func getValueType(k string, schema *Schema) (ValueType, error) {
+ if schema.Elem == nil {
+ return TypeString, nil
+ }
+ if vt, ok := schema.Elem.(ValueType); ok {
+ return vt, nil
+ }
+
+ if s, ok := schema.Elem.(*Schema); ok {
+ if s.Elem == nil {
+ return TypeString, nil
+ }
+ if vt, ok := s.Elem.(ValueType); ok {
+ return vt, nil
+ }
+ }
+
+ if _, ok := schema.Elem.(*Resource); ok {
+ // TODO: We don't actually support this (yet)
+ // but silently pass the validation, until we decide
+ // how to handle nested structures in maps
+ return TypeString, nil
+ }
+ return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem)
+}
+
+func (m schemaMap) validateObject(
+ k string,
+ schema map[string]*Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ raw, _ := c.GetRaw(k)
+ if _, ok := raw.(map[string]interface{}); !ok {
+ return nil, []error{fmt.Errorf(
+ "%s: expected object, got %s",
+ k, reflect.ValueOf(raw).Kind())}
+ }
+
+ var ws []string
+ var es []error
+ for subK, s := range schema {
+ key := subK
+ if k != "" {
+ key = fmt.Sprintf("%s.%s", k, subK)
+ }
+
+ ws2, es2 := m.validate(key, s, c)
+ if len(ws2) > 0 {
+ ws = append(ws, ws2...)
+ }
+ if len(es2) > 0 {
+ es = append(es, es2...)
+ }
+ }
+
+ // Detect any extra/unknown keys and report those as errors.
+ if m, ok := raw.(map[string]interface{}); ok {
+ for subk := range m {
+ if _, ok := schema[subk]; !ok {
+ if subk == TimeoutsConfigKey {
+ continue
+ }
+ es = append(es, fmt.Errorf(
+ "%s: invalid or unknown key: %s", k, subk))
+ }
+ }
+ }
+
+ return ws, es
+}
+
+func (m schemaMap) validatePrimitive(
+ k string,
+ raw interface{},
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+
+ // Catch if the user gave a complex type where a primitive was
+ // expected, so we can return a friendly error message that
+ // doesn't contain Go type system terminology.
+ switch reflect.ValueOf(raw).Type().Kind() {
+ case reflect.Slice:
+ return nil, []error{
+ fmt.Errorf("%s must be a single value, not a list", k),
+ }
+ case reflect.Map:
+ return nil, []error{
+ fmt.Errorf("%s must be a single value, not a map", k),
+ }
+ default: // ok
+ }
+
+ if c.IsComputed(k) {
+ // If the key is being computed, then it is not an error as
+ // long as it's not a slice or map.
+ return nil, nil
+ }
+
+ var decoded interface{}
+ switch schema.Type {
+ case TypeBool:
+ // Verify that we can parse this as the correct type
+ var n bool
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s: %s", k, err)}
+ }
+ decoded = n
+ case TypeInt:
+ // Verify that we can parse this as an int
+ var n int
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s: %s", k, err)}
+ }
+ decoded = n
+ case TypeFloat:
+ // Verify that we can parse this as a float
+ var n float64
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s: %s", k, err)}
+ }
+ decoded = n
+ case TypeString:
+ // Verify that we can parse this as a string
+ var n string
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s: %s", k, err)}
+ }
+ decoded = n
+ default:
+ panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type))
+ }
+
+ if schema.ValidateFunc != nil {
+ return schema.ValidateFunc(decoded, k)
+ }
+
+ return nil, nil
+}
+
+func (m schemaMap) validateType(
+ k string,
+ raw interface{},
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ var ws []string
+ var es []error
+ switch schema.Type {
+ case TypeSet, TypeList:
+ ws, es = m.validateList(k, raw, schema, c)
+ case TypeMap:
+ ws, es = m.validateMap(k, raw, schema, c)
+ default:
+ ws, es = m.validatePrimitive(k, raw, schema, c)
+ }
+
+ if schema.Deprecated != "" {
+ ws = append(ws, fmt.Sprintf(
+ "%q: [DEPRECATED] %s", k, schema.Deprecated))
+ }
+
+ if schema.Removed != "" {
+ es = append(es, fmt.Errorf(
+ "%q: [REMOVED] %s", k, schema.Removed))
+ }
+
+ return ws, es
+}
+
+// Zero returns the zero value for a type.
+func (t ValueType) Zero() interface{} {
+ switch t {
+ case TypeInvalid:
+ return nil
+ case TypeBool:
+ return false
+ case TypeInt:
+ return 0
+ case TypeFloat:
+ return 0.0
+ case TypeString:
+ return ""
+ case TypeList:
+ return []interface{}{}
+ case TypeMap:
+ return map[string]interface{}{}
+ case TypeSet:
+ return new(Set)
+ case typeObject:
+ return map[string]interface{}{}
+ default:
+ panic(fmt.Sprintf("unknown type %s", t))
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go
new file mode 100644
index 00000000..3eb2d007
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go
@@ -0,0 +1,122 @@
+package schema
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+)
+
+func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) {
+ if val == nil {
+ buf.WriteRune(';')
+ return
+ }
+
+ switch schema.Type {
+ case TypeBool:
+ if val.(bool) {
+ buf.WriteRune('1')
+ } else {
+ buf.WriteRune('0')
+ }
+ case TypeInt:
+ buf.WriteString(strconv.Itoa(val.(int)))
+ case TypeFloat:
+ buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64))
+ case TypeString:
+ buf.WriteString(val.(string))
+ case TypeList:
+ buf.WriteRune('(')
+ l := val.([]interface{})
+ for _, innerVal := range l {
+ serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
+ }
+ buf.WriteRune(')')
+ case TypeMap:
+
+ m := val.(map[string]interface{})
+ var keys []string
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ buf.WriteRune('[')
+ for _, k := range keys {
+ innerVal := m[k]
+ if innerVal == nil {
+ continue
+ }
+ buf.WriteString(k)
+ buf.WriteRune(':')
+
+ switch innerVal := innerVal.(type) {
+ case int:
+ buf.WriteString(strconv.Itoa(innerVal))
+ case float64:
+ buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64))
+ case string:
+ buf.WriteString(innerVal)
+ default:
+ panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal))
+ }
+
+ buf.WriteRune(';')
+ }
+ buf.WriteRune(']')
+ case TypeSet:
+ buf.WriteRune('{')
+ s := val.(*Set)
+ for _, innerVal := range s.List() {
+ serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
+ }
+ buf.WriteRune('}')
+ default:
+ panic("unknown schema type to serialize")
+ }
+ buf.WriteRune(';')
+}
+
+// SerializeResourceForHash appends a serialization of the given resource config
+// to the given buffer, guaranteeing deterministic results given the same value
+// and schema.
+//
+// Its primary purpose is as input into a hashing function in order
+// to hash complex substructures when used in sets, and so the serialization
+// is not reversible.
+func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) {
+ sm := resource.Schema
+ m := val.(map[string]interface{})
+ var keys []string
+ for k := range sm {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ innerSchema := sm[k]
+ // Skip attributes that are not user-provided. Computed attributes
+ // do not contribute to the hash since their ultimate value cannot
+ // be known at plan/diff time.
+ if !(innerSchema.Required || innerSchema.Optional) {
+ continue
+ }
+
+ buf.WriteString(k)
+ buf.WriteRune(':')
+ innerVal := m[k]
+ SerializeValueForHash(buf, innerVal, innerSchema)
+ }
+}
+
+func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) {
+ switch tElem := elem.(type) {
+ case *Schema:
+ SerializeValueForHash(buf, val, tElem)
+ case *Resource:
+ buf.WriteRune('<')
+ SerializeResourceForHash(buf, val, tElem)
+ buf.WriteString(">;")
+ default:
+ panic(fmt.Sprintf("invalid element type: %T", tElem))
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
new file mode 100644
index 00000000..de05f40e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
@@ -0,0 +1,209 @@
+package schema
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+
+ "github.com/hashicorp/terraform/helper/hashcode"
+)
+
+// HashString hashes strings. If you want a Set of strings, this is the
+// SchemaSetFunc you want.
+func HashString(v interface{}) int {
+ return hashcode.String(v.(string))
+}
+
+// HashResource hashes complex structures that are described using
+// a *Resource. This is the default set implementation used when a set's
+// element type is a full resource.
+func HashResource(resource *Resource) SchemaSetFunc {
+ return func(v interface{}) int {
+ var buf bytes.Buffer
+ SerializeResourceForHash(&buf, v, resource)
+ return hashcode.String(buf.String())
+ }
+}
+
+// HashSchema hashes values that are described using a *Schema. This is the
+// default set implementation used when a set's element type is a single
+// schema.
+func HashSchema(schema *Schema) SchemaSetFunc {
+ return func(v interface{}) int {
+ var buf bytes.Buffer
+ SerializeValueForHash(&buf, v, schema)
+ return hashcode.String(buf.String())
+ }
+}
+
+// Set is a set data structure that is returned for elements of type
+// TypeSet.
+type Set struct {
+ F SchemaSetFunc
+
+ m map[string]interface{}
+ once sync.Once
+}
+
+// NewSet is a convenience method for creating a new set with the given
+// items.
+func NewSet(f SchemaSetFunc, items []interface{}) *Set {
+ s := &Set{F: f}
+ for _, i := range items {
+ s.Add(i)
+ }
+
+ return s
+}
+
+// CopySet returns a copy of another set.
+func CopySet(otherSet *Set) *Set {
+ return NewSet(otherSet.F, otherSet.List())
+}
+
+// Add adds an item to the set if it isn't already in the set.
+func (s *Set) Add(item interface{}) {
+ s.add(item, false)
+}
+
+// Remove removes an item if it's already in the set. Idempotent.
+func (s *Set) Remove(item interface{}) {
+ s.remove(item)
+}
+
+// Contains checks if the set has the given item.
+func (s *Set) Contains(item interface{}) bool {
+ _, ok := s.m[s.hash(item)]
+ return ok
+}
+
+// Len returns the number of items in the set.
+func (s *Set) Len() int {
+ return len(s.m)
+}
+
+// List returns the elements of this set in slice format.
+//
+// The order of the returned elements is deterministic. Given the same
+// set, the order of this will always be the same.
+func (s *Set) List() []interface{} {
+ result := make([]interface{}, len(s.m))
+ for i, k := range s.listCode() {
+ result[i] = s.m[k]
+ }
+
+ return result
+}
+
+// Difference performs a set difference of the two sets, returning
+// a new third set that has only the elements unique to this set.
+func (s *Set) Difference(other *Set) *Set {
+ result := &Set{F: s.F}
+ result.once.Do(result.init)
+
+ for k, v := range s.m {
+ if _, ok := other.m[k]; !ok {
+ result.m[k] = v
+ }
+ }
+
+ return result
+}
+
+// Intersection performs the set intersection of the two sets
+// and returns a new third set.
+func (s *Set) Intersection(other *Set) *Set {
+ result := &Set{F: s.F}
+ result.once.Do(result.init)
+
+ for k, v := range s.m {
+ if _, ok := other.m[k]; ok {
+ result.m[k] = v
+ }
+ }
+
+ return result
+}
+
+// Union performs the set union of the two sets and returns a new third
+// set.
+func (s *Set) Union(other *Set) *Set {
+ result := &Set{F: s.F}
+ result.once.Do(result.init)
+
+ for k, v := range s.m {
+ result.m[k] = v
+ }
+ for k, v := range other.m {
+ result.m[k] = v
+ }
+
+ return result
+}
+
+func (s *Set) Equal(raw interface{}) bool {
+ other, ok := raw.(*Set)
+ if !ok {
+ return false
+ }
+
+ return reflect.DeepEqual(s.m, other.m)
+}
+
+func (s *Set) GoString() string {
+ return fmt.Sprintf("*Set(%#v)", s.m)
+}
+
+func (s *Set) init() {
+ s.m = make(map[string]interface{})
+}
+
+func (s *Set) add(item interface{}, computed bool) string {
+ s.once.Do(s.init)
+
+ code := s.hash(item)
+ if computed {
+ code = "~" + code
+ }
+
+ if _, ok := s.m[code]; !ok {
+ s.m[code] = item
+ }
+
+ return code
+}
+
+func (s *Set) hash(item interface{}) string {
+ code := s.F(item)
+ // Always return a nonnegative hashcode.
+ if code < 0 {
+ code = -code
+ }
+ return strconv.Itoa(code)
+}
+
+func (s *Set) remove(item interface{}) string {
+ s.once.Do(s.init)
+
+ code := s.hash(item)
+ delete(s.m, code)
+
+ return code
+}
+
+func (s *Set) index(item interface{}) int {
+ return sort.SearchStrings(s.listCode(), s.hash(item))
+}
+
+func (s *Set) listCode() []string {
+ // Sort the hash codes so the order of the list is deterministic
+ keys := make([]string, 0, len(s.m))
+ for k := range s.m {
+ keys = append(keys, k)
+ }
+ sort.Sort(sort.StringSlice(keys))
+ return keys
+}
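A small usage sketch of the Set operations above, using HashString as the hashing function (the values are illustrative):

func exampleSetUsage() {
	a := NewSet(HashString, []interface{}{"alpha", "beta"})
	b := NewSet(HashString, []interface{}{"beta", "gamma"})

	a.Add("delta")
	_ = a.Contains("alpha") // true

	onlyInA := a.Difference(b) // {"alpha", "delta"}
	both := a.Intersection(b)  // {"beta"}
	all := a.Union(b)          // {"alpha", "beta", "delta", "gamma"}

	_ = onlyInA.List() // deterministic order, sorted by hash code
	_, _ = both, all
}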
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
new file mode 100644
index 00000000..9765bdbc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
@@ -0,0 +1,30 @@
+package schema
+
+import (
+ "testing"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// TestResourceDataRaw creates a ResourceData from a raw configuration map.
+func TestResourceDataRaw(
+ t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData {
+ c, err := config.NewRawConfig(raw)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ sm := schemaMap(schema)
+ diff, err := sm.Diff(nil, terraform.NewResourceConfig(c))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ result, err := sm.Data(nil, diff)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ return result
+}
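A sketch of how TestResourceDataRaw is used in a provider unit test (the schema and values are illustrative):

func TestResourceDataRaw_usage(t *testing.T) {
	s := map[string]*Schema{
		"name": {Type: TypeString, Required: true},
		"size": {Type: TypeInt, Optional: true},
	}

	d := TestResourceDataRaw(t, s, map[string]interface{}{
		"name": "demo",
		"size": "3", // weakly decoded to int per the schema
	})

	if got := d.Get("name").(string); got != "demo" {
		t.Fatalf("unexpected name: %q", got)
	}
	if got := d.Get("size").(int); got != 3 {
		t.Fatalf("unexpected size: %d", got)
	}
}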
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go
new file mode 100644
index 00000000..9286987d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go
@@ -0,0 +1,21 @@
+package schema
+
+//go:generate stringer -type=ValueType valuetype.go
+
+// ValueType is an enum of the type that can be represented by a schema.
+type ValueType int
+
+const (
+ TypeInvalid ValueType = iota
+ TypeBool
+ TypeInt
+ TypeFloat
+ TypeString
+ TypeList
+ TypeMap
+ TypeSet
+ typeObject
+)
+
+// NOTE: ValueType has more functions defined on it in schema.go. We can't
+// put them here because we reference other files.
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
new file mode 100644
index 00000000..1610cec2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT.
+
+package schema
+
+import "fmt"
+
+const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject"
+
+var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77}
+
+func (i ValueType) String() string {
+ if i < 0 || i >= ValueType(len(_ValueType_index)-1) {
+ return fmt.Sprintf("ValueType(%d)", i)
+ }
+ return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
new file mode 100644
index 00000000..7edd5e75
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
@@ -0,0 +1,80 @@
+package shadow
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// Close will close all shadow values within the given structure.
+//
+// This uses reflection to walk the structure, find all shadow elements,
+// and close them. Currently this will only find struct fields that are
+// shadow values, and not slice elements, etc.
+func Close(v interface{}) error {
+ // We require a pointer so we can address the internal fields
+ val := reflect.ValueOf(v)
+ if val.Kind() != reflect.Ptr {
+ return fmt.Errorf("value must be a pointer")
+ }
+
+ // Walk and close
+ var w closeWalker
+ if err := reflectwalk.Walk(v, &w); err != nil {
+ return err
+ }
+
+ return w.Err
+}
+
+type closeWalker struct {
+ Err error
+}
+
+func (w *closeWalker) Struct(reflect.Value) error {
+ // Do nothing. We implement this for reflectwalk.StructWalker
+ return nil
+}
+
+func (w *closeWalker) StructField(f reflect.StructField, v reflect.Value) error {
+	// Guard against invalid values so the walk below doesn't panic.
+ if !v.IsValid() {
+ return nil
+ }
+
+	// PkgPath is empty for exported fields; skip unexported ones.
+ if f.PkgPath != "" {
+ return nil
+ }
+
+ // Verify the io.Closer is in this package
+ typ := v.Type()
+ if typ.PkgPath() != "github.com/hashicorp/terraform/helper/shadow" {
+ return nil
+ }
+
+ // We're looking for an io.Closer
+ raw := v.Interface()
+ if raw == nil {
+ return nil
+ }
+
+ closer, ok := raw.(io.Closer)
+ if !ok && v.CanAddr() {
+ closer, ok = v.Addr().Interface().(io.Closer)
+ }
+ if !ok {
+ return reflectwalk.SkipEntry
+ }
+
+ // Close it
+ if err := closer.Close(); err != nil {
+ w.Err = multierror.Append(w.Err, err)
+ }
+
+ // Don't go into the struct field
+ return reflectwalk.SkipEntry
+}
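A minimal sketch of the walk described above, assuming a hypothetical container struct whose fields are shadow values:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/shadow"
)

// resources is a hypothetical container; Close finds the shadow-typed
// fields by reflection and closes each one.
type resources struct {
	Config shadow.KeyedValue
	Result shadow.Value
}

func main() {
	var r resources
	if err := shadow.Close(&r); err != nil {
		fmt.Println("close error:", err)
	}

	// Any later read now returns ErrClosed instead of blocking.
	fmt.Println(r.Result.Value())
}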
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
new file mode 100644
index 00000000..4223e925
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
@@ -0,0 +1,128 @@
+package shadow
+
+import (
+ "sync"
+)
+
+// ComparedValue is a struct that finds a value by comparing some key
+// to the list of stored values. This is useful when there is no easy
+// uniquely identifying key that works in a map (for that, use KeyedValue).
+//
+// ComparedValue is very expensive, relative to other Value types. Try to
+// limit the number of values stored in a ComparedValue by potentially
+// nesting it within a KeyedValue (a keyed value points to a compared value,
+// for example).
+type ComparedValue struct {
+ // Func is a function that is given the lookup key and a single
+ // stored value. If it matches, it returns true.
+ Func func(k, v interface{}) bool
+
+ lock sync.Mutex
+ once sync.Once
+ closed bool
+ values []interface{}
+ waiters map[interface{}]*Value
+}
+
+// Close closes the value. This can never fail. For a definition of
+// "close" see the ErrClosed docs.
+func (w *ComparedValue) Close() error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // Set closed to true always
+ w.closed = true
+
+ // For all waiters, complete with ErrClosed
+ for k, val := range w.waiters {
+ val.SetValue(ErrClosed)
+ delete(w.waiters, k)
+ }
+
+ return nil
+}
+
+// Value returns the value that was set for the given key, or blocks
+// until one is available.
+func (w *ComparedValue) Value(k interface{}) interface{} {
+ v, val := w.valueWaiter(k)
+ if val == nil {
+ return v
+ }
+
+ return val.Value()
+}
+
+// ValueOk gets the value for the given key, returning immediately if the
+// value doesn't exist. The second return argument is true if the value exists.
+func (w *ComparedValue) ValueOk(k interface{}) (interface{}, bool) {
+ v, val := w.valueWaiter(k)
+ return v, val == nil
+}
+
+func (w *ComparedValue) SetValue(v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.once.Do(w.init)
+
+ // Check if we already have this exact value (by simply comparing
+ // with == directly). If we do, then we don't insert it again.
+ found := false
+ for _, v2 := range w.values {
+ if v == v2 {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ // Set the value, always
+ w.values = append(w.values, v)
+ }
+
+ // Go through the waiters
+ for k, val := range w.waiters {
+ if w.Func(k, v) {
+ val.SetValue(v)
+ delete(w.waiters, k)
+ }
+ }
+}
+
+func (w *ComparedValue) valueWaiter(k interface{}) (interface{}, *Value) {
+ w.lock.Lock()
+ w.once.Do(w.init)
+
+ // Look for a pre-existing value
+ for _, v := range w.values {
+ if w.Func(k, v) {
+ w.lock.Unlock()
+ return v, nil
+ }
+ }
+
+ // If we're closed, return that
+ if w.closed {
+ w.lock.Unlock()
+ return ErrClosed, nil
+ }
+
+ // Pre-existing value doesn't exist, create a waiter
+ val := w.waiters[k]
+ if val == nil {
+ val = new(Value)
+ w.waiters[k] = val
+ }
+ w.lock.Unlock()
+
+ // Return the waiter
+ return nil, val
+}
+
+// Must be called with w.lock held.
+func (w *ComparedValue) init() {
+ w.waiters = make(map[interface{}]*Value)
+ if w.Func == nil {
+ w.Func = func(k, v interface{}) bool { return k == v }
+ }
+}
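A small sketch of the lookup-by-comparison behaviour, using a hypothetical record type matched against a string key by its Name field:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/shadow"
)

type record struct{ Name string }

func main() {
	cv := &shadow.ComparedValue{
		// Match a stored *record against a string key by its Name.
		Func: func(k, v interface{}) bool {
			return v.(*record).Name == k.(string)
		},
	}

	go cv.SetValue(&record{Name: "web"})

	// Blocks until a value matching the key "web" has been set.
	r := cv.Value("web").(*record)
	fmt.Println(r.Name)
}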
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
new file mode 100644
index 00000000..432b0366
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
@@ -0,0 +1,151 @@
+package shadow
+
+import (
+ "sync"
+)
+
+// KeyedValue is a struct that coordinates a value by key. If a value is
+// not available for a given key, it'll block until it is available.
+type KeyedValue struct {
+ lock sync.Mutex
+ once sync.Once
+ values map[string]interface{}
+ waiters map[string]*Value
+ closed bool
+}
+
+// Close closes the value. This can never fail. For a definition of
+// "close" see the ErrClosed docs.
+func (w *KeyedValue) Close() error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // Set closed to true always
+ w.closed = true
+
+ // For all waiters, complete with ErrClosed
+ for k, val := range w.waiters {
+ val.SetValue(ErrClosed)
+ delete(w.waiters, k)
+ }
+
+ return nil
+}
+
+// Value returns the value that was set for the given key, or blocks
+// until one is available.
+func (w *KeyedValue) Value(k string) interface{} {
+ w.lock.Lock()
+ v, val := w.valueWaiter(k)
+ w.lock.Unlock()
+
+ // If we have no waiter, then return the value
+ if val == nil {
+ return v
+ }
+
+ // We have a waiter, so wait
+ return val.Value()
+}
+
+// WaitForChange waits for the value with the given key to be set again.
+// If the key isn't set, it'll wait for an initial value. Note that while
+// it is called "WaitForChange", the value isn't guaranteed to _change_;
+// this will return when a SetValue is called for the given k.
+func (w *KeyedValue) WaitForChange(k string) interface{} {
+ w.lock.Lock()
+ w.once.Do(w.init)
+
+ // If we're closed, we're closed
+ if w.closed {
+ w.lock.Unlock()
+ return ErrClosed
+ }
+
+ // Check for an active waiter. If there isn't one, make it
+ val := w.waiters[k]
+ if val == nil {
+ val = new(Value)
+ w.waiters[k] = val
+ }
+ w.lock.Unlock()
+
+ // And wait
+ return val.Value()
+}
+
+// ValueOk gets the value for the given key, returning immediately if the
+// value doesn't exist. The second return argument is true if the value exists.
+func (w *KeyedValue) ValueOk(k string) (interface{}, bool) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ v, val := w.valueWaiter(k)
+ return v, val == nil
+}
+
+func (w *KeyedValue) SetValue(k string, v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.setValue(k, v)
+}
+
+// Init will initialize the key to a given value only if the key has
+// not been set before. This is safe to call multiple times and in parallel.
+func (w *KeyedValue) Init(k string, v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+	// valueWaiter returns a waiter only when no value exists yet; only
+	// in that case do we set the initial value.
+ _, val := w.valueWaiter(k)
+ if val != nil {
+ w.setValue(k, v)
+ }
+}
+
+// Must be called with w.lock held.
+func (w *KeyedValue) init() {
+ w.values = make(map[string]interface{})
+ w.waiters = make(map[string]*Value)
+}
+
+// setValue is like SetValue but assumes the lock is held.
+func (w *KeyedValue) setValue(k string, v interface{}) {
+ w.once.Do(w.init)
+
+ // Set the value, always
+ w.values[k] = v
+
+ // If we have a waiter, set it
+ if val, ok := w.waiters[k]; ok {
+ val.SetValue(v)
+ delete(w.waiters, k)
+ }
+}
+
+// valueWaiter gets the value or the Value waiter for a given key.
+//
+// This must be called with lock held.
+func (w *KeyedValue) valueWaiter(k string) (interface{}, *Value) {
+ w.once.Do(w.init)
+
+ // If we have this value already, return it
+ if v, ok := w.values[k]; ok {
+ return v, nil
+ }
+
+ // If we're closed, return that
+ if w.closed {
+ return ErrClosed, nil
+ }
+
+ // No pending value, check for a waiter
+ val := w.waiters[k]
+ if val == nil {
+ val = new(Value)
+ w.waiters[k] = val
+ }
+
+ // Return the waiter
+ return nil, val
+}
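A minimal sketch of the blocking behaviour described above:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/shadow"
)

func main() {
	var kv shadow.KeyedValue

	done := make(chan string, 1)
	go func() {
		// Blocks until "region" has been set.
		done <- kv.Value("region").(string)
	}()

	kv.SetValue("region", "us-east-1")
	fmt.Println(<-done)

	// ValueOk never blocks; the bool reports whether the key exists.
	_, ok := kv.ValueOk("missing")
	fmt.Println(ok) // false
}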
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
new file mode 100644
index 00000000..0a43d4d4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
@@ -0,0 +1,66 @@
+package shadow
+
+import (
+ "container/list"
+ "sync"
+)
+
+// OrderedValue is a struct that keeps track of values in the order they
+// are set. Each call to Value() returns the next queued value and then
+// discards it, so values are consumed first-in, first-out.
+//
+// This is unlike Value that returns the same value once it is set.
+type OrderedValue struct {
+ lock sync.Mutex
+ values *list.List
+ waiters *list.List
+}
+
+// Value returns the next queued value, or blocks until one
+// is set.
+func (w *OrderedValue) Value() interface{} {
+ w.lock.Lock()
+
+ // If we have a pending value already, use it
+ if w.values != nil && w.values.Len() > 0 {
+ front := w.values.Front()
+ w.values.Remove(front)
+ w.lock.Unlock()
+ return front.Value
+ }
+
+ // No pending value, create a waiter
+ if w.waiters == nil {
+ w.waiters = list.New()
+ }
+
+ var val Value
+ w.waiters.PushBack(&val)
+ w.lock.Unlock()
+
+ // Return the value once we have it
+ return val.Value()
+}
+
+// SetValue queues a value, handing it directly to a blocked waiter if one exists.
+func (w *OrderedValue) SetValue(v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // If we have a waiter, notify it
+ if w.waiters != nil && w.waiters.Len() > 0 {
+ front := w.waiters.Front()
+ w.waiters.Remove(front)
+
+ val := front.Value.(*Value)
+ val.SetValue(v)
+ return
+ }
+
+ // Add it to the list of values
+ if w.values == nil {
+ w.values = list.New()
+ }
+
+ w.values.PushBack(v)
+}
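A small sketch of the queue-like behaviour: each queued value is returned once, in the order it was set:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/shadow"
)

func main() {
	var ov shadow.OrderedValue

	ov.SetValue("first")
	ov.SetValue("second")

	fmt.Println(ov.Value()) // first
	fmt.Println(ov.Value()) // second (each value is consumed once)
}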
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go
new file mode 100644
index 00000000..2413335b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go
@@ -0,0 +1,79 @@
+package shadow
+
+import (
+ "errors"
+ "sync"
+)
+
+// ErrClosed is returned by any closed values.
+//
+// A "closed value" is when the shadow has been notified that the real
+// side is complete and any blocking values will _never_ be satisfied
+// in the future. In this case, this error is returned. If a value is already
+// available, that is still returned.
+var ErrClosed = errors.New("shadow closed")
+
+// Value is a struct that coordinates a value between two
+// parallel routines. It is similar to atomic.Value, except that a call
+// to Value blocks until a value has been set.
+//
+// The Value can be closed with Close, which will cause any future
+// blocking operations to return immediately with ErrClosed.
+type Value struct {
+ lock sync.Mutex
+ cond *sync.Cond
+ value interface{}
+ valueSet bool
+}
+
+// Close closes the value. This can never fail. For a definition of
+// "close" see the struct docs.
+func (w *Value) Close() error {
+ w.lock.Lock()
+ set := w.valueSet
+ w.lock.Unlock()
+
+ // If we haven't set the value, set it
+ if !set {
+ w.SetValue(ErrClosed)
+ }
+
+ // Done
+ return nil
+}
+
+// Value returns the value that was set.
+func (w *Value) Value() interface{} {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+	// Wait until the value has been set.
+ for !w.valueSet {
+ // No value, setup the condition variable if we have to
+ if w.cond == nil {
+ w.cond = sync.NewCond(&w.lock)
+ }
+
+ // Wait on it
+ w.cond.Wait()
+ }
+
+ // Return the value
+ return w.value
+}
+
+// SetValue sets the value.
+func (w *Value) SetValue(v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // Set the value
+ w.valueSet = true
+ w.value = v
+
+ // If we have a condition, clear it
+ if w.cond != nil {
+ w.cond.Broadcast()
+ w.cond = nil
+ }
+}
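A minimal sketch of Value and Close, showing the blocking read and the ErrClosed behaviour described above:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/shadow"
)

func main() {
	var v shadow.Value

	go v.SetValue(42)
	fmt.Println(v.Value()) // blocks until SetValue runs, then prints 42

	// Closing after a value is set keeps the existing value.
	_ = v.Close()
	fmt.Println(v.Value()) // still 42

	// Closing an unset value causes readers to receive ErrClosed.
	var closed shadow.Value
	_ = closed.Close()
	fmt.Println(closed.Value() == shadow.ErrClosed) // true
}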